diff --git a/.gitignore b/.gitignore index 7b7fbaab95..b9488c633a 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,9 @@ # https://digitalfortress.tech/tricks/creating-a-global-gitignore/ build + +# tools +tools/e2e/e2e +tools/mqtt-bench/mqtt-bench +tools/provision/provision +tools/provision/mfconn.toml diff --git a/Makefile b/Makefile index aecea0cb80..e8b7f80ebb 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ MF_DOCKER_IMAGE_NAME_PREFIX ?= mainflux BUILD_DIR = build SERVICES = users things http coap ws lora influxdb-writer influxdb-reader mongodb-writer \ mongodb-reader cassandra-writer cassandra-reader postgres-writer postgres-reader timescale-writer timescale-reader cli \ - bootstrap opcua auth twins mqtt provision certs smtp-notifier smpp-notifier + bootstrap opcua twins mqtt provision certs smtp-notifier smpp-notifier DOCKERS = $(addprefix docker_,$(SERVICES)) DOCKERS_DEV = $(addprefix docker_dev_,$(SERVICES)) CGO_ENABLED ?= 0 @@ -78,7 +78,8 @@ test: proto: protoc -I. --go_out=. --go_opt=paths=source_relative pkg/messaging/*.proto - protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative *.proto + protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative users/policies/*.proto + protoc -I. --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative things/policies/*.proto $(SERVICES): $(call compile_service,$(@)) diff --git a/README.md b/README.md index db776cb305..27c8bbd7fc 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ For more details, check out the [official documentation][docs]. 
- Mutual TLS Authentication (mTLS) using X.509 Certificates - Fine-grained access control (policies, ABAC/RBAC) - Message persistence (Cassandra, InfluxDB, MongoDB and PostgresSQL) -- Platform logging and instrumentation support (Prometheus and OpenTracing) +- Platform logging and instrumentation support (Prometheus and OpenTelemetry) - Event sourcing - Container-based deployment using [Docker][docker] and [Kubernetes][kubernetes] - [LoRaWAN][lora] network integration diff --git a/api/openapi/auth.yml b/api/openapi/auth.yml deleted file mode 100644 index 3bb60279fb..0000000000 --- a/api/openapi/auth.yml +++ /dev/null @@ -1,803 +0,0 @@ -openapi: 3.0.1 -info: - title: Mainflux authentication service - description: HTTP API for managing platform API keys. - version: "1.0.0" -paths: - /keys: - post: - summary: Issue API key - description: | - Generates a new API key. Thew new API key will - be uniquely identified by its ID. - tags: - - auth - requestBody: - $ref: "#/components/requestBodies/KeyRequest" - responses: - '201': - description: Issued new key. - '400': - description: Failed due to malformed JSON. - '409': - description: Failed due to using already existing ID. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - - get: - summary: Lists API key - description: | - List the API keys issued by the logged in user. - tags: - - auth - parameters: - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Limit" - - $ref: "#/components/parameters/Subject" - - $ref: "#/components/parameters/Type" - responses: - '201': - description: Issued new key. - '400': - description: Failed due to malformed JSON. - '409': - description: Failed due to using already existing ID. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - /keys/{keyID}: - get: - summary: Gets API key details. - description: | - Gets API key details for the given key. 
- tags: - - auth - parameters: - - $ref: "#/components/parameters/ApiKeyId" - responses: - '200': - $ref: "#/components/responses/KeyRes" - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '500': - $ref: "#/components/responses/ServiceError" - delete: - summary: Revoke API key - description: | - Revoke API key identified by the given ID. - tags: - - auth - parameters: - - $ref: "#/components/parameters/ApiKeyId" - responses: - '204': - description: Key revoked. - '401': - description: Missing or invalid access token provided. - '500': - $ref: "#/components/responses/ServiceError" - /groups: - post: - summary: Creates new group - description: | - Creates new group that can be used for grouping entities - things, users. - tags: - - auth - requestBody: - $ref: "#/components/requestBodies/GroupCreateReq" - responses: - '201': - $ref: "#/components/responses/GroupCreateRes" - '400': - description: Failed due to malformed JSON. - '409': - description: Failed due to using an existing email address. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - get: - summary: Gets all groups. - description: | - Gets all groups up to a max level of hierarchy that can be fetched in one - request ( max level = 5). Result can be filtered by metadata. Groups will - be returned as JSON array or JSON tree. - tags: - - auth - parameters: - - $ref: "#/components/parameters/Level" - - $ref: "#/components/parameters/Metadata" - - $ref: "#/components/parameters/Tree" - responses: - '200': - $ref: "#/components/responses/GroupsPageRes" - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Group does not exist. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}: - get: - summary: Gets group info. 
- description: | - Gets info on a group specified by id. - tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - responses: - '200': - $ref: "#/components/responses/GroupRes" - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Group does not exist. - '500': - $ref: "#/components/responses/ServiceError" - put: - summary: Updates group data. - description: | - Updates Name, Description or Metadata of a group. - tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - requestBody: - $ref: "#/components/requestBodies/GroupUpdateReq" - responses: - '200': - description: Group updated. - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Group does not exist. - '500': - $ref: "#/components/responses/ServiceError" - delete: - summary: Deletes group. - description: | - Deletes group. If group is parent and descendant groups do not have any members - child groups will be deleted. Group cannot be deleted if has members or if - any descendant group has members. - tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - - $ref: "#/components/parameters/Level" - - $ref: "#/components/parameters/Metadata" - - $ref: "#/components/parameters/Tree" - responses: - '204': - description: Group removed. - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Group does not exist. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}/children: - get: - summary: Gets group children. - description: | - Gets the whole tree of descendants of group for given id including itself. - For performance reason request is limited up to a given level of hierarchy - (max. 5). 
- tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - - $ref: "#/components/parameters/Level" - - $ref: "#/components/parameters/Metadata" - - $ref: "#/components/parameters/Tree" - responses: - '200': - $ref: "#/components/responses/GroupsPageRes" - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Group does not exist. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}/parents: - get: - summary: Gets group info. - description: | - Gets a direct line of ancestors for a group specified by id. - Result is up to a specified hierarchy level or up to a root group. - Result can be a JSON array or a JSON tree. - tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - - $ref: "#/components/parameters/Level" - - $ref: "#/components/parameters/Metadata" - - $ref: "#/components/parameters/Tree" - responses: - '200': - $ref: "#/components/responses/GroupsPageRes" - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Group does not exist. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}/members: - get: - summary: Gets members of a group. - description: | - Array of member ids that are in the group specified with groupID. - tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Limit" - responses: - '200': - $ref: "#/components/responses/MembersRes" - '401': - description: Missing or invalid access token provided. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}/members/assign: - post: - summary: Assigns members to a group. - description: | - Assigns thing or user id to a group. 
- tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - requestBody: - $ref: "#/components/requestBodies/MembersReq" - responses: - '201': - $ref: "#/components/responses/GroupCreateRes" - '400': - description: Failed due to malformed JSON. - '401': - description: Missing or invalid access token provided. - '409': - description: Failed due to using an existing email address. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}/members/unassign: - post: - summary: Unassigns members to a group. - description: | - Unassigns thing or user id to a group. - tags: - - auth - parameters: - - $ref: "#/components/parameters/GroupId" - requestBody: - $ref: "#/components/requestBodies/MembersReq" - responses: - '201': - $ref: "#/components/responses/GroupCreateRes" - '400': - description: Failed due to malformed JSON. - '401': - description: Missing or invalid access token provided. - '409': - description: Failed due to using an existing email address. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - /groups/{userGroupID}/share: - post: - summary: Adds access rights on thing groups to user group with userGroupID. - description: | - Takes user group id through parameter and adds access rights for user group on thing group received via request body. - tags: - - auth - parameters: - - $ref: "#/components/parameters/UserGroupID" - requestBody: - $ref: "#/components/requestBodies/ShareGroupAccessReq" - responses: - '200': - description: User group shared with thing group. - '400': - description: Failed due to malformed JSON. - '401': - description: Missing or invalid access token provided. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - /members/{memberID}/groups: - get: - summary: Gets memberships for a member with member id. 
- description: | - Array of groups that member belongs to. - tags: - - auth - parameters: - - $ref: "#/components/parameters/MemberId" - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Limit" - - $ref: "#/components/parameters/Metadata" - responses: - '200': - $ref: "#/components/responses/GroupRes" - '401': - description: Missing or invalid access token provided. - '500': - $ref: "#/components/responses/ServiceError" - /policies: - post: - summary: Creates new policies. - description: | - Creates new policies. Only admin can use this endpoint. Therefore, you need an authentication token for the admin. - Also, only policies defined on the system are allowed to add. For more details, please see the docs for Authorization. - tags: - - auth - requestBody: - $ref: "#/components/requestBodies/PoliciesReq" - responses: - '201': - description: Policies created. - '400': - description: Failed due to malformed JSON. - '401': - description: Missing or invalid access token provided. - '403': - description: Unauthorized access token provided. - '409': - description: Failed due to using an existing email address. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - put: - summary: Deletes policies. - description: | - Deletes policies. Only admin can use this endpoint. Therefore, you need an authentication token for the admin. - Also, only policies defined on the system are allowed to delete. For more details, please see the docs for Authorization. - tags: - - auth - requestBody: - $ref: "#/components/requestBodies/PoliciesReq" - responses: - '204': - description: Policies deleted. - '400': - description: Failed due to malformed JSON. - '409': - description: Failed due to using an existing email address. - '415': - description: Missing or invalid content type. - '500': - $ref: "#/components/responses/ServiceError" - /health: - get: - summary: Retrieves service health check info. 
- tags: - - health - responses: - '200': - $ref: "#/components/responses/HealthRes" - '500': - $ref: "#/components/responses/ServiceError" - -components: - schemas: - Key: - type: object - properties: - id: - type: string - format: uuid - example: "c5747f2f-2a7c-4fe1-b41a-51a5ae290945" - description: API key unique identifier - issuer_id: - type: string - format: uuid - example: "9118de62-c680-46b7-ad0a-21748a52833a" - description: In ID of the entity that issued the token. - type: - type: integer - example: 0 - description: API key type. Keys of different type are processed differently. - subject: - type: string - format: string - example: "test@example.com" - description: User's email or service identifier of API key subject. - issued_at: - type: string - format: date-time - example: "2019-11-26 13:31:52" - description: Time when the key is generated. - expires_at: - type: string - format: date-time - example: "2019-11-26 13:31:52" - description: Time when the Key expires. If this field is missing, - that means that Key is valid indefinitely. - GroupReqSchema: - type: object - properties: - name: - type: string - description: | - Free-form group name. Group name is unique on the given hierarchy level. - description: - type: string - description: Group description, free form text. - parent_id: - type: string - format: ulid - description: Id of parent group, it must be existing group. - metadata: - type: object - description: Arbitrary, object-encoded group's data. - GroupUpdateSchema: - type: object - properties: - name: - type: string - description: | - Free-form group name. Group name is unique on the given hierarchy level. - description: - type: string - description: Group description, free form text. - metadata: - type: object - description: Arbitrary, object-encoded group's data. - GroupResSchema: - type: object - properties: - id: - type: string - format: ulid - description: Unique group identifier generated by the service. 
- name: - type: string - description: Free-form group name. - parent_id: - type: string - description: Group ID of parent group. - owner_id: - type: string - format: uuid - description: UUID of user that created the group. - metadata: - type: object - description: Arbitrary, object-encoded group's data. - level: - type: integer - description: Level in hierarchy, distance from the root group. - path: - type: string - description: Hierarchy path, concatenated ids of group ancestors. - children: - type: object - # schema: GroupResSchema - created_at: - type: string - description: Datetime of group creation. - updated_at: - type: string - description: Datetime of last group updated. - required: - - id - - name - - owner_id - - description - - level - - path - - created_at - - updated_at - MembersReqSchema: - type: object - properties: - members: - type: array - minItems: 0 - uniqueItems: true - items: - type: string - format: uuid | ulid - type: - type: string - description: Type of entity - ShareGroupAccessReqSchema: - type: object - properties: - thing_group_id: - type: string - description: Group ID of the Thing Group. - format: uuid - GroupsPage: - type: object - properties: - groups: - type: array - minItems: 0 - uniqueItems: true - items: - $ref: "#/components/schemas/GroupResSchema" - total: - type: integer - description: Total number of items. - level: - type: integer - description: Level of hierarchy up to which groups are fetched. - required: - - groups - - total - - level - MembershipPage: - type: object - properties: - groups: - type: array - minItems: 0 - uniqueItems: true - items: - $ref: "#/components/schemas/GroupResSchema" - offset: - type: integer - description: Number of items to skip during retrieval. - limit: - type: integer - description: Maximum number of items to return in one page. - total: - type: integer - description: Total number of items. 
- required: - - groups - PoliciesReqSchema: - type: object - properties: - object: - type: string - description: | - Specifies an object field for the field. - Object indicates application objects such as ThingID. - subjects: - type: array - minItems: 1 - uniqueItems: true - items: - type: string - policies: - type: array - minItems: 1 - uniqueItems: true - items: - type: string - - parameters: - ApiKeyId: - name: keyID - description: API Key ID. - in: path - schema: - type: string - format: uuid - required: true - UserGroupID: - name: userGroupID - description: User Group ID. - in: path - schema: - type: string - format: uuid - required: true - GroupId: - name: groupID - description: Group ID. - in: path - schema: - type: string - format: uuid - required: true - MemberId: - name: memberID - description: Member id. - in: path - schema: - type: string - format: uuid | ulid - required: true - Limit: - name: limit - description: Size of the subset to retrieve. - in: query - schema: - type: integer - default: 10 - maximum: 100 - minimum: 1 - required: false - Offset: - name: offset - description: Number of items to skip during retrieval. - in: query - schema: - type: integer - default: 0 - minimum: 0 - required: false - Level: - name: level - description: Level of hierarchy up to which to retrieve groups from given group id. - in: query - schema: - type: integer - minimum: 1 - maximum: 5 - required: false - Metadata: - name: metadata - description: Metadata filter. Filtering is performed matching the parameter with metadata on top level. Parameter is json. - in: query - required: false - schema: - type: object - additionalProperties: {} - Tree: - name: tree - description: Specify type of response, JSON array or tree. - in: query - required: false - schema: - type: boolean - default: false - Type: - name: type - description: The type of the API Key. 
- in: query - schema: - type: integer - default: 0 - minimum: 0 - required: false - Subject: - name: subject - description: The subject of an API Key - in: query - schema: - type: string - required: false - - requestBodies: - KeyRequest: - description: JSON-formatted document describing key request. - required: true - content: - application/json: - schema: - type: object - properties: - type: - type: integer - example: 0 - description: API key type. Keys of different type are processed differently. - duration: - type: number - format: integer - example: 23456 - description: Number of seconds issued token is valid for. - GroupCreateReq: - description: JSON-formatted document describing group create request. - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/GroupReqSchema" - GroupUpdateReq: - description: JSON-formatted document describing group create request. - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/GroupUpdateSchema" - MembersReq: - description: JSON array of member IDs. - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/MembersReqSchema" - ShareGroupAccessReq: - description: test - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/ShareGroupAccessReqSchema" - PoliciesReq: - description: JSON-formatted document describing adding policies request. - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/PoliciesReqSchema" - - responses: - ServiceError: - description: Unexpected server-side error occurred. - KeyRes: - description: Data retrieved. - content: - application/json: - schema: - $ref: "#/components/schemas/Key" - GroupCreateRes: - description: Group created. - headers: - Location: - content: - text/plain: - schema: - type: string - description: Created group's relative URL. 
- example: /groups/{groupId} - ShareAccessRightRes: - description: User group shared with thing group. - GroupRes: - description: Data retrieved. - content: - application/json: - schema: - $ref: "#/components/schemas/GroupResSchema" - GroupsPageRes: - description: Group data retrieved. - content: - application/json: - schema: - $ref: "#/components/schemas/GroupsPage" - MembersRes: - description: Groups data retrieved. Groups assigned to a member. - content: - application/json: - schema: - $ref: "#/components/schemas/MembershipPage" - MembershipPageRes: - description: Groups data retrieved. Groups assigned to a member. - content: - application/json: - schema: - $ref: "#/components/schemas/MembershipPage" - HealthRes: - description: Service Health Check. - content: - application/json: - schema: - $ref: "./schemas/HealthInfo.yml" - - securitySchemes: - bearerAuth: - type: http - scheme: bearer - bearerFormat: JWT - description: | - * Users access: "Authorization: Bearer " - -security: - - bearerAuth: [] diff --git a/api/openapi/bootstrap.yml b/api/openapi/bootstrap.yml index 1de3de33c2..c1611fa7ed 100644 --- a/api/openapi/bootstrap.yml +++ b/api/openapi/bootstrap.yml @@ -49,7 +49,7 @@ paths: description: Missing or invalid access token provided. '500': $ref: "#/components/responses/ServiceError" - /things/configs/{configID}: + /things/configs/{configId}: get: summary: Retrieves config info (with channels). tags: @@ -108,7 +108,7 @@ paths: description: Missing or invalid access token provided. '500': $ref: "#/components/responses/ServiceError" - /things/configs/certs/{configID}: + /things/configs/certs/{configId}: patch: summary: Updates certs description: | @@ -133,7 +133,7 @@ paths: description: Missing or invalid content type. 
'500': $ref: "#/components/responses/ServiceError" - /things/configs/connections/{configID}: + /things/configs/connections/{configId}: put: summary: Updates channels the thing is connected to description: | @@ -158,7 +158,7 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" - /things/bootstrap/{externalID}: + /things/bootstrap/{externalId}: get: summary: Retrieves configuration. description: | @@ -180,7 +180,7 @@ paths: description: Failed to retrieve corresponding config. '500': $ref: "#/components/responses/ServiceError" - /things/bootstrap/secure/{externalID}: + /things/bootstrap/secure/{externalId}: get: summary: Retrieves configuration. description: | @@ -199,7 +199,7 @@ paths: Failed to retrieve corresponding config. '500': $ref: "#/components/responses/ServiceError" - /things/state/{configID}: + /things/state/{configId}: put: summary: Updates Config state. description: | @@ -338,7 +338,7 @@ components: parameters: ConfigId: - name: configID + name: configId description: Unique Config identifier. It's the ID of the corresponding Thing. in: path schema: @@ -346,7 +346,7 @@ components: format: uuid required: true ExternalId: - name: externalID + name: externalId description: Unique Config identifier provided by external entity. in: path schema: diff --git a/api/openapi/certs.yml b/api/openapi/certs.yml index 77d014e01e..41414fc3bb 100644 --- a/api/openapi/certs.yml +++ b/api/openapi/certs.yml @@ -135,6 +135,12 @@ components: expire: type: string description: Certificate expiry date + Serial: + type: object + properties: + serial: + type: string + description: Certificate serial CertsPage: type: object properties: diff --git a/api/openapi/consumers-notifiers.yml b/api/openapi/consumers-notifiers.yml index a8ef46e916..f3d454fbd3 100644 --- a/api/openapi/consumers-notifiers.yml +++ b/api/openapi/consumers-notifiers.yml @@ -42,7 +42,7 @@ paths: description: Missing or invalid access token provided. 
"500": $ref: "#/components/responses/ServiceError" - /subscriptions/{subID}: + /subscriptions/{id}: get: summary: Get subscription with the provided id description: Retrieves a subscription with the provided id. @@ -137,7 +137,7 @@ components: parameters: Id: - name: subID + name: id description: Unique identifier. in: path schema: @@ -197,7 +197,7 @@ components: schema: type: string description: Created subscription relative URL - example: /subscriptions/{subId} + example: /subscriptions/{id} View: description: View subscription. content: diff --git a/api/openapi/http.yml b/api/openapi/http.yml index c8d059b6ba..bd8ab79bfa 100644 --- a/api/openapi/http.yml +++ b/api/openapi/http.yml @@ -4,7 +4,7 @@ info: description: HTTP API for sending messages through communication channels. version: "1.0.0" paths: - /channels/{chanID}/messages: + /channels/{id}/messages: post: summary: Sends message to the communication channel description: | @@ -106,7 +106,7 @@ components: parameters: ID: - name: chanID + name: id description: Unique channel identifier. in: path schema: diff --git a/api/openapi/readers.yml b/api/openapi/readers.yml index dbfda2df63..50e4a264ad 100644 --- a/api/openapi/readers.yml +++ b/api/openapi/readers.yml @@ -5,7 +5,7 @@ info: version: "1.0.0" paths: - /channels/{chanID}/messages: + /channels/{chanId}/messages: get: summary: Retrieves messages sent to single channel description: | @@ -107,7 +107,7 @@ components: parameters: ChanId: - name: chanID + name: chanId description: Unique channel identifier. in: path schema: diff --git a/api/openapi/things.yml b/api/openapi/things.yml index 56956356cd..1a12656cd2 100644 --- a/api/openapi/things.yml +++ b/api/openapi/things.yml @@ -1,90 +1,100 @@ -openapi: 3.0.1 +openapi: 3.0.3 info: - title: Mainflux things service - description: HTTP API for managing platform things and channels. 
- version: "1.0.0" + title: Mainflux Things Service + description: | + This is the Things Server based on the OpenAPI 3.0 specification. It is the HTTP API for managing platform things and channels. You can now help us improve the API whether it's by making changes to the definition itself or to the code. + Some useful links: + - [The Mainflux repository](https://github.com/mainflux/mainflux) + - [The Mainflux Postman Collection](https://github.com/mainflux/mainflux/blob/master/api/postman/postman.yaml) + contact: + email: info@mainflux.com + license: + name: Apache 2.0 + url: https://github.com/mainflux/mainflux/blob/master/LICENSE + version: 0.14.0 + +servers: + - url: http://localhost:9000 + - url: https://localhost:9000 + +tags: + - name: Things + description: Everything about your Things + externalDocs: + description: Find out more about things + url: http://docs.mainflux.io/ + - name: Channels + description: Everything about your Channels + externalDocs: + description: Find out more about things channels + url: http://docs.mainflux.io/ + - name: Policies + description: Access to things policies + externalDocs: + description: Find out more about things policies + url: http://docs.mainflux.io/ paths: /things: post: + tags: + - Things summary: Adds new thing description: | Adds new thing to the list of things owned by user identified using the provided access token. - tags: - - things requestBody: $ref: "#/components/requestBodies/ThingCreateReq" responses: '201': - $ref: "#/components/responses/CreateThingRes" + $ref: "#/components/responses/ThingCreateRes" '400': description: Failed due to malformed JSON. '401': description: Missing or invalid access token provided. '409': - description: Entity already exist. + description: Failed due to using an existing identity. '415': description: Missing or invalid content type. '422': - description: Database can't process request. + description: Database can't process request. 
'500': $ref: "#/components/responses/ServiceError" + get: + tags: + - Things summary: Retrieves things description: | Retrieves a list of things. Due to performance concerns, data is retrieved in subsets. The API things must ensure that the entire dataset is consumed either by making subsequent requests, or by increasing the subset size of the initial request. - tags: - - things parameters: - $ref: "#/components/parameters/Limit" - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Name" - - $ref: "#/components/parameters/Order" - - $ref: "#/components/parameters/Direction" - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/Status" + - $ref: "#/components/parameters/ThingName" + - $ref: "#/components/parameters/Tags" + - $ref: "#/components/parameters/Owner" + security: + - bearerAuth: [] responses: '200': - $ref: "#/components/responses/ThingsPageRes" + $ref: "#/components/responses/ThingPageRes" '400': description: Failed due to malformed query parameters. '401': - description: Missing or invalid access token provided. + description: | + Missing or invalid access token provided. '404': description: A non-existent entity request. '422': description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" - /things/search: - post: - summary: Search and retrieves things - description: | - Retrieves a list of things with name and metadata filtering. - Due to performance concerns, data is retrieved in subsets. - The API things must ensure that the entire - dataset is consumed either by making subsequent requests, or by - increasing the subset size of the initial request. - tags: - - things - requestBody: - $ref: "#/components/requestBodies/ThingsSearchReq" - responses: - '200': - $ref: "#/components/responses/ThingsPageRes" - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. 
- '404': - description: A non-existent entity request. - '422': - description: Unprocessable Entity - '500': - $ref: "#/components/responses/ServiceError" + /things/bulk: post: summary: Bulk provisions new things @@ -92,12 +102,12 @@ paths: Adds new things to the list of things owned by user identified using the provided access token. tags: - - things + - Things requestBody: $ref: "#/components/requestBodies/ThingsCreateReq" responses: '201': - description: Things registered. + $ref: "#/components/responses/ThingPageRes" '400': description: Failed due to malformed JSON. '401': @@ -106,67 +116,191 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + + /things/{thingID}: get: summary: Retrieves thing info + description: | + Retrieves a specific thing that is identifier by the thing ID. tags: - - things + - Things parameters: - - $ref: "#/components/parameters/ThingId" + - $ref: "#/components/parameters/ThingID" + security: + - bearerAuth: [] responses: '200': $ref: "#/components/responses/ThingRes" '401': description: Missing or invalid access token provided. '404': - description: Thing does not exist. + description: A non-existent entity request. '422': description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" - put: - summary: Updates thing info + + patch: + summary: Updates name and metadata of the thing. description: | Update is performed by replacing the current resource data with values provided in a request payload. Note that the thing's type and ID cannot be changed. tags: - - things + - Things parameters: - - $ref: "#/components/parameters/ThingId" + - $ref: "#/components/parameters/ThingID" requestBody: $ref: "#/components/requestBodies/ThingUpdateReq" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/ThingRes" + '400': + description: Failed due to malformed JSON. + '401': + description: Missing or invalid access token provided. 
+ '404': + description: Failed due to non existing thing. + '415': + description: Missing or invalid content type. + '500': + $ref: "#/components/responses/ServiceError" + + /things/{thingID}/tags: + patch: + summary: Updates tags the thing. + description: | + Updates tags of the thing with provided ID. Tags is updated using + authorization token and the new tags received in request. + tags: + - Things + parameters: + - $ref: "#/components/parameters/ThingID" + requestBody: + $ref: "#/components/requestBodies/ThingUpdateTagsReq" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/ThingRes" + '400': + description: Failed due to malformed JSON. + '404': + description: Failed due to non existing thing. + '401': + description: Missing or invalid access token provided. + '500': + $ref: "#/components/responses/ServiceError" + + /things/{thingID}/owner: + patch: + summary: Updates the thing owner. + description: | + Updates owner for the thing with provided ID. Owner is updated using + authorization token and a new owner identifier received in request. + tags: + - Things + parameters: + - $ref: "#/components/parameters/ThingID" + requestBody: + $ref: "#/components/requestBodies/ThingUpdateOwnerReq" + security: + - bearerAuth: [] responses: '200': - description: Thing updated. + $ref: "#/components/responses/ThingRes" '400': description: Failed due to malformed JSON. + '404': + description: Failed due to non existing thing. '401': description: Missing or invalid access token provided. + '500': + $ref: "#/components/responses/ServiceError" + + /things/{thingID}/secret: + patch: + summary: Updates Secret of the identified thing. + description: | + Updates secret of the identified in thing. Secret is updated using + authorization token and the new received info. Update is performed by replacing current key with a new one. 
+ tags: + - Things + parameters: + - $ref: "#/components/parameters/ThingID" + requestBody: + $ref: "#/components/requestBodies/ThingUpdateSecretReq" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/ThingRes" + '400': + description: Failed due to malformed JSON. + '401': + description: Missing or invalid access token provided. '404': - description: Thing does not exist. + description: Failed due to non existing thing. + '409': + description: Specified key already exists. '415': - description: Missing or invalid content type. + description: Missing or invalid content type. + '500': + $ref: "#/components/responses/ServiceError" + + /things/{thingID}/disable: + post: + summary: Disables a thing + description: | + Disables a specific thing that is identifier by the thing ID. + tags: + - Things + parameters: + - $ref: "#/components/parameters/ThingID" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/ThingRes" + '400': + description: Failed due to malformed thing's ID. + '401': + description: Missing or invalid access token provided. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" - delete: - summary: Removes a thing + + /things/{thingID}/enable: + post: + summary: Enables a thing description: | - Removes a thing. The service will ensure that the removed thing is - disconnected from all of the existing channels. + Enables a specific thing that is identifier by the thing ID. tags: - - things + - Things parameters: - - $ref: "#/components/parameters/ThingId" + - $ref: "#/components/parameters/ThingID" + security: + - bearerAuth: [] responses: - '204': - description: Thing removed. + '200': + $ref: "#/components/responses/ThingRes" '400': description: Failed due to malformed thing's ID. '401': description: Missing or invalid access token provided. 
+ '404': + description: A non-existent entity request. + '422': + description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" + /things/{thingID}/share: post: summary: Shares a thing with user identified by request body. @@ -174,9 +308,9 @@ paths: Adds 'read', 'write' or 'delete' policies to the user identified by the request body. Sharing a particular thing is only allowed to users who have 'write' access to that thing. tags: - - things + - Things parameters: - - $ref: "#/components/parameters/ThingId" + - $ref: "#/components/parameters/ThingID" requestBody: $ref: "#/components/requestBodies/ShareThingReq" responses: @@ -192,42 +326,45 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" - /things/{thingID}/key: - patch: - summary: Updates thing key + + /things/{thingID}/channels: + get: + summary: List of channels connected to specified thing description: | - Update is performed by replacing current key with a new one. + Retrieves list of channels connected to specified thing with pagination + metadata. tags: - - things + - Things parameters: - - $ref: "#/components/parameters/ThingId" - requestBody: - $ref: "#/components/requestBodies/KeyUpdateReq" + - $ref: "#/components/parameters/ThingID" + - $ref: "#/components/parameters/Offset" + - $ref: "#/components/parameters/Limit" responses: '200': - description: Thing key updated. + $ref: "#/components/responses/ChannelPageRes" '400': - description: Failed due to malformed JSON. + description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. '404': description: Thing does not exist. - '409': - description: Specified key already exists. - '415': - description: Missing or invalid content type. + '422': + description: Database can't process request. 
'500': $ref: "#/components/responses/ServiceError" + /channels: post: + tags: + - Channels summary: Creates new channel description: | Creates new channel. User identified by the provided access token will be the channel's owner. - tags: - - channels requestBody: $ref: "#/components/requestBodies/ChannelCreateReq" + security: + - bearerAuth: [] responses: '201': $ref: "#/components/responses/ChannelCreateRes" @@ -236,38 +373,43 @@ paths: '401': description: Missing or invalid access token provided. '409': - description: Entity already exist. + description: Failed due to using an existing identity. '415': description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + get: - summary: Retrieves channels + summary: Lists channels. description: | Retrieves a list of channels. Due to performance concerns, data is retrieved in subsets. The API things must ensure that the entire dataset is consumed either by making subsequent requests, or by increasing the subset size of the initial request. tags: - - channels + - Channels + security: + - bearerAuth: [] parameters: - $ref: "#/components/parameters/Limit" - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Name" - - $ref: "#/components/parameters/Order" - - $ref: "#/components/parameters/Direction" - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/ChannelName" + - $ref: "#/components/parameters/OwnerId" responses: '200': - $ref: "#/components/responses/ChannelsPageRes" + $ref: "#/components/responses/ChannelPageRes" '400': description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. + '404': + description: Channel does not exist. '422': - description: Database can't process request. + description: Database can't process request. 
'500': $ref: "#/components/responses/ServiceError" + /channels/bulk: post: summary: Bulk provisions new channels @@ -275,7 +417,7 @@ paths: Adds new channels to the list of channels owned by user identified using the provided access token. tags: - - channels + - Channels requestBody: $ref: "#/components/requestBodies/ChannelsCreateReq" responses: @@ -291,13 +433,18 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + /channels/{chanID}: get: - summary: Retrieves channel info + summary: Retrieves channel info. + description: | + Gets info on a channel specified by id. tags: - - channels + - Channels parameters: - - $ref: "#/components/parameters/ChanId" + - $ref: "#/components/parameters/chanID" + security: + - bearerAuth: [] responses: '200': $ref: "#/components/responses/ChannelRes" @@ -308,52 +455,115 @@ paths: '404': description: Channel does not exist. '422': - description: Database can't process request. + description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" + put: - summary: Updates channel info + summary: Updates channel data. description: | Update is performed by replacing the current resource data with values provided in a request payload. Note that the channel's ID will not be affected. tags: - - channels + - Channels parameters: - - $ref: "#/components/parameters/ChanId" + - $ref: "#/components/parameters/chanID" + security: + - bearerAuth: [] requestBody: - $ref: "#/components/requestBodies/ChannelCreateReq" + $ref: "#/components/requestBodies/ChannelUpdateReq" responses: '200': - description: Channel updated. + $ref: "#/components/responses/ChannelRes" '400': - description: Failed due to malformed JSON. + description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. '404': description: Channel does not exist. '415': - description: Missing or invalid content type. 
+ description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" - delete: - summary: Removes a channel + + /channels/{chanID}/enable: + post: + summary: Enables a channel description: | - Removes a channel. The service will ensure that the subscribed apps and - things are unsubscribed from the removed channel. + Enables a specific channel that is identifier by the channel ID. tags: - - channels + - Channels parameters: - - $ref: "#/components/parameters/ChanId" + - $ref: "#/components/parameters/chanID" + security: + - bearerAuth: [] responses: - '204': - description: Channel removed. + '200': + $ref: "#/components/responses/ChannelRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: Missing or invalid access token provided. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceError" + + /channels/{chanID}/disable: + post: + summary: Disables a channel + description: | + Disables a specific channel that is identifier by the channel ID. + tags: + - Channels + parameters: + - $ref: "#/components/parameters/chanID" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/ChannelRes" '400': description: Failed due to malformed channel's ID. '401': description: Missing or invalid access token provided. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceError" + + /channels/{chanID}/things: + get: + summary: List of things connected to specified channel + description: | + Retrieves list of things connected to specified channel with pagination + metadata. 
+ tags: + - Channels + parameters: + - $ref: "#/components/parameters/chanID" + - $ref: "#/components/parameters/Offset" + - $ref: "#/components/parameters/Limit" + - $ref: "#/components/parameters/Connected" + responses: + '200': + $ref: "#/components/responses/ThingsPageRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: Missing or invalid access token provided. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" + /connect: post: summary: Connects thing and channel. @@ -361,7 +571,7 @@ paths: Connect things specified by IDs to channels specified by IDs. Channel and thing are owned by user identified using the provided access token. tags: - - things + - Policies requestBody: $ref: "#/components/requestBodies/ConnCreateReq" responses: @@ -379,18 +589,19 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + /disconnect: - put: + post: summary: Disconnect things and channels using lists of IDs. description: | Disconnect things from channels specified by lists of IDs. Channels and things are owned by user identified using the provided access token. tags: - - things + - Policies requestBody: $ref: "#/components/requestBodies/DisconnReq" responses: - '200': + '204': $ref: "#/components/responses/DisconnRes" '400': description: Failed due to malformed JSON. @@ -401,102 +612,53 @@ paths: '415': description: Missing or invalid content type. '500': - $ref: "#/components/responses/ServiceError" - /things/{thingID}/channels: - get: - summary: List of channels connected to specified thing + $ref: "#/components/responses/ServiceError" + + /channels/{chanID}/things/{thingID}: + post: + summary: Connects the thing to the channel description: | - Retrieves list of channels connected to specified thing with pagination - metadata. 
+ Creates connection between a thing and a channel. Once connected to + the channel, things are allowed to exchange messages through it. tags: - - channels + - Policies parameters: - - $ref: "#/components/parameters/ThingId" - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Limit" - - $ref: "#/components/parameters/Connected" + - $ref: "#/components/parameters/chanID" + - $ref: "#/components/parameters/ThingID" responses: '200': - $ref: "#/components/responses/ChannelsPageRes" + $ref: "#/components/responses/ConnCreateRes" '400': description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. '404': - description: Thing does not exist. - '422': - description: Database can't process request. + description: Channel or thing does not exist. '500': $ref: "#/components/responses/ServiceError" - /channels/{chanID}/things: - get: - summary: List of things connected to specified channel + + delete: + summary: Disconnects the thing from the channel description: | - Retrieves list of things connected to specified channel with pagination - metadata. + Removes connection between a thing and a channel. Once connection is + removed, thing can no longer exchange messages through the channel. tags: - - things + - Policies parameters: - - $ref: "#/components/parameters/ChanId" - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Limit" - - $ref: "#/components/parameters/Connected" + - $ref: "#/components/parameters/chanID" + - $ref: "#/components/parameters/ThingID" responses: - '200': - $ref: "#/components/responses/ThingsPageRes" + '204': + description: Thing disconnected. '400': description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. '404': - description: A non-existent entity request. - '422': - description: Database can't process request. + description: Channel or thing does not exist. 
'500': - $ref: "#/components/responses/ServiceError" - /channels/{chanID}/things/{thingID}: - put: - summary: Connects the thing to the channel - description: | - Creates connection between a thing and a channel. Once connected to - the channel, things are allowed to exchange messages through it. - tags: - - channels - parameters: - - $ref: "#/components/parameters/ChanId" - - $ref: "#/components/parameters/ThingId" - responses: - '200': - description: Thing connected. - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Channel or thing does not exist. - '500': - $ref: "#/components/responses/ServiceError" - delete: - summary: Disconnects the thing from the channel - description: | - Removes connection between a thing and a channel. Once connection is - removed, thing can no longer exchange messages through the channel. - tags: - - channels - parameters: - - $ref: "#/components/parameters/ChanId" - - $ref: "#/components/parameters/ThingId" - responses: - '204': - description: Thing disconnected. - '400': - description: Failed due to malformed query parameters. - '401': - description: Missing or invalid access token provided. - '404': - description: Channel or thing does not exist. - '500': - $ref: "#/components/responses/ServiceError" + $ref: "#/components/responses/ServiceError" + /identify/channels/{chanID}/access-by-key: post: summary: Checks if thing has access to a channel. @@ -504,11 +666,11 @@ paths: Checks if a thing with a specified key has an access to a specified channel and if it has, it returns that things id. 
tags: - - access + - Policies parameters: - - $ref: "#/components/parameters/ChanId" + - $ref: "#/components/parameters/chanID" requestBody: - $ref: "#/components/requestBodies/IdentityReq" + $ref: "#/components/requestBodies/AccessByIDReq" responses: '200': $ref: "#/components/responses/AccessGrantedRes" @@ -520,6 +682,7 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + /identify/channels/{chanID}/access-by-id: post: summary: Checks if thing has access to a channel. @@ -527,9 +690,9 @@ paths: Checks if a thing with a specified ID has an access to a specified channel. tags: - - access + - Policies parameters: - - $ref: "#/components/parameters/ChanId" + - $ref: "#/components/parameters/chanID" requestBody: $ref: "#/components/requestBodies/AccessByIDReq" responses: @@ -543,6 +706,7 @@ paths: description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + /identify: post: summary: Validates thing's key and returns it's ID if key is valid. @@ -550,7 +714,7 @@ paths: Validates thing's key and returns it's ID if specified key exists and is valid. tags: - - identity + - Policies requestBody: $ref: "#/components/requestBodies/IdentityReq" responses: @@ -561,37 +725,84 @@ paths: '415': description: Missing or invalid content type. '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}: + $ref: "#/components/responses/ServiceError" + get: - summary: Retrieves things + summary: Fetches policy data. description: | - Retrieves a list of things that belong to a group. Due to performance concerns, data - is retrieved in subsets. The API things must ensure that the entire - dataset is consumed either by making subsequent requests, or by - increasing the subset size of the initial request. + List available policies. 
tags: - - things + - Policies parameters: - - $ref: "#/components/parameters/GroupId" - $ref: "#/components/parameters/Limit" - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Order" - - $ref: "#/components/parameters/Direction" - - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/Offset" + - $ref: "#/components/parameters/Subject" + - $ref: "#/components/parameters/Object" + - $ref: "#/components/parameters/Actions" + security: + - bearerAuth: [] responses: '200': - $ref: "#/components/responses/ThingsPageRes" + $ref: "#/components/responses/PolicyPageRes" '400': description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. '404': - description: A non-existent entity request. - '422': - description: Database can't process request. + description: Channel does not exist. + '500': + $ref: "#/components/responses/ServiceError" + + put: + summary: Updates policy data. + description: | + Updates Actions of a policy. + tags: + - Policies + parameters: + - $ref: "#/components/parameters/Subject" + - $ref: "#/components/parameters/Object" + - $ref: "#/components/parameters/Actions" + security: + - bearerAuth: [] + requestBody: + $ref: "#/components/requestBodies/PolicyUpdateReq" + responses: + '200': + description: Channel updated. + '400': + description: Failed due to malformed query parameters. + '401': + description: Missing or invalid access token provided. + '404': + description: Channel does not exist. '500': $ref: "#/components/responses/ServiceError" + + /policies/{obj}/{sub}: + delete: + tags: + - Policies + summary: Delete policy + description: | + Delete specified policies + security: + - bearerAuth: [] + parameters: + - $ref: "#/components/parameters/Obj" + - $ref: "#/components/parameters/Sub" + responses: + '200': + $ref: "#/components/responses/PolicyPageRes" + '400': + description: Failed due to malformed query parameters. 
+ '401': + description: Missing or invalid access token provided. + '404': + description: Policy does not exist. + '500': + $ref: "#/components/responses/ServiceError" + /health: get: summary: Retrieves service health check info. @@ -605,92 +816,367 @@ paths: components: schemas: - Key: - type: string - format: uuid - description: | - Thing key that is used for thing auth. If there is - not one provided service will generate one in UUID - format. - Identity: + ThingReqObj: type: object properties: - id: + name: + type: string + example: thingName + description: Thing name. + tags: + type: array + minItems: 0 + items: + type: string + example: ['tag1', 'tag2'] + description: Thing tags. + credentials: + type: object + properties: + identity: + type: string + example: "thingidentity" + description: Thing's identity will be used as its unique identifier + secret: + type: string + format: password + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + minimum: 8 + description: Free-form account secret used for acquiring auth token(s). + owner: type: string format: uuid - description: Thing unique identifier. This can be either - provided by the user or left blank. If the user provides a UUID, - it would be validated. If there is not one provided then - the service will generate one in UUID format. - ThingReqSchema: + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing owner must be exsiting in the databse. + metadata: + type: object + example: {"domain": "example.com"} + description: Arbitrary, object-encoded thing's data. + status: + type: string + description: Thing Status + format: string + example: enabled + required: + - credentials + + ChannelReqObj: + type: object + properties: + name: + type: string + example: channelName + description: Free-form channel name. Channel name is unique on the given hierarchy level. + description: + type: string + example: long channel description + description: Channel description, free form text. 
+ parent_id: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Id of parent channel, it must be existing channel. + metadata: + type: object + example: {"domain": "example.com"} + description: Arbitrary, object-encoded channels's data. + status: + type: string + description: Channel Status + format: string + example: enabled + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Channel owner ID must be exsiting in the databse. + required: + - name + + PolicyReqObj: + type: object + properties: + subject: + type: string + description: Policy subject refers to the thing id + example: 'bb7edb32-2eac-4aad-aebe-ed96fe073879' + object: + type: string + description: Policy object refers to either the thing id, channel id, computation id or dataset id + example: 'bb7edb32-2eac-4aad-aebe-ed96fe073879' + actions: + type: array + minItems: 0 + items: + type: string + example: ['m_write', 'g_add'] + description: Policy actions. + required: + - subject + - object + - actions + + Thing: type: object properties: - key: - $ref: "#/components/schemas/Key" + id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing unique identifier. name: type: string - description: Free-form thing name. + example: thingName + description: Thing name. + tags: + type: array + minItems: 0 + items: + type: string + example: ['tag1', 'tag2'] + description: Thing tags. + owner: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing owner identifier. + credentials: + type: object + properties: + identity: + type: string + example: thingidentity + description: Thing Identity for example email address. + secret: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing secret password. metadata: type: object + example: {"domain": "example.com"} description: Arbitrary, object-encoded thing's data. 
- ThingsReqSchema: + status: + type: string + description: Thing Status + format: string + example: enabled + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the channel was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the channel was created. + xml: + name: thing + + Channel: type: object properties: + id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Unique channel identifier generated by the service. name: type: string - description: Name filter. Filtering is performed as a case-insensitive partial match. + example: channelName + description: Free-form channel name. Channel name is unique on the given hierarchy level. + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Channel owner identifier of thing that created the channel.. + parent_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Channel parent identifier. + description: + type: string + example: long channel description + description: Channel description, free form text. metadata: type: object - description: Metadata filter. Filtering is performed matching the parameter with metadata on top level. Parameter is json. - total: - type: integer - description: Total number of items. - offset: + example: {"role": "general"} + description: Arbitrary, object-encoded channels's data. + path: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879.bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Hierarchy path, concatenated ids of channel ancestors. + level: type: integer - description: Number of items to skip during retrieval. - default: 0 - minimum: 0 - limit: + description: Level in hierarchy, distance from the root channel. 
+ format: int32 + example: 2 + maximum: 5 + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the channel was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the channel was created. + status: + type: string + description: Channel Status + format: string + example: enabled + xml: + name: channel + + Memberships: + type: object + properties: + id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Unique channel identifier generated by the service. + name: + type: string + example: channelName + description: Free-form channel name. Channel name is unique on the given hierarchy level. + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Channel owner identifier of thing that created the channel.. + parent_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Channel parent identifier. + description: + type: string + example: long channel description + description: Channel description, free form text. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded channels's data. + path: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879.bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Hierarchy path, concatenated ids of channel ancestors. + level: type: integer - description: Size of the subset to retrieve. - default: 10 - maximum: 100 - minimum: 1 - order: + description: Level in hierarchy, distance from the root channel. + format: int32 + example: 2 + created_at: type: string - description: Order type. - default: id - enum: - - name - - id - dir: + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the channel was created. + updated_at: type: string - description: Order direction. 
- default: desc - enum: - - asc - - desc - ThingResSchema: + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the channel was created. + xml: + name: memberships + + Members: type: object properties: id: type: string format: uuid - description: Unique thing identifier generated by the service. + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing unique identifier. name: type: string - description: Free-form thing name. - key: + example: thingName + description: Thing name. + tags: + type: array + minItems: 0 + items: + type: string + example: ['computations', 'datasets'] + description: Thing tags. + owner: type: string format: uuid - description: Auto-generated access key. + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing owner identifier. + credentials: + type: object + properties: + identity: + type: string + example: thing@mainflux.com + description: Thing Identity for example email address. + secret: + type: string + example: password + description: Thing secret password. metadata: type: object + example: {"role": "general"} description: Arbitrary, object-encoded thing's data. - required: - - id - - type - - key + status: + type: string + description: Thing Status + format: string + example: enabled + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the channel was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the channel was created. + xml: + name: members + + Policy: + type: object + properties: + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Policy owner identifier. + subject: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Policy subject identifier. 
+ object: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Policy object identifier. + actions: + type: array + minItems: 0 + items: + type: string + example: ['m_write', 'g_add'] + description: Policy actions. + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the policy was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the policy was updated. + xml: + name: policy + ThingsPage: type: object properties: @@ -699,253 +1185,600 @@ components: minItems: 0 uniqueItems: true items: - $ref: "#/components/schemas/ThingResSchema" + $ref: "#/components/schemas/Thing" total: type: integer + example: 1 description: Total number of items. offset: type: integer description: Number of items to skip during retrieval. limit: type: integer + example: 10 description: Maximum number of items to return in one page. required: - things - ChannelReqSchema: + - total + - offset + + ChannelsPage: type: object properties: - name: - type: string - description: Free-form channel name. - metadata: - type: object - description: Arbitrary, object-encoded channel's data. - ChannelResSchema: + channels: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/Channel" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. + limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. + required: + - channels + - total + - level + + MembershipsPage: type: object properties: - id: - type: string - description: Unique channel identifier generated by the service. - name: - type: string - description: Free-form channel name. - metadata: - type: object - description: Arbitrary, object-encoded channel's data. 
+ memberships: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/Memberships" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. + limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. required: - - id - ChannelsPage: + - memberships + - total + - level + + MembersPage: type: object properties: - channels: + members: type: array minItems: 0 uniqueItems: true items: - $ref: "#/components/schemas/ChannelResSchema" + $ref: "#/components/schemas/Members" total: type: integer + example: 1 description: Total number of items. offset: type: integer description: Number of items to skip during retrieval. limit: type: integer + example: 10 description: Maximum number of items to return in one page. required: - - channels + - members + - total + - level + + PoliciesPage: + type: object + properties: + policies: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/Policy" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. + limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. + required: + - policies + - total + - offset + + ThingUpdate: + type: object + properties: + name: + type: string + example: thingName + description: Thing name. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded thing's data. + required: + - name + - metadata + + ThingTags: + type: object + properties: + tags: + type: array + example: ['tag1', 'tag2'] + description: Thing tags. + minItems: 0 + uniqueItems: true + items: + type: string + + ThingSecret: + type: object + properties: + secret: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: New thing secret. 
+ required: + - secret + + ThingOwner: + type: object + properties: + owner: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Thing owner for example email address. + required: + - owner + + ChannelUpdate: + type: object + properties: + name: + type: string + example: channelName + description: Free-form channel name. Channel name is unique on the given hierarchy level. + description: + type: string + example: long description but not too long + description: Channel description, free form text. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded channels's data. + required: + - name + - metadata + - description + + PolicyUpdate: + type: object + properties: + actions: + type: array + example: ['m_write', 'g_add'] + description: Policy actions. + minItems: 0 + uniqueItems: true + items: + type: string + + Identity: + type: object + properties: + id: + type: string + format: uuid + description: Thing unique identifier. This can be either + provided by the user or left blank. If the user provides a UUID, + it would be validated. If there is not one provided then + the service will generate one in UUID format. + ConnectionReqSchema: type: object properties: - channel_ids: + group_ids: type: array description: Channel IDs. items: - type: string - thing_ids: + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + client_ids: type: array description: Thing IDs items: - type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + ShareThingReqSchema: type: object properties: - user_id: - type: string - description: User ID. + user_ids: + type: array + description: User IDs. 
items: - type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 policies: type: array description: Policies items: - type: string + example: c_update + + Error: + type: object + properties: + error: + type: string + description: Error message + example: {"error": "malformed entity specification"} + HealthRes: + type: object + properties: + status: + type: string + description: Service status. + enum: + - pass + version: + type: string + description: Service version. + example: 0.14.0 + commit: + type: string + description: Service commit hash. + example: 7d6f4dc4f7f0c1fa3dc24eddfb18bb5073ff4f62 + description: + type: string + description: Service description. + example: things service + build_time: + type: string + description: Service build time. + example: 1970-01-01_00:00:00 + parameters: - ChanId: - name: chanID - description: Unique channel identifier. - in: path - schema: - type: string - format: uuid - required: true - ThingId: - name: thingID - description: Unique thing identifier. - in: path - schema: - type: string - format: uuid - required: true - GroupId: - name: groupID - description: Unique group identifier. - in: path - schema: - type: string - format: ulid - required: true - Limit: - name: limit - description: Size of the subset to retrieve. - in: query - schema: - type: integer - default: 10 - maximum: 100 - minimum: 1 - required: false - Offset: - name: offset - description: Number of items to skip during retrieval. - in: query - schema: - type: integer - default: 0 - minimum: 0 - required: false - Connected: - name: connected - description: Connection state of the subset to retrieve. - in: query - schema: - type: boolean - default: true - required: false - Name: - name: name - description: Name filter. Filtering is performed as a case-insensitive partial match. - in: query - schema: - type: string - required: false - Order: - name: order - description: Order type. 
- in: query - schema: - type: string - default: id - enum: - - name - - id - required: false - Direction: - name: dir - description: Order direction. - in: query - schema: - type: string - default: desc - enum: - - asc - - desc - required: false - Metadata: - name: metadata - description: Metadata filter. Filtering is performed matching the parameter with metadata on top level. Parameter is json. - in: query - required: false - schema: - type: object - additionalProperties: {} + ThingID: + name: thingID + description: Unique thing identifier. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + ThingName: + name: name + description: Thing's name. + in: query + schema: + type: string + required: false + example: 'thingName' + + ThingIdentity: + name: identity + description: Thing's identity. + in: query + schema: + type: string + required: false + example: 'admin@example.com' + + Owner: + name: owner + description: Thing's owner. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + ThingOwner: + name: owner + description: Unique owner identifier for a thing. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Status: + name: status + description: Thing account status. + in: query + schema: + type: string + default: enabled + required: false + example: enabled + + Tags: + name: tags + description: Thing tags. + in: query + schema: + type: array + minItems: 0 + uniqueItems: true + items: + type: string + required: false + example: ['yello', 'orange'] + + ChannelName: + name: name + description: Channel's name. + in: query + schema: + type: string + required: false + example: 'channelName' + + ChannelDescription: + name: name + description: Channel's description. 
+ in: query + schema: + type: string + required: false + example: 'channel description' + + chanID: + name: chanID + description: Unique channel identifier. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + ParentId: + name: parentId + description: Unique parent identifier for a channel. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Level: + name: level + description: Level of hierarchy up to which to retrieve channels from given channel id. + in: query + schema: + type: integer + minimum: 1 + maximum: 5 + required: false + + Tree: + name: tree + description: Specify type of response, JSON array or tree. + in: query + required: false + schema: + type: boolean + default: false + + OwnerId: + name: ownerId + description: Unique owner identifier for a channel. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Subject: + name: subject + description: Unique subject identifier for a policy. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Object: + name: object + description: Unique object identifier for a policy. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + Actions: + name: actions + description: Policy action types. + in: query + schema: + type: array + minItems: 0 + uniqueItems: true + items: + type: string + required: false + example: ['m_write', 'g_add'] + + Sub: + name: sub + description: Unique subject identifier for a policy. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Obj: + name: obj + description: Unique object identifier for a policy. 
+ in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Metadata: + name: metadata + description: Metadata filter. Filtering is performed matching the parameter with metadata on top level. Parameter is json. + in: query + schema: + type: string + minimum: 0 + required: false + + Limit: + name: limit + description: Size of the subset to retrieve. + in: query + schema: + type: integer + default: 10 + maximum: 100 + minimum: 1 + required: false + example: '100' + + Offset: + name: offset + description: Number of items to skip during retrieval. + in: query + schema: + type: integer + default: 0 + minimum: 0 + required: false + example: '0' + + Connected: + name: connected + description: Connection state of the subset to retrieve. + in: query + schema: + type: boolean + default: true + required: false + requestBodies: ThingCreateReq: - description: JSON-formatted document describing the new thing. + description: JSON-formatted document describing the new thing to be registered required: true content: application/json: schema: - $ref: "#/components/schemas/ThingReqSchema" - ThingsCreateReq: - description: JSON-formatted document describing the new things. + $ref: '#/components/schemas/ThingReqObj' + + ThingUpdateReq: + description: JSON-formated document describing the metadata and name of thing to be update required: true content: application/json: schema: - type: object - properties: - key: - $ref: "#/components/schemas/Key" - things: - type: array - items: - $ref: "#/components/schemas/ThingReqSchema" - ThingUpdateReq: - description: Arbitrary, object-encoded thing's data. + $ref: "#/components/schemas/ThingUpdate" + + ThingUpdateTagsReq: + description: JSON-formated document describing the tags of thing to be update required: true content: application/json: schema: - type: object - properties: - name: - type: string - description: Free-form thing name. 
- metadata: - type: object - ThingsSearchReq: - description: JSON-formatted document describing search parameters. + $ref: "#/components/schemas/ThingTags" + + ThingUpdateSecretReq: + description: Secret change data. Thing can change its secret. required: true content: application/json: schema: - $ref: "#/components/schemas/ThingsReqSchema" - KeyUpdateReq: + $ref: '#/components/schemas/ThingSecret' + + ThingUpdateOwnerReq: + description: JSON-formated document describing the owner of thing to be update required: true - description: JSON containing thing. content: application/json: schema: - type: object - properties: - key: - type: string - format: uuid - description: Thing key that is used for thing auth. + $ref: "#/components/schemas/ThingOwner" + ChannelCreateReq: - description: JSON-formatted document describing the updated channel. + description: JSON-formatted document describing the new channel to be registered + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ChannelReqObj' + + ChannelUpdateReq: + description: JSON-formated document describing the metadata and name of channel to be update required: true content: application/json: schema: - $ref: "#/components/schemas/ChannelReqSchema" + $ref: "#/components/schemas/ChannelUpdate" + + PolicyCreateReq: + description: JSON-formatted document describing the new channel to be registered + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PolicyReqObj' + + PolicyUpdateReq: + description: JSON-formated document describing the actions of a policy to be update + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PolicyUpdate" + + ThingsCreateReq: + description: JSON-formatted document describing the new things. 
+ required: true + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/ThingReqObj" + ChannelsCreateReq: description: JSON-formatted document describing the new channels. required: true content: application/json: schema: - type: object - properties: - key: - $ref: "#/components/schemas/Key" - things: - type: array - items: - $ref: "#/components/schemas/ChannelReqSchema" + type: array + items: + $ref: "#/components/schemas/ChannelReqObj" + ConnCreateReq: description: JSON-formatted document describing the new connection. required: true @@ -953,6 +1786,7 @@ components: application/json: schema: $ref: "#/components/schemas/ConnectionReqSchema" + DisconnReq: description: JSON-formatted document describing the entities for disconnection. required: true @@ -960,6 +1794,7 @@ components: application/json: schema: $ref: "#/components/schemas/ConnectionReqSchema" + IdentityReq: description: JSON-formatted document that contains thing key. required: true @@ -974,6 +1809,7 @@ components: description: Thing key that is used for thing auth. required: - token + AccessByIDReq: description: JSON-formatted document that contains thing key. required: true @@ -982,10 +1818,21 @@ components: schema: type: object properties: - thing_id: + client_id: type: string format: uuid description: Thing ID by which thing is uniquely identified. + action: + type: string + format: string + example: "m_read" + description: Action you want to check access against. + entity_type: + type: string + format: string + example: group + description: Entity Type. + ShareThingReq: description: JSON-formatted document describing sharing things policies. required: true @@ -995,61 +1842,117 @@ components: $ref: "#/components/schemas/ShareThingReqSchema" responses: - CreateThingRes: - description: Thing registered. + ThingCreateRes: + description: Registered new thing. 
headers: Location: - content: - text/plain: - schema: - type: string - description: Created thing's relative URL. - example: /things/{thingId} + schema: + type: string + format: url + description: Registered thing relative URL in the format `/things/` + content: + application/json: + schema: + $ref: "#/components/schemas/Thing" + ThingRes: description: Data retrieved. content: application/json: schema: - $ref: "#/components/schemas/ThingResSchema" + $ref: "#/components/schemas/Thing" + + ThingPageRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/ThingsPage" + ThingsPageRes: description: Data retrieved. content: application/json: schema: $ref: "#/components/schemas/ThingsPage" + + MembershipsPageRes: + description: Memberships associated with the thing. + content: + application/json: + schema: + $ref: "#/components/schemas/MembershipsPage" + ChannelCreateRes: - description: Channel created. + description: Registered new channel. headers: Location: - content: - text/plain: - schema: - type: string - description: Created channel's relative URL (i.e. /channels/{chanId}). + schema: + type: string + format: url + description: Registered channel relative URL in the format `/channels/` + content: + application/json: + schema: + $ref: "#/components/schemas/Channel" + ChannelRes: description: Data retrieved. content: application/json: schema: - $ref: "#/components/schemas/ChannelResSchema" - ChannelsPageRes: + $ref: "#/components/schemas/Channel" + + ChannelPageRes: description: Data retrieved. content: application/json: schema: $ref: "#/components/schemas/ChannelsPage" - ConnCreateRes: - description: Thing registered. + + MembersPageRes: + description: Channel members retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/MembersPage" + + PolicyCreateRes: + description: Registered new policy. 
headers: Location: content: text/plain: schema: type: string - description: Created thing's relative URL. - example: /things/{thingId} + format: url + description: Registered policy relative URL. + example: /policy/{subject}/{object} + + PolicyRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/Policy" + + PolicyPageRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/PoliciesPage" + + ConnCreateRes: + description: Thing registered. + content: + application/json: + schema: + $ref: "#/components/schemas/PoliciesPage" + DisconnRes: description: Things disconnected. + AccessGrantedRes: description: | Thing has access to the specified channel and the thing ID is returned. @@ -1057,33 +1960,35 @@ components: application/json: schema: $ref: "#/components/schemas/Identity" + IdentityRes: description: Thing ID returned. content: application/json: schema: $ref: "#/components/schemas/Identity" - ServiceError: - description: Unexpected server-side error occurred. - content: - application/json: - schema: - type: string - format: byte + HealthRes: description: Service Health Check. + content: + application/health+json: + schema: + $ref: "#/components/schemas/HealthRes" + + ServiceError: + description: Unexpected server-side error occurred. content: application/json: schema: - $ref: "./schemas/HealthInfo.yml" - + $ref: "#/components/schemas/Error" + securitySchemes: bearerAuth: type: http scheme: bearer bearerFormat: JWT description: | - * Users access: "Authorization: Bearer " - + * Thing access: "Authorization: Bearer " + security: - bearerAuth: [] diff --git a/api/openapi/users.yml b/api/openapi/users.yml index 937e1cd23f..40d123600a 100644 --- a/api/openapi/users.yml +++ b/api/openapi/users.yml @@ -1,18 +1,48 @@ -openapi: 3.0.1 +openapi: 3.0.3 info: - title: Mainflux users service - description: HTTP API for managing platform users. 
- version: "1.0.0" + title: Mainflux Users Service + description: | + This is the Users Server based on the OpenAPI 3.0 specification. It is the HTTP API for managing platform users. You can now help us improve the API whether it's by making changes to the definition itself or to the code. + Some useful links: + - [The Mainflux repository](https://github.com/mainflux/mainflux) + - [The Mainflux Postman Collection](https://github.com/mainflux/mainflux/blob/master/api/postman/postman.yaml) + contact: + email: info@mainflux.com + license: + name: Apache 2.0 + url: https://github.com/mainflux/mainflux/blob/master/LICENSE + version: 0.14.0 + +servers: + - url: http://localhost:8180 + - url: https://localhost:8180 + +tags: + - name: Users + description: Everything about your Users + externalDocs: + description: Find out more about users + url: http://docs.mainflux.io/ + - name: Groups + description: Everything about your Groups + externalDocs: + description: Find out more about users groups + url: http://docs.mainflux.io/ + - name: Policies + description: Access to user policies + externalDocs: + description: Find out more about users policies + url: http://docs.mainflux.io/ paths: /users: post: + tags: + - Users summary: Registers user account description: | Registers new user account given email and password. New account will be uniquely identified by its email address. - tags: - - users requestBody: $ref: "#/components/requestBodies/UserCreateReq" responses: @@ -23,52 +53,110 @@ paths: '401': description: Missing or invalid access token provided. '409': - description: Failed due to using an existing email address. + description: Failed due to using an existing identity. '415': description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" + get: - summary: Retrieves users + tags: + - Users + summary: List users description: | Retrieves a list of users. Due to performance concerns, data is retrieved in subsets. 
The API things must ensure that the entire dataset is consumed either by making subsequent requests, or by increasing the subset size of the initial request. - tags: - - users parameters: - $ref: "#/components/parameters/Limit" - $ref: "#/components/parameters/Offset" - $ref: "#/components/parameters/Metadata" - $ref: "#/components/parameters/Status" + - $ref: "#/components/parameters/UserName" + - $ref: "#/components/parameters/UserIdentity" + - $ref: "#/components/parameters/Tags" + - $ref: "#/components/parameters/Owner" + - $ref: "#/components/parameters/UserVisibility" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/UserPageRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: | + Missing or invalid access token provided. + This endpoint is available only for administrators. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceError" + + /users/profile: + get: + summary: Gets info on currently logged in user. + description: | + Gets info on currently logged in user. Info is obtained using + authorization token + tags: + - Users + security: + - bearerAuth: [] responses: '200': - $ref: "#/components/responses/UsersPageRes" + $ref: "#/components/responses/UserRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: Missing or invalid access token provided. + '500': + $ref: "#/components/responses/ServiceError" + + /users/{userID}: + get: + summary: Retrieves a user + description: | + Retrieves a specific user that is identifier by the user ID. + tags: + - Users + parameters: + - $ref: "#/components/parameters/UserID" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/UserRes" '400': description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. 
- '403': - description: This endpoint is available only for administrators. '404': description: A non-existent entity request. '422': description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" - put: - summary: Updates info on currently logged in user. + + patch: + summary: Updates name and metadata of the user. description: | - Updates info on currently logged in user. Info is updated using - authorization token and the new received info. + Updates name and metadata of the user with provided ID. Name and metadata + is updated using authorization token and the new received info. tags: - - users + - Users + parameters: + - $ref: "#/components/parameters/UserID" requestBody: $ref: "#/components/requestBodies/UserUpdateReq" + security: + - bearerAuth: [] responses: '200': - description: User updated. + $ref: "#/components/responses/UserRes" '400': description: Failed due to malformed JSON. '404': @@ -77,59 +165,99 @@ paths: description: Missing or invalid access token provided. '500': $ref: "#/components/responses/ServiceError" - /users/profile: - get: - summary: Gets info on currently logged in user. + + /users/{userID}/tags: + patch: + summary: Updates tags the user. description: | - Gets info on currently logged in user. Info is obtained using - authorization token + Updates tags of the user with provided ID. Tags is updated using + authorization token and the new tags received in request. tags: - - users + - Users + parameters: + - $ref: "#/components/parameters/UserID" + requestBody: + $ref: "#/components/requestBodies/UserUpdateTagsReq" + security: + - bearerAuth: [] responses: '200': $ref: "#/components/responses/UserRes" '400': - description: Failed due to malformed query parameters. + description: Failed due to malformed JSON. + '404': + description: Failed due to non existing user. '401': description: Missing or invalid access token provided. 
'500': - $ref: "#/components/responses/ServiceError" - /users/{userID}: - get: - summary: Gets info on specified user. + $ref: "#/components/responses/ServiceError" + + /users/{userID}/identity: + patch: + summary: Updates Identity of the user. description: | - Gets info on the specified user. Info is obtained using userId + Updates identity of the user with provided ID. Identity is + updated using authorization token and the new received identity. tags: - - users + - Users parameters: - - $ref: "#/components/parameters/UserId" + - $ref: "#/components/parameters/UserID" + requestBody: + $ref: "#/components/requestBodies/UserUpdateIdentityReq" + security: + - bearerAuth: [] responses: '200': $ref: "#/components/responses/UserRes" '400': - description: Failed due to malformed query parameters. + description: Failed due to malformed JSON. + '404': + description: Failed due to non existing user. '401': description: Missing or invalid access token provided. '500': - $ref: "#/components/responses/ServiceError" - /groups/{groupID}: - get: - summary: Retrieves users + $ref: "#/components/responses/ServiceError" + + /users/{userID}/owner: + patch: + summary: Updates the user owner. description: | - Retrieves a list of users that belong to a group. Due to performance concerns, data - is retrieved in subsets. The API things must ensure that the entire - dataset is consumed either by making subsequent requests, or by - increasing the subset size of the initial request. + Updates owner for the user with provided ID. Owner is updated using + authorization token and a new owner identifier received in request. 
tags: - - users + - Users parameters: - - $ref: "#/components/parameters/GroupId" - - $ref: "#/components/parameters/Limit" - - $ref: "#/components/parameters/Offset" - - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/UserID" + requestBody: + $ref: "#/components/requestBodies/UserUpdateOwnerReq" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/UserRes" + '400': + description: Failed due to malformed JSON. + '404': + description: Failed due to non existing user. + '401': + description: Missing or invalid access token provided. + '500': + $ref: "#/components/responses/ServiceError" + + /users/{userID}/disable: + post: + summary: Disables a user + description: | + Disables a specific user that is identifier by the user ID. + tags: + - Users + parameters: + - $ref: "#/components/parameters/UserID" + security: + - bearerAuth: [] responses: '200': - $ref: "#/components/responses/UsersPageRes" + $ref: "#/components/responses/UserRes" '400': description: Failed due to malformed query parameters. '401': @@ -140,41 +268,58 @@ paths: description: Database can't process request. '500': $ref: "#/components/responses/ServiceError" - /tokens: + + /users/{userID}/enable: post: - summary: User authentication - description: Generates an access token when provided with proper credentials. + summary: Enables a user + description: | + Enables a specific user that is identifier by the user ID. tags: - - users + - Users + parameters: + - $ref: "#/components/parameters/UserID" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/UserRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: Missing or invalid access token provided. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. 
+ '500': + $ref: "#/components/responses/ServiceError" + + /users/secret: + patch: + summary: Updates Secret of currently logged in user. + description: | + Updates secret of currently logged in user. Secret is updated using + authorization token and the new received info. + tags: + - Users + parameters: + - $ref: "#/components/parameters/UserID" requestBody: - $ref: "#/components/requestBodies/UserCreateReq" + $ref: "#/components/requestBodies/UserUpdateSecretReq" + security: + - bearerAuth: [] responses: - '201': - description: User authenticated. - content: - application/json: - schema: - $ref: '#/components/schemas/Token' + '200': + $ref: "#/components/responses/UserRes" '400': description: Failed due to malformed JSON. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' + '404': + description: Failed due to non existing user. '401': - description: Failed due to using invalid credentials. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - '415': - description: Missing or invalid content type. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' + description: Missing or invalid access token provided. '500': - $ref: '#/components/responses/ServiceError' + $ref: "#/components/responses/ServiceError" + /password/reset-request: post: summary: User password reset request @@ -182,29 +327,30 @@ paths: Generates a reset token and sends and email with link for resetting password. tags: - - users + - Users parameters: - - $ref: "#/components/parameters/Referer" + - $ref: "#/components/parameters/Referrer" requestBody: $ref: '#/components/requestBodies/RequestPasswordReset' responses: '201': - description: Users link for reseting password. + description: Users link for resetting password. '400': description: Failed due to malformed JSON. '415': description: Missing or invalid content type. 
'500': $ref: '#/components/responses/ServiceError' + /password/reset: put: summary: User password reset endpoint description: | - When user gets reset token, after he submited + When user gets reset token, after he submitted email to `/password/reset-request`, posting a new password along to this endpoint will change password. tags: - - users + - Users requestBody: $ref: '#/components/requestBodies/PasswordReset' responses: @@ -216,210 +362,1324 @@ paths: description: Missing or invalid content type. '500': $ref: '#/components/responses/ServiceError' - /password: - patch: - summary: User password change endpoint + + /users/{userID}/memberships: + get: + tags: + - Users + summary: List memberships + description: | + Retrieves a list of groups the user is connected to + parameters: + - $ref: "#/components/parameters/UserID" + - $ref: "#/components/parameters/Limit" + - $ref: "#/components/parameters/Offset" + - $ref: "#/components/parameters/Level" + - $ref: "#/components/parameters/Tree" + - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/GroupName" + - $ref: "#/components/parameters/ParentId" + - $ref: "#/components/parameters/OwnerId" + security: + - bearerAuth: [] + responses: + '200': + $ref: "#/components/responses/MembershipsPageRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: | + Missing or invalid access token provided. + This endpoint is available only for administrators. + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceError" + + /users/tokens/issue: + post: + summary: Issue Token description: | - When authenticated user wants to change password. + Issue Access and Refresh Token used for authenticating into the system. 
tags: - - users + - Users + requestBody: + $ref: "#/components/requestBodies/IssueTokenReq" + responses: + '200': + $ref: "#/components/responses/TokenRes" + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceError" + + /users/tokens/refresh: + post: + summary: Refresh Token + description: | + Refreshes Access and Refresh Token used for authenticating into the system. + tags: + - Users + security: + - refreshAuth: [] + responses: + '200': + $ref: "#/components/responses/TokenRes" + '404': + description: A non-existent entity request. + '422': + description: Database can't process request. + '500': + $ref: "#/components/responses/ServiceError" + + /groups: + post: + tags: + - Groups + summary: Creates new group + description: | + Creates new group that can be used for grouping entities. New account will + be uniquely identified by its identity. requestBody: - $ref: '#/components/requestBodies/PasswordChange' + $ref: "#/components/requestBodies/GroupCreateReq" + security: + - bearerAuth: [] responses: '201': - description: User link . + $ref: "#/components/responses/GroupCreateRes" '400': description: Failed due to malformed JSON. + '401': + description: Missing or invalid access token provided. + '409': + description: Failed due to using an existing identity. '415': description: Missing or invalid content type. '500': $ref: "#/components/responses/ServiceError" - /users/{userID}/enable: - post: - summary: Enables a user account + + get: + summary: Lists groups. description: | - Enables a disabled user account for a given user ID. + Lists groups up to a max level of hierarchy that can be fetched in one + request ( max level = 5). Result can be filtered by metadata. Groups will + be returned as JSON array or JSON tree. Due to performance concerns, result + is returned in subsets. 
tags: - - users + - Groups + security: + - bearerAuth: [] parameters: - - $ref: "#/components/parameters/UserId" - responses: + - $ref: "#/components/parameters/Limit" + - $ref: "#/components/parameters/Offset" + - $ref: "#/components/parameters/Level" + - $ref: "#/components/parameters/Tree" + - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/GroupName" + - $ref: "#/components/parameters/ParentId" + - $ref: "#/components/parameters/OwnerId" + responses: '200': - description: User enabled. + $ref: "#/components/responses/GroupPageRes" '400': - description: Failed due to malformed JSON. - '404': - description: Failed due to non existing user. + description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. + '404': + description: Group does not exist. '500': - $ref: "#/components/responses/ServiceError" - /users/{userID}/disable: - post: - summary: Disables a user account + $ref: "#/components/responses/ServiceError" + + /groups/{groupId}: + get: + summary: Gets group info. description: | - Disables a user account for a given user ID. + Gets info on a group specified by id. tags: - - users + - Groups parameters: - - $ref: "#/components/parameters/UserId" - responses: + - $ref: "#/components/parameters/GroupId" + security: + - bearerAuth: [] + responses: '200': - description: User disabled. + $ref: "#/components/responses/GroupRes" '400': - description: Failed due to malformed JSON. - '404': - description: Failed due to non existing user. + description: Failed due to malformed query parameters. '401': description: Missing or invalid access token provided. + '404': + description: Group does not exist. '500': - $ref: "#/components/responses/ServiceError" - /health: - get: - summary: Retrieves service health check info. + $ref: "#/components/responses/ServiceError" + + put: + summary: Updates group data. + description: | + Updates Name, Description or Metadata of a group. 
tags: - - health + - Groups + parameters: + - $ref: "#/components/parameters/GroupId" + security: + - bearerAuth: [] + requestBody: + $ref: "#/components/requestBodies/GroupUpdateReq" responses: '200': - $ref: "#/components/responses/HealthRes" + $ref: "#/components/responses/GroupRes" + '400': + description: Failed due to malformed query parameters. + '401': + description: Missing or invalid access token provided. + '404': + description: Group does not exist. '500': $ref: "#/components/responses/ServiceError" -components: - schemas: - Token: - type: object - properties: - token: - type: string - format: jwt - description: Generated access token. - required: - - token - UserReqObj: - type: object - properties: - email: - type: string - format: email - example: "test@example.com" - description: User's email address will be used as its unique identifier - password: - type: string - format: password - minimum: 8 - description: Free-form account password used for acquiring auth token(s). + /groups/{groupId}/children: + get: + summary: List children of a certain group + description: | + Lists groups up to a max level of hierarchy that can be fetched in one + request ( max level = 5). Result can be filtered by metadata. Groups will + be returned as JSON array or JSON tree. Due to performance concerns, result + is returned in subsets. + tags: + - Groups + security: + - bearerAuth: [] + parameters: + - $ref: "#/components/parameters/GroupId" + - $ref: "#/components/parameters/Limit" + - $ref: "#/components/parameters/Offset" + - $ref: "#/components/parameters/Level" + - $ref: "#/components/parameters/Tree" + - $ref: "#/components/parameters/Metadata" + - $ref: "#/components/parameters/GroupName" + - $ref: "#/components/parameters/ParentId" + - $ref: "#/components/parameters/OwnerId" + responses: + '200': + $ref: "#/components/responses/GroupPageRes" + '400': + description: Failed due to malformed query parameters. 
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: Group does not exist.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /groups/{groupId}/parents:
+    get:
+      summary: List parents of a certain group
+      description: |
+        Lists groups up to a max level of hierarchy that can be fetched in one
+        request ( max level = 5). Result can be filtered by metadata. Groups will
+        be returned as JSON array or JSON tree. Due to performance concerns, result
+        is returned in subsets.
+      tags:
+        - Groups
+      security:
+        - bearerAuth: []
+      parameters:
+        - $ref: "#/components/parameters/GroupId"
+        - $ref: "#/components/parameters/Limit"
+        - $ref: "#/components/parameters/Offset"
+        - $ref: "#/components/parameters/Level"
+        - $ref: "#/components/parameters/Tree"
+        - $ref: "#/components/parameters/Metadata"
+        - $ref: "#/components/parameters/GroupName"
+        - $ref: "#/components/parameters/ParentId"
+        - $ref: "#/components/parameters/OwnerId"
+      responses:
+        '200':
+          $ref: "#/components/responses/GroupPageRes"
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: Group does not exist.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /groups/{groupId}/enable:
+    post:
+      summary: Enables a group
+      description: |
+        Enables a specific group that is identified by the group ID.
+      tags:
+        - Groups
+      parameters:
+        - $ref: "#/components/parameters/GroupId"
+      security:
+        - bearerAuth: []
+      responses:
+        '200':
+          $ref: "#/components/responses/GroupRes"
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: A non-existent entity request.
+        '422':
+          description: Database can't process request.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /groups/{groupId}/disable:
+    post:
+      summary: Disables a group
+      description: |
+        Disables a specific group that is identified by the group ID.
+      tags:
+        - Groups
+      parameters:
+        - $ref: "#/components/parameters/GroupId"
+      security:
+        - bearerAuth: []
+      responses:
+        '200':
+          $ref: "#/components/responses/GroupRes"
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: A non-existent entity request.
+        '422':
+          description: Database can't process request.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /groups/{groupId}/members:
+    get:
+      summary: Get group members.
+      description: |
+        Gets members associated with the group specified by id.
+      tags:
+        - Groups
+      parameters:
+        - $ref: "#/components/parameters/GroupId"
+        - $ref: "#/components/parameters/Limit"
+        - $ref: "#/components/parameters/Offset"
+        - $ref: "#/components/parameters/Metadata"
+        - $ref: "#/components/parameters/Status"
+        - $ref: "#/components/parameters/UserName"
+        - $ref: "#/components/parameters/UserIdentity"
+        - $ref: "#/components/parameters/Tags"
+      security:
+        - bearerAuth: []
+      responses:
+        '200':
+          $ref: "#/components/responses/MembersPageRes"
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: Group does not exist.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /policies:
+    get:
+      summary: Fetches policy data.
+      description: |
+        List available policies.
+      tags:
+        - Policies
+      parameters:
+        - $ref: "#/components/parameters/Limit"
+        - $ref: "#/components/parameters/Offset"
+        - $ref: "#/components/parameters/Subject"
+        - $ref: "#/components/parameters/Object"
+        - $ref: "#/components/parameters/Actions"
+      security:
+        - bearerAuth: []
+      responses:
+        '200':
+          $ref: "#/components/responses/PolicyPageRes"
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: Policy does not exist.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+    post:
+      tags:
+        - Policies
+      summary: Creates new policy
+      description: |
+        Creates new policies. Only admin can use this endpoint. Therefore, you need an authentication token for the admin.
+        Also, only policies defined on the system are allowed to be added. For more details, please see the docs for Authorization.
+      requestBody:
+        $ref: "#/components/requestBodies/PolicyCreateReq"
+      security:
+        - bearerAuth: []
+      responses:
+        '201':
+          $ref: "#/components/responses/PolicyCreateRes"
+        '400':
+          description: Failed due to malformed JSON.
+        '401':
+          description: Missing or invalid access token provided.
+        '403':
+          description: Unauthorized access token provided.
+        '409':
+          description: Failed due to using an existing identity.
+        '415':
+          description: Missing or invalid content type.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+    put:
+      summary: Updates policy data.
+      description: |
+        Updates Actions of a policy.
+      tags:
+        - Policies
+      security:
+        - bearerAuth: []
+      requestBody:
+        $ref: "#/components/requestBodies/PolicyCreateReq"
+      responses:
+        '200':
+          description: Policy updated.
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: Policy does not exist.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /policies/{sub}/{obj}:
+    delete:
+      tags:
+        - Policies
+      summary: Delete policy
+      description: |
+        Delete specified policies
+      security:
+        - bearerAuth: []
+      parameters:
+        - $ref: "#/components/parameters/Sub"
+        - $ref: "#/components/parameters/Obj"
+      responses:
+        '204':
+          description: Policy deleted.
+        '400':
+          description: Failed due to malformed query parameters.
+        '401':
+          description: Missing or invalid access token provided.
+        '404':
+          description: Policy does not exist.
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+  /health:
+    get:
+      summary: Retrieves service health check info.
+      tags:
+        - health
+      responses:
+        '200':
+          $ref: "#/components/responses/HealthRes"
+        '500':
+          $ref: "#/components/responses/ServiceError"
+
+components:
+  schemas:
+    UserReqObj:
+      type: object
+      properties:
+        name:
+          type: string
+          example: userName
+          description: User name.
+        tags:
+          type: array
+          minItems: 0
+          items:
+            type: string
+          example: ['tag1', 'tag2']
+          description: User tags.
+        credentials:
+          type: object
+          properties:
+            identity:
+              type: string
+              example: "admin@example.com"
+              description: User's identity, for example an email address, will be used as its unique identifier
+            secret:
+              type: string
+              format: password
+              example: password
+              minimum: 8
+              description: Free-form account secret used for acquiring auth token(s).
+        owner:
+          type: string
+          format: uuid
+          example: bb7edb32-2eac-4aad-aebe-ed96fe073879
+          description: User owner must be existing in the database.
+        metadata:
+          type: object
+          example: {"domain": "example.com"}
+          description: Arbitrary, object-encoded user's data.
+        status:
+          type: string
+          description: User Status
+          format: string
+          example: enabled
+      required:
+        - credentials
+
+    GroupReqObj:
+      type: object
+      properties:
+        name:
+          type: string
+          example: groupName
+          description: Free-form group name. Group name is unique on the given hierarchy level.
+        description:
+          type: string
+          example: long group description
+          description: Group description, free form text.
+        parent_id:
+          type: string
+          example: bb7edb32-2eac-4aad-aebe-ed96fe073879
+          description: Id of parent group, it must be an existing group.
+        metadata:
+          type: object
+          example: {"domain": "example.com"}
+          description: Arbitrary, object-encoded group's data.
+        status:
+          type: string
+          description: Group Status
+          format: string
+          example: enabled
+        owner_id:
+          type: string
+          format: uuid
+          example: bb7edb32-2eac-4aad-aebe-ed96fe073879
+          description: Group owner ID must be existing in the database.
+      required:
+        - name
+
+    PolicyReqObj:
+      type: object
+      properties:
+        subject:
+          type: string
+          description: Policy subject refers to the user id
+          example: 'bb7edb32-2eac-4aad-aebe-ed96fe073879'
+        object:
+          type: string
+          description: Policy object refers to either the user id, group id, computation id or dataset id
+          example: 'bb7edb32-2eac-4aad-aebe-ed96fe073879'
+        actions:
+          type: array
+          minItems: 0
+          items:
+            type: string
+          example: ['m_write', 'g_add']
+          description: Policy actions.
+      required:
+        - subject
+        - object
+        - actions
+
+    User:
+      type: object
+      properties:
+        id:
+          type: string
+          format: uuid
+          example: bb7edb32-2eac-4aad-aebe-ed96fe073879
+          description: User unique identifier.
+        name:
+          type: string
+          example: userName
+          description: User name.
+        tags:
+          type: array
+          minItems: 0
+          items:
+            type: string
+          example: ['tag1', 'tag2']
+          description: User tags.
+        owner:
+          type: string
+          format: uuid
+          example: bb7edb32-2eac-4aad-aebe-ed96fe073879
+          description: User owner identifier.
+        credentials:
+          type: object
+          properties:
+            identity:
+              type: string
+              example: admin@mainflux.com
+              description: User Identity for example email address.
+            secret:
+              type: string
+              example: password
+              description: User secret password.
+        metadata:
+          type: object
+          example: {"domain": "example.com"}
+          description: Arbitrary, object-encoded user's data.
+ status: + type: string + description: User Status + format: string + example: enabled + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the group was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the group was created. + xml: + name: user + + Group: + type: object + properties: + id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Unique group identifier generated by the service. + name: + type: string + example: groupName + description: Free-form group name. Group name is unique on the given hierarchy level. + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Group owner identifier of user that created the group.. + parent_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Group parent identifier. + description: + type: string + example: long group description + description: Group description, free form text. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded groups's data. + path: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879.bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Hierarchy path, concatenated ids of group ancestors. + level: + type: integer + description: Level in hierarchy, distance from the root group. + format: int32 + example: 2 + maximum: 5 + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the group was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the group was created. 
+ status: + type: string + description: Group Status + format: string + example: enabled + xml: + name: group + + Memberships: + type: object + properties: + id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Unique group identifier generated by the service. + name: + type: string + example: groupName + description: Free-form group name. Group name is unique on the given hierarchy level. + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Group owner identifier of user that created the group.. + parent_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Group parent identifier. + description: + type: string + example: long group description + description: Group description, free form text. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded groups's data. + path: + type: string + example: bb7edb32-2eac-4aad-aebe-ed96fe073879.bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Hierarchy path, concatenated ids of group ancestors. + level: + type: integer + description: Level in hierarchy, distance from the root group. + format: int32 + example: 2 + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the group was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Datetime when the group was created. + xml: + name: memberships + + Members: + type: object + properties: + id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: User unique identifier. + name: + type: string + example: userName + description: User name. + tags: + type: array + minItems: 0 + items: + type: string + example: ['computations', 'datasets'] + description: User tags. 
+ owner: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: User owner identifier. + credentials: + type: object + properties: + identity: + type: string + example: user@mainflux.com + description: User Identity for example email address. + secret: + type: string + example: password + description: User secret password. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded user's data. + status: + type: string + description: User Status + format: string + example: enabled + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the group was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the group was created. + xml: + name: members + + Policy: + type: object + properties: + owner_id: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Policy owner identifier. + subject: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Policy subject identifier. + object: + type: string + format: uuid + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + description: Policy object identifier. + actions: + type: array + minItems: 0 + items: + type: string + example: ['m_write', 'g_add'] + description: Policy actions. + created_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the policy was created. + updated_at: + type: string + format: date-time + example: "2019-11-26 13:31:52" + description: Time when the policy was updated. + xml: + name: policy + + UsersPage: + type: object + properties: + users: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/User" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. 
+ limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. + required: + - users + - total + - offset + + GroupsPage: + type: object + properties: + groups: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/Group" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. + limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. required: - - email - - password - User: + - groups + - total + - level + + MembershipsPage: type: object properties: - id: - type: string - format: uuid - example: 18167738-f7a8-4e96-a123-58c3cd14de3a - description: User unique identifier. - email: - type: string - format: email - example: "test@example.com" - description: User's email address will be used as its unique identifier. - metadata: - type: object - description: Arbitrary, object-encoded user's data. - UsersPage: + memberships: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/Memberships" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. + limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. + required: + - memberships + - total + - level + + MembersPage: type: object properties: - things: + members: type: array minItems: 0 uniqueItems: true items: - $ref: "#/components/schemas/User" + $ref: "#/components/schemas/Members" + total: + type: integer + example: 1 + description: Total number of items. + offset: + type: integer + description: Number of items to skip during retrieval. + limit: + type: integer + example: 10 + description: Maximum number of items to return in one page. 
+ required: + - members + - total + - level + + PoliciesPage: + type: object + properties: + policies: + type: array + minItems: 0 + uniqueItems: true + items: + $ref: "#/components/schemas/Policy" total: type: integer + example: 1 description: Total number of items. offset: type: integer description: Number of items to skip during retrieval. limit: type: integer + example: 10 description: Maximum number of items to return in one page. required: - - things - UserMetadata: + - policies + - total + - offset + + UserUpdate: type: object properties: + name: + type: string + example: userName + description: User name. metadata: type: object + example: {"role": "general"} description: Arbitrary, object-encoded user's data. + required: + - name + - metadata + + UserTags: + type: object + properties: + tags: + type: array + example: ['yello', 'orange'] + description: User tags. + minItems: 0 + uniqueItems: true + items: + type: string + + UserIdentity: + type: object + properties: + identity: + type: string + example: user@mainflux.com + description: User Identity for example email address. + required: + - identity + + UserSecret: + type: object + properties: + old_secret: + type: string + example: oldpassword + description: Old user secret password. + new_secret: + type: string + example: newpassword + description: New user secret password. + required: + - old_secret + - new_secret + + UserOwner: + type: object + properties: + owner: + type: string + example: user@mainflux.com + description: User owner for example email address. + required: + - owner + + GroupUpdate: + type: object + properties: + name: + type: string + example: groupName + description: Free-form group name. Group name is unique on the given hierarchy level. + description: + type: string + example: long description but not too long + description: Group description, free form text. + metadata: + type: object + example: {"role": "general"} + description: Arbitrary, object-encoded groups's data. 
+ required: + - name + - metadata + - description + + PolicyUpdate: + type: object + properties: + actions: + type: array + example: ['m_write', 'g_add'] + description: Policy actions. + minItems: 0 + uniqueItems: true + items: + type: string + + IssueToken: + type: object + properties: + identity: + type: string + example: user@mainflux.com + description: User Identity for example email address. + secret: + type: string + example: password + description: User secret password. + required: + - identity + - secret + Error: type: object properties: error: type: string description: Error message + example: {"error": "malformed entity specification"} + HealthRes: + type: object + properties: + status: + type: string + description: Service status. + enum: + - pass + version: + type: string + description: Service version. + example: 0.0.1 + commit: + type: string + description: Service commit hash. + example: 7d6f4dc4f7f0c1fa3dc24eddfb18bb5073ff4f62 + description: + type: string + description: Service description. + example: service + build_time: + type: string + description: Service build time. + example: 1970-01-01_00:00:00 + parameters: - Referer: - name: Referer - description: Host being sent by browser. - in: header - schema: - type: string - required: true - Metadata: - name: metadata - description: Metadata filter. Filtering is performed matching the parameter with metadata on top level. Parameter is json. - in: query - schema: - type: string - minimum: 0 - required: false - UserId: - name: userID - description: Unique user identifier. - in: path - schema: - type: string - format: uuid - required: true - GroupId: - name: groupID - description: Unique group identifier. - in: path - schema: - type: string - format: ulid - required: true - Limit: - name: limit - description: Size of the subset to retrieve. 
- in: query - schema: - type: integer - default: 10 - maximum: 100 - minimum: 1 - required: false - Offset: - name: offset - description: Number of items to skip during retrieval. - in: query - schema: - type: integer - default: 0 - minimum: 0 - required: false - Status: - name: status - description: User account status. - in: query - schema: - type: string - default: enabled - required: false + Referrer: + name: Referrer + description: Host being sent by browser. + in: header + schema: + type: string + required: true + + UserID: + name: userID + description: Unique user identifier. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Visibility: + name: visibility + description: The visibility specifier when listing users. Either all, shared or mine. + in: path + schema: + type: string + required: true + example: all + + UserName: + name: name + description: User's name. + in: query + schema: + type: string + required: false + example: 'userName' + + UserIdentity: + name: identity + description: User's identity. + in: query + schema: + type: string + required: false + example: 'admin@example.com' + + Owner: + name: owner + description: User's owner. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + UserOwner: + name: owner + description: Unique owner identifier for a user. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + UserVisibility: + name: visibility + description: visibility to list either users I own or users that are shared with me or both users I own and shared with me + in: query + schema: + type: string + required: false + example: shared + + Status: + name: status + description: User account status. + in: query + schema: + type: string + default: enabled + required: false + example: enabled + + Tags: + name: tags + description: User tags. 
+ in: query + schema: + type: array + minItems: 0 + uniqueItems: true + items: + type: string + required: false + example: ['yello', 'orange'] + + GroupName: + name: name + description: Group's name. + in: query + schema: + type: string + required: false + example: 'groupName' + + GroupDescription: + name: name + description: Group's description. + in: query + schema: + type: string + required: false + example: 'group description' + + GroupId: + name: groupId + description: Unique group identifier. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + ParentId: + name: parentId + description: Unique parent identifier for a group. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Level: + name: level + description: Level of hierarchy up to which to retrieve groups from given group id. + in: query + schema: + type: integer + minimum: 1 + maximum: 5 + required: false + + Tree: + name: tree + description: Specify type of response, JSON array or tree. + in: query + required: false + schema: + type: boolean + default: false + + OwnerId: + name: ownerId + description: Unique owner identifier for a group. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Subject: + name: subject + description: Unique subject identifier for a policy. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Object: + name: object + description: Unique object identifier for a policy. + in: query + schema: + type: string + format: uuid + required: false + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Actions: + name: actions + description: Policy action types. 
+ in: query + schema: + type: array + minItems: 0 + uniqueItems: true + items: + type: string + required: false + example: ['m_write', 'g_add'] + + Sub: + name: sub + description: Unique subject identifier for a policy. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Obj: + name: obj + description: Unique object identifier for a policy. + in: path + schema: + type: string + format: uuid + required: true + example: bb7edb32-2eac-4aad-aebe-ed96fe073879 + + Metadata: + name: metadata + description: Metadata filter. Filtering is performed matching the parameter with metadata on top level. Parameter is json. + in: query + schema: + type: string + minimum: 0 + required: false + + Limit: + name: limit + description: Size of the subset to retrieve. + in: query + schema: + type: integer + default: 10 + maximum: 100 + minimum: 1 + required: false + example: '100' + + Offset: + name: offset + description: Number of items to skip during retrieval. + in: query + schema: + type: integer + default: 0 + minimum: 0 + required: false + example: '0' + requestBodies: UserCreateReq: description: JSON-formatted document describing the new user to be registered @@ -428,13 +1688,87 @@ components: application/json: schema: $ref: '#/components/schemas/UserReqObj' + UserUpdateReq: - description: JSON-formated document describing the metadata of user to be update + description: JSON-formated document describing the metadata and name of user to be update + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UserUpdate" + + UserUpdateTagsReq: + description: JSON-formated document describing the tags of user to be update + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UserTags" + + UserUpdateIdentityReq: + description: Identity change data. User can change its identity. 
+ required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserIdentity' + + UserUpdateSecretReq: + description: Secret change data. User can change its secret. + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserSecret' + + UserUpdateOwnerReq: + description: JSON-formated document describing the owner of user to be update + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/UserOwner" + + GroupCreateReq: + description: JSON-formatted document describing the new group to be registered + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/GroupReqObj' + + GroupUpdateReq: + description: JSON-formated document describing the metadata and name of group to be update + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/GroupUpdate" + + PolicyCreateReq: + description: JSON-formatted document describing the new group to be registered + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PolicyReqObj' + + PolicyUpdateReq: + description: JSON-formated document describing the actions of a policy to be update + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/PolicyUpdate" + + IssueTokenReq: + description: Login credentials. required: true content: application/json: schema: - $ref: "#/components/schemas/UserMetadata" + $ref: "#/components/schemas/IssueToken" + RequestPasswordReset: description: Initiate password request procedure. required: true @@ -447,6 +1781,7 @@ components: type: string format: email description: User email. + PasswordReset: description: Password reset request data, new password and token that is appended on password reset link received in email. content: @@ -468,6 +1803,7 @@ components: type: string format: jwt description: Reset token generated and sent in email. + PasswordChange: description: Password change data. 
User can change its password. required: true @@ -488,6 +1824,74 @@ components: responses: UserCreateRes: description: Registered new user. + headers: + Location: + schema: + type: string + format: url + description: Registered user relative URL in the format `/users/` + content: + application/json: + schema: + $ref: "#/components/schemas/User" + + UserRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/User" + + UserPageRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/UsersPage" + + MembershipsPageRes: + description: Memberships associated with the user. + content: + application/json: + schema: + $ref: "#/components/schemas/MembershipsPage" + + GroupCreateRes: + description: Registered new group. + headers: + Location: + schema: + type: string + format: url + description: Registered group relative URL in the format `/groups/` + content: + application/json: + schema: + $ref: "#/components/schemas/Group" + + GroupRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/Group" + + GroupPageRes: + description: Data retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/GroupsPage" + + MembersPageRes: + description: Group members retrieved. + content: + application/json: + schema: + $ref: "#/components/schemas/MembersPage" + + PolicyCreateRes: + description: Registered new policy. headers: Location: content: @@ -495,36 +1899,72 @@ components: schema: type: string format: url - description: Registered user relative URL. - example: /users/{userId} - UserRes: + description: Registered policy relative URL. + example: /policy/{subject}/{object} + + PolicyRes: description: Data retrieved. content: application/json: schema: - $ref: "#/components/schemas/User" - UsersPageRes: + $ref: "#/components/schemas/Policy" + + PolicyPageRes: description: Data retrieved. 
content: application/json: schema: - $ref: "#/components/schemas/UsersPage" - ServiceError: - description: Unexpected server-side error occurred. + $ref: "#/components/schemas/PoliciesPage" + + TokenRes: + description: JSON-formated document describing the user access token used for authenticating into the syetem and refresh token used for generating another access token + content: + application/json: + schema: + type: object + properties: + access_token: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NjU3OTMwNjksImlhdCI6MTY2NTc1NzA2OSwiaXNzIjoibWFpbmZsdXguYXV0aCIsInN1YiI6ImFkbWluQGV4YW1wbGUuY29tIiwiaXNzdWVyX2lkIjoiZmRjZWVhNWYtNjYxNy00MjY1LWJhZDUtMzYxOTNhOTQ0NjMwIiwidHlwZSI6MH0.3gNd_x01QEiZfQxuQoEyqCqTrcxRkXHO7A4iG_gzu3c + description: User access token. + refresh_token: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NjU3OTMwNjksImlhdCI6MTY2NTc1NzA2OSwiaXNzIjoibWFpbmZsdXguYXV0aCIsInN1YiI6ImFkbWluQGV4YW1wbGUuY29tIiwiaXNzdWVyX2lkIjoiZmRjZWVhNWYtNjYxNy00MjY1LWJhZDUtMzYxOTNhOTQ0NjMwIiwidHlwZSI6MH0.3gNd_x01QEiZfQxuQoEyqCqTrcxRkXHO7A4iG_gzu3c + description: User refresh token. + access_type: + type: string + example: access + description: User access token type. + HealthRes: description: Service Health Check. + content: + application/health+json: + schema: + $ref: "#/components/schemas/HealthRes" + + ServiceError: + description: Unexpected server-side error occurred. 
content: application/json: schema: - $ref: "./schemas/HealthInfo.yml" - + $ref: "#/components/schemas/Error" + securitySchemes: bearerAuth: type: http scheme: bearer bearerFormat: JWT description: | - * Users access: "Authorization: Bearer " - + * User access: "Authorization: Bearer " + + refreshAuth: + type: http + scheme: bearer + bearerFormat: JWT + description: | + * User refresh token used to get another access token: "Authorization: Bearer " security: - bearerAuth: [] + - refreshAuth: [] + \ No newline at end of file diff --git a/api/openapi/websocket.yml b/api/openapi/websocket.yml new file mode 100644 index 0000000000..6f7f5bd803 --- /dev/null +++ b/api/openapi/websocket.yml @@ -0,0 +1,41 @@ +openapi: 3.0.1 +info: + title: Mainflux ws adapter + description: WebSocket API for sending messages through communication channels. + version: "1.0.0" +paths: + /channels/{id}/messages: + post: + summary: Sends message to the communication channel + description: | + Sends message to the communication channel. Messages can be sent as + JSON formatted SenML or as blob. + tags: + - messages + parameters: + - $ref: "#/components/parameters/ID" + requestBody: + $ref: "#/components/requestBodies/MessageReq" + responses: + "202": + description: Message is accepted for processing. + "400": + description: Message discarded due to its malformed content. + "401": + description: Missing or invalid access token provided. + "404": + description: Message discarded due to invalid channel id. + "415": + description: Message discarded due to invalid or missing content type. + '500': + $ref: "#/components/responses/ServiceError" + /health: + get: + summary: Retrieves service health check info. 
+ tags: + - health + responses: + '200': + $ref: "#/components/responses/HealthRes" + '500': + $ref: "#/components/responses/ServiceError" diff --git a/auth.pb.go b/auth.pb.go deleted file mode 100644 index bf19393702..0000000000 --- a/auth.pb.go +++ /dev/null @@ -1,1571 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.12 -// source: auth.proto - -package mainflux - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AccessByKeyReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` - ChanID string `protobuf:"bytes,2,opt,name=chanID,proto3" json:"chanID,omitempty"` -} - -func (x *AccessByKeyReq) Reset() { - *x = AccessByKeyReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AccessByKeyReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AccessByKeyReq) ProtoMessage() {} - -func (x *AccessByKeyReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
AccessByKeyReq.ProtoReflect.Descriptor instead. -func (*AccessByKeyReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{0} -} - -func (x *AccessByKeyReq) GetToken() string { - if x != nil { - return x.Token - } - return "" -} - -func (x *AccessByKeyReq) GetChanID() string { - if x != nil { - return x.ChanID - } - return "" -} - -type ChannelOwnerReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` - ChanID string `protobuf:"bytes,2,opt,name=chanID,proto3" json:"chanID,omitempty"` -} - -func (x *ChannelOwnerReq) Reset() { - *x = ChannelOwnerReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ChannelOwnerReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChannelOwnerReq) ProtoMessage() {} - -func (x *ChannelOwnerReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ChannelOwnerReq.ProtoReflect.Descriptor instead. 
-func (*ChannelOwnerReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{1} -} - -func (x *ChannelOwnerReq) GetOwner() string { - if x != nil { - return x.Owner - } - return "" -} - -func (x *ChannelOwnerReq) GetChanID() string { - if x != nil { - return x.ChanID - } - return "" -} - -type ThingID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *ThingID) Reset() { - *x = ThingID{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ThingID) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ThingID) ProtoMessage() {} - -func (x *ThingID) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ThingID.ProtoReflect.Descriptor instead. 
-func (*ThingID) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{2} -} - -func (x *ThingID) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -type ChannelID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *ChannelID) Reset() { - *x = ChannelID{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ChannelID) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChannelID) ProtoMessage() {} - -func (x *ChannelID) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ChannelID.ProtoReflect.Descriptor instead. 
-func (*ChannelID) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{3} -} - -func (x *ChannelID) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -type AccessByIDReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ThingID string `protobuf:"bytes,1,opt,name=thingID,proto3" json:"thingID,omitempty"` - ChanID string `protobuf:"bytes,2,opt,name=chanID,proto3" json:"chanID,omitempty"` -} - -func (x *AccessByIDReq) Reset() { - *x = AccessByIDReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AccessByIDReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AccessByIDReq) ProtoMessage() {} - -func (x *AccessByIDReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AccessByIDReq.ProtoReflect.Descriptor instead. -func (*AccessByIDReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{4} -} - -func (x *AccessByIDReq) GetThingID() string { - if x != nil { - return x.ThingID - } - return "" -} - -func (x *AccessByIDReq) GetChanID() string { - if x != nil { - return x.ChanID - } - return "" -} - -// If a token is not carrying any information itself, the type -// field can be used to determine how to validate the token. -// Also, different tokens can be encoded in different ways. 
-type Token struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Token) Reset() { - *x = Token{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Token) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Token) ProtoMessage() {} - -func (x *Token) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Token.ProtoReflect.Descriptor instead. -func (*Token) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{5} -} - -func (x *Token) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -type UserIdentity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` -} - -func (x *UserIdentity) Reset() { - *x = UserIdentity{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UserIdentity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UserIdentity) ProtoMessage() {} - -func (x *UserIdentity) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - 
return mi.MessageOf(x) -} - -// Deprecated: Use UserIdentity.ProtoReflect.Descriptor instead. -func (*UserIdentity) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{6} -} - -func (x *UserIdentity) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *UserIdentity) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -type IssueReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` - Type uint32 `protobuf:"varint,3,opt,name=type,proto3" json:"type,omitempty"` -} - -func (x *IssueReq) Reset() { - *x = IssueReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IssueReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IssueReq) ProtoMessage() {} - -func (x *IssueReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IssueReq.ProtoReflect.Descriptor instead. 
-func (*IssueReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{7} -} - -func (x *IssueReq) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *IssueReq) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *IssueReq) GetType() uint32 { - if x != nil { - return x.Type - } - return 0 -} - -type AuthorizeReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Sub string `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` - Obj string `protobuf:"bytes,2,opt,name=obj,proto3" json:"obj,omitempty"` - Act string `protobuf:"bytes,3,opt,name=act,proto3" json:"act,omitempty"` -} - -func (x *AuthorizeReq) Reset() { - *x = AuthorizeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthorizeReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthorizeReq) ProtoMessage() {} - -func (x *AuthorizeReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthorizeReq.ProtoReflect.Descriptor instead. 
-func (*AuthorizeReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{8} -} - -func (x *AuthorizeReq) GetSub() string { - if x != nil { - return x.Sub - } - return "" -} - -func (x *AuthorizeReq) GetObj() string { - if x != nil { - return x.Obj - } - return "" -} - -func (x *AuthorizeReq) GetAct() string { - if x != nil { - return x.Act - } - return "" -} - -type AuthorizeRes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Authorized bool `protobuf:"varint,1,opt,name=authorized,proto3" json:"authorized,omitempty"` -} - -func (x *AuthorizeRes) Reset() { - *x = AuthorizeRes{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthorizeRes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthorizeRes) ProtoMessage() {} - -func (x *AuthorizeRes) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthorizeRes.ProtoReflect.Descriptor instead. 
-func (*AuthorizeRes) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{9} -} - -func (x *AuthorizeRes) GetAuthorized() bool { - if x != nil { - return x.Authorized - } - return false -} - -type AddPolicyReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Sub string `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` - Obj string `protobuf:"bytes,2,opt,name=obj,proto3" json:"obj,omitempty"` - Act string `protobuf:"bytes,3,opt,name=act,proto3" json:"act,omitempty"` -} - -func (x *AddPolicyReq) Reset() { - *x = AddPolicyReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddPolicyReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddPolicyReq) ProtoMessage() {} - -func (x *AddPolicyReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddPolicyReq.ProtoReflect.Descriptor instead. 
-func (*AddPolicyReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{10} -} - -func (x *AddPolicyReq) GetSub() string { - if x != nil { - return x.Sub - } - return "" -} - -func (x *AddPolicyReq) GetObj() string { - if x != nil { - return x.Obj - } - return "" -} - -func (x *AddPolicyReq) GetAct() string { - if x != nil { - return x.Act - } - return "" -} - -type AddPolicyRes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Authorized bool `protobuf:"varint,1,opt,name=authorized,proto3" json:"authorized,omitempty"` -} - -func (x *AddPolicyRes) Reset() { - *x = AddPolicyRes{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddPolicyRes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddPolicyRes) ProtoMessage() {} - -func (x *AddPolicyRes) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddPolicyRes.ProtoReflect.Descriptor instead. 
-func (*AddPolicyRes) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{11} -} - -func (x *AddPolicyRes) GetAuthorized() bool { - if x != nil { - return x.Authorized - } - return false -} - -type DeletePolicyReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Sub string `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` - Obj string `protobuf:"bytes,2,opt,name=obj,proto3" json:"obj,omitempty"` - Act string `protobuf:"bytes,3,opt,name=act,proto3" json:"act,omitempty"` -} - -func (x *DeletePolicyReq) Reset() { - *x = DeletePolicyReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeletePolicyReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeletePolicyReq) ProtoMessage() {} - -func (x *DeletePolicyReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeletePolicyReq.ProtoReflect.Descriptor instead. 
-func (*DeletePolicyReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{12} -} - -func (x *DeletePolicyReq) GetSub() string { - if x != nil { - return x.Sub - } - return "" -} - -func (x *DeletePolicyReq) GetObj() string { - if x != nil { - return x.Obj - } - return "" -} - -func (x *DeletePolicyReq) GetAct() string { - if x != nil { - return x.Act - } - return "" -} - -type DeletePolicyRes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Deleted bool `protobuf:"varint,1,opt,name=deleted,proto3" json:"deleted,omitempty"` -} - -func (x *DeletePolicyRes) Reset() { - *x = DeletePolicyRes{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeletePolicyRes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeletePolicyRes) ProtoMessage() {} - -func (x *DeletePolicyRes) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeletePolicyRes.ProtoReflect.Descriptor instead. 
-func (*DeletePolicyRes) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{13} -} - -func (x *DeletePolicyRes) GetDeleted() bool { - if x != nil { - return x.Deleted - } - return false -} - -type ListPoliciesReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Sub string `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` - Obj string `protobuf:"bytes,2,opt,name=obj,proto3" json:"obj,omitempty"` - Act string `protobuf:"bytes,3,opt,name=act,proto3" json:"act,omitempty"` -} - -func (x *ListPoliciesReq) Reset() { - *x = ListPoliciesReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListPoliciesReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListPoliciesReq) ProtoMessage() {} - -func (x *ListPoliciesReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListPoliciesReq.ProtoReflect.Descriptor instead. 
-func (*ListPoliciesReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{14} -} - -func (x *ListPoliciesReq) GetSub() string { - if x != nil { - return x.Sub - } - return "" -} - -func (x *ListPoliciesReq) GetObj() string { - if x != nil { - return x.Obj - } - return "" -} - -func (x *ListPoliciesReq) GetAct() string { - if x != nil { - return x.Act - } - return "" -} - -type ListPoliciesRes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"` -} - -func (x *ListPoliciesRes) Reset() { - *x = ListPoliciesRes{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListPoliciesRes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListPoliciesRes) ProtoMessage() {} - -func (x *ListPoliciesRes) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListPoliciesRes.ProtoReflect.Descriptor instead. 
-func (*ListPoliciesRes) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{15} -} - -func (x *ListPoliciesRes) GetPolicies() []string { - if x != nil { - return x.Policies - } - return nil -} - -type Assignment struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` - GroupID string `protobuf:"bytes,2,opt,name=groupID,proto3" json:"groupID,omitempty"` - MemberID string `protobuf:"bytes,3,opt,name=memberID,proto3" json:"memberID,omitempty"` -} - -func (x *Assignment) Reset() { - *x = Assignment{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Assignment) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Assignment) ProtoMessage() {} - -func (x *Assignment) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Assignment.ProtoReflect.Descriptor instead. 
-func (*Assignment) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{16} -} - -func (x *Assignment) GetToken() string { - if x != nil { - return x.Token - } - return "" -} - -func (x *Assignment) GetGroupID() string { - if x != nil { - return x.GroupID - } - return "" -} - -func (x *Assignment) GetMemberID() string { - if x != nil { - return x.MemberID - } - return "" -} - -type MembersReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` - GroupID string `protobuf:"bytes,2,opt,name=groupID,proto3" json:"groupID,omitempty"` - Offset uint64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` - Limit uint64 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` -} - -func (x *MembersReq) Reset() { - *x = MembersReq{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MembersReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MembersReq) ProtoMessage() {} - -func (x *MembersReq) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MembersReq.ProtoReflect.Descriptor instead. 
-func (*MembersReq) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{17} -} - -func (x *MembersReq) GetToken() string { - if x != nil { - return x.Token - } - return "" -} - -func (x *MembersReq) GetGroupID() string { - if x != nil { - return x.GroupID - } - return "" -} - -func (x *MembersReq) GetOffset() uint64 { - if x != nil { - return x.Offset - } - return 0 -} - -func (x *MembersReq) GetLimit() uint64 { - if x != nil { - return x.Limit - } - return 0 -} - -func (x *MembersReq) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -type MembersRes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Offset uint64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` - Limit uint64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` - Members []string `protobuf:"bytes,5,rep,name=members,proto3" json:"members,omitempty"` -} - -func (x *MembersRes) Reset() { - *x = MembersRes{} - if protoimpl.UnsafeEnabled { - mi := &file_auth_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MembersRes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MembersRes) ProtoMessage() {} - -func (x *MembersRes) ProtoReflect() protoreflect.Message { - mi := &file_auth_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MembersRes.ProtoReflect.Descriptor instead. 
-func (*MembersRes) Descriptor() ([]byte, []int) { - return file_auth_proto_rawDescGZIP(), []int{18} -} - -func (x *MembersRes) GetTotal() uint64 { - if x != nil { - return x.Total - } - return 0 -} - -func (x *MembersRes) GetOffset() uint64 { - if x != nil { - return x.Offset - } - return 0 -} - -func (x *MembersRes) GetLimit() uint64 { - if x != nil { - return x.Limit - } - return 0 -} - -func (x *MembersRes) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *MembersRes) GetMembers() []string { - if x != nil { - return x.Members - } - return nil -} - -var File_auth_proto protoreflect.FileDescriptor - -var file_auth_proto_rawDesc = []byte{ - 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6d, 0x61, - 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x3e, 0x0a, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x79, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x63, - 0x68, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x68, 0x61, - 0x6e, 0x49, 0x44, 0x22, 0x3f, 0x0a, 0x0f, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x77, - 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x63, 0x68, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x68, - 0x61, 0x6e, 0x49, 0x44, 0x22, 0x1f, 0x0a, 0x07, 0x54, 0x68, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 
0x0a, 0x09, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x41, 0x0a, 0x0d, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x68, 0x69, - 0x6e, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x68, 0x69, 0x6e, - 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x68, 0x61, 0x6e, 0x49, 0x44, 0x22, 0x1d, 0x0a, 0x05, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x34, 0x0a, 0x0c, 0x55, 0x73, - 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x22, 0x44, 0x0a, 0x08, 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x44, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x62, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x74, 0x22, 
0x2e, 0x0a, 0x0c, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, - 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x22, 0x44, 0x0a, 0x0c, - 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, - 0x73, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, - 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, - 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, - 0x63, 0x74, 0x22, 0x2e, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x65, 0x64, 0x22, 0x47, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x47, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, - 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, 0x0a, - 0x03, 0x6f, 0x62, 
0x6a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, 0x12, - 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, - 0x74, 0x22, 0x2d, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x22, 0x58, 0x0a, 0x0a, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x12, 0x1a, - 0x0a, 0x08, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x49, 0x44, 0x22, 0x7e, 0x0a, 0x0a, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x18, - 0x0a, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x7e, 0x0a, 0x0a, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 
0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x32, 0x8d, 0x02, 0x0a, 0x0d, 0x54, - 0x68, 0x69, 0x6e, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x0e, - 0x43, 0x61, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x18, - 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x42, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x2e, 0x54, 0x68, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x22, 0x00, 0x12, 0x45, 0x0a, - 0x0e, 0x49, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, - 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0d, 0x43, 0x61, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x42, 0x79, 0x49, 0x44, 0x12, 0x17, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x30, 0x0a, 0x08, 0x49, 0x64, 0x65, 
0x6e, - 0x74, 0x69, 0x66, 0x79, 0x12, 0x0f, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x2e, 0x54, 0x68, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x22, 0x00, 0x32, 0xf5, 0x03, 0x0a, 0x0b, 0x41, - 0x75, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x49, 0x73, - 0x73, 0x75, 0x65, 0x12, 0x12, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x49, - 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12, 0x0f, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, - 0x78, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, - 0x00, 0x12, 0x3d, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x16, - 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, - 0x78, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x22, 0x00, - 0x12, 0x3d, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x16, 0x2e, - 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x2e, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, - 0x46, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, - 0x38, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x69, 0x6e, - 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x07, 0x4d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, - 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x6d, 0x61, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, - 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_auth_proto_rawDescOnce sync.Once - file_auth_proto_rawDescData = file_auth_proto_rawDesc -) - -func file_auth_proto_rawDescGZIP() []byte { - file_auth_proto_rawDescOnce.Do(func() { - file_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_proto_rawDescData) - }) - return file_auth_proto_rawDescData -} - -var file_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 19) -var file_auth_proto_goTypes = []interface{}{ - (*AccessByKeyReq)(nil), // 0: mainflux.AccessByKeyReq - 
(*ChannelOwnerReq)(nil), // 1: mainflux.ChannelOwnerReq - (*ThingID)(nil), // 2: mainflux.ThingID - (*ChannelID)(nil), // 3: mainflux.ChannelID - (*AccessByIDReq)(nil), // 4: mainflux.AccessByIDReq - (*Token)(nil), // 5: mainflux.Token - (*UserIdentity)(nil), // 6: mainflux.UserIdentity - (*IssueReq)(nil), // 7: mainflux.IssueReq - (*AuthorizeReq)(nil), // 8: mainflux.AuthorizeReq - (*AuthorizeRes)(nil), // 9: mainflux.AuthorizeRes - (*AddPolicyReq)(nil), // 10: mainflux.AddPolicyReq - (*AddPolicyRes)(nil), // 11: mainflux.AddPolicyRes - (*DeletePolicyReq)(nil), // 12: mainflux.DeletePolicyReq - (*DeletePolicyRes)(nil), // 13: mainflux.DeletePolicyRes - (*ListPoliciesReq)(nil), // 14: mainflux.ListPoliciesReq - (*ListPoliciesRes)(nil), // 15: mainflux.ListPoliciesRes - (*Assignment)(nil), // 16: mainflux.Assignment - (*MembersReq)(nil), // 17: mainflux.MembersReq - (*MembersRes)(nil), // 18: mainflux.MembersRes - (*emptypb.Empty)(nil), // 19: google.protobuf.Empty -} -var file_auth_proto_depIdxs = []int32{ - 0, // 0: mainflux.ThingsService.CanAccessByKey:input_type -> mainflux.AccessByKeyReq - 1, // 1: mainflux.ThingsService.IsChannelOwner:input_type -> mainflux.ChannelOwnerReq - 4, // 2: mainflux.ThingsService.CanAccessByID:input_type -> mainflux.AccessByIDReq - 5, // 3: mainflux.ThingsService.Identify:input_type -> mainflux.Token - 7, // 4: mainflux.AuthService.Issue:input_type -> mainflux.IssueReq - 5, // 5: mainflux.AuthService.Identify:input_type -> mainflux.Token - 8, // 6: mainflux.AuthService.Authorize:input_type -> mainflux.AuthorizeReq - 10, // 7: mainflux.AuthService.AddPolicy:input_type -> mainflux.AddPolicyReq - 12, // 8: mainflux.AuthService.DeletePolicy:input_type -> mainflux.DeletePolicyReq - 14, // 9: mainflux.AuthService.ListPolicies:input_type -> mainflux.ListPoliciesReq - 16, // 10: mainflux.AuthService.Assign:input_type -> mainflux.Assignment - 17, // 11: mainflux.AuthService.Members:input_type -> mainflux.MembersReq - 2, // 12: 
mainflux.ThingsService.CanAccessByKey:output_type -> mainflux.ThingID - 19, // 13: mainflux.ThingsService.IsChannelOwner:output_type -> google.protobuf.Empty - 19, // 14: mainflux.ThingsService.CanAccessByID:output_type -> google.protobuf.Empty - 2, // 15: mainflux.ThingsService.Identify:output_type -> mainflux.ThingID - 5, // 16: mainflux.AuthService.Issue:output_type -> mainflux.Token - 6, // 17: mainflux.AuthService.Identify:output_type -> mainflux.UserIdentity - 9, // 18: mainflux.AuthService.Authorize:output_type -> mainflux.AuthorizeRes - 11, // 19: mainflux.AuthService.AddPolicy:output_type -> mainflux.AddPolicyRes - 13, // 20: mainflux.AuthService.DeletePolicy:output_type -> mainflux.DeletePolicyRes - 15, // 21: mainflux.AuthService.ListPolicies:output_type -> mainflux.ListPoliciesRes - 19, // 22: mainflux.AuthService.Assign:output_type -> google.protobuf.Empty - 18, // 23: mainflux.AuthService.Members:output_type -> mainflux.MembersRes - 12, // [12:24] is the sub-list for method output_type - 0, // [0:12] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_auth_proto_init() } -func file_auth_proto_init() { - if File_auth_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_auth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AccessByKeyReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChannelOwnerReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ThingID); i { - case 0: 
- return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChannelID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AccessByIDReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Token); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UserIdentity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IssueReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizeRes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddPolicyReq); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddPolicyRes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeletePolicyReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeletePolicyRes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListPoliciesReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListPoliciesRes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Assignment); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MembersReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_auth_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MembersRes); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_auth_proto_rawDesc, - NumEnums: 0, - NumMessages: 19, - NumExtensions: 0, - NumServices: 2, - }, - GoTypes: file_auth_proto_goTypes, - DependencyIndexes: file_auth_proto_depIdxs, - MessageInfos: file_auth_proto_msgTypes, - }.Build() - File_auth_proto = out.File - file_auth_proto_rawDesc = nil - file_auth_proto_goTypes = nil - file_auth_proto_depIdxs = nil -} diff --git a/auth.proto b/auth.proto deleted file mode 100644 index c32dbf3e38..0000000000 --- a/auth.proto +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package mainflux; - -import "google/protobuf/empty.proto"; - -option go_package = "./mainflux"; - -service ThingsService { - rpc CanAccessByKey(AccessByKeyReq) returns (ThingID) {} - rpc IsChannelOwner(ChannelOwnerReq) returns (google.protobuf.Empty) {} - rpc CanAccessByID(AccessByIDReq) returns (google.protobuf.Empty) {} - rpc Identify(Token) returns (ThingID) {} -} - -service AuthService { - rpc Issue(IssueReq) returns (Token) {} - rpc Identify(Token) returns (UserIdentity) {} - rpc Authorize(AuthorizeReq) returns (AuthorizeRes) {} - rpc AddPolicy(AddPolicyReq) returns (AddPolicyRes) {} - rpc DeletePolicy(DeletePolicyReq) returns (DeletePolicyRes) {} - rpc ListPolicies(ListPoliciesReq) returns (ListPoliciesRes) {} - rpc Assign(Assignment) returns(google.protobuf.Empty) {} - rpc Members(MembersReq) returns (MembersRes) {} -} - -message AccessByKeyReq { - string token = 1; - string chanID = 2; -} - -message ChannelOwnerReq { - string owner = 1; - string chanID = 2; -} - -message ThingID { - string value = 1; -} - -message ChannelID { - string value = 1; -} - -message AccessByIDReq { - string thingID = 1; - string chanID = 2; -} - -// If a token is not 
carrying any information itself, the type -// field can be used to determine how to validate the token. -// Also, different tokens can be encoded in different ways. -message Token { - string value = 1; -} - -message UserIdentity { - string id = 1; - string email = 2; -} - -message IssueReq { - string id = 1; - string email = 2; - uint32 type = 3; -} - -message AuthorizeReq { - string sub = 1; - string obj = 2; - string act = 3; -} - -message AuthorizeRes { - bool authorized = 1; -} - -message AddPolicyReq { - string sub = 1; - string obj = 2; - string act = 3; -} - -message AddPolicyRes { - bool authorized = 1; -} - -message DeletePolicyReq { - string sub = 1; - string obj = 2; - string act = 3; -} - -message DeletePolicyRes { - bool deleted = 1; -} - -message ListPoliciesReq { - string sub = 1; - string obj = 2; - string act = 3; -} - -message ListPoliciesRes { - repeated string policies = 1; -} - -message Assignment { - string token = 1; - string groupID = 2; - string memberID = 3; -} - -message MembersReq { - string token = 1; - string groupID = 2; - uint64 offset = 3; - uint64 limit = 4; - string type = 5; -} - -message MembersRes { - uint64 total = 1; - uint64 offset = 2; - uint64 limit = 3; - string type = 4; - repeated string members = 5; -} diff --git a/auth/README.md b/auth/README.md deleted file mode 100644 index dca9f119a4..0000000000 --- a/auth/README.md +++ /dev/null @@ -1,113 +0,0 @@ -# Auth - Authentication and Authorization service - -Auth service provides authentication features as an API for managing authentication keys as well as administering groups of entities - `things` and `users`. - -# Authentication -User service is using Auth service gRPC API to obtain login token or password reset token. 
Authentication key consists of the following fields: -- ID - key ID -- Type - one of the three types described below -- IssuerID - an ID of the Mainflux User who issued the key -- Subject - user email -- IssuedAt - the timestamp when the key is issued -- ExpiresAt - the timestamp after which the key is invalid - -There are *three types of authentication keys*: - -- User key - keys issued to the user upon login request -- API key - keys issued upon the user request -- Recovery key - password recovery key - -Authentication keys are represented and distributed by the corresponding [JWT](jwt.io). - -User keys are issued when user logs in. Each user request (other than `registration` and `login`) contains user key that is used to authenticate the user. - -API keys are similar to the User keys. The main difference is that API keys have configurable expiration time. If no time is set, the key will never expire. For that reason, API keys are _the only key type that can be revoked_. This also means that, despite being used as a JWT, it requires a query to the database to validate the API key. The user with API key can perform all the same actions as the user with login key (can act on behalf of the user for Thing, Channel, or user profile management), *except issuing new API keys*. - -Recovery key is the password recovery key. It's short-lived token used for password recovery process. - -For in-depth explanation of the aforementioned scenarios, as well as thorough -understanding of Mainflux, please check out the [official documentation][doc]. - -The following actions are supported: - -- create (all key types) -- verify (all key types) -- obtain (API keys only) -- revoke (API keys only) - -# Groups -User and Things service are using Auth gRPC API to get the list of ids that are part of a group. Groups can be organized as tree structure. 
-Group consists of the following fields: - -- ID - ULID id uniquely representing group -- Name - name of the group, name of the group is unique at the same level of tree hierarchy for a given tree. -- ParentID - id of the parent group -- OwnerID - id of the user that created a group -- Description - free form text, up to 1024 characters -- Metadata - Arbitrary, object-encoded group's data -- Path - tree path consisting of group ids -- CreatedAt - timestamp at which the group is created -- UpdatedAt - timestamp at which the group is updated - -## Configuration - -The service is configured using the environment variables presented in the -following table. Note that any unset variables will be replaced with their -default values. - -| Variable | Description | Default | -|-------------------------------|--------------------------------------------------------------------------|----------------| -| MF_AUTH_LOG_LEVEL | Service level (debug, info, warn, error) | info | -| MF_AUTH_DB_HOST | Database host address | localhost | -| MF_AUTH_DB_PORT | Database host port | 5432 | -| MF_AUTH_DB_USER | Database user | mainflux | -| MF_AUTH_DB_PASSWORD | Database password | mainflux | -| MF_AUTH_DB | Name of the database used by the service | auth | -| MF_AUTH_DB_SSL_MODE | Database connection SSL mode (disable, require, verify-ca, verify-full) | disable | -| MF_AUTH_DB_SSL_CERT | Path to the PEM encoded certificate file | | -| MF_AUTH_DB_SSL_KEY | Path to the PEM encoded key file | | -| MF_AUTH_DB_SSL_ROOT_CERT | Path to the PEM encoded root certificate file | | -| MF_AUTH_HTTP_PORT | Auth service HTTP port | 9020 | -| MF_AUTH_GRPC_PORT | Auth service gRPC port | 7001 | -| MF_AUTH_SERVER_CERT | Path to server certificate in pem format | | -| MF_AUTH_SERVER_KEY | Path to server key in pem format | | -| MF_AUTH_SECRET | String used for signing tokens | auth | -| MF_AUTH_LOGIN_TOKEN_DURATION | The login token expiration period | 10h | -| MF_JAEGER_URL | Jaeger server URL | 
localhost:6831 | -| MF_KETO_READ_REMOTE_HOST | Keto Read Host | mainflux-keto | -| MF_KETO_WRITE_REMOTE_HOST | Keto Write Host | mainflux-keto | -| MF_KETO_READ_REMOTE_PORT | Keto Read Port | 4466 | -| MF_KETO_WRITE_REMOTE_PORT | Keto Write Port | 4467 | - -## Deployment - -The service itself is distributed as Docker container. Check the [`auth`](https://github.com/mainflux/mainflux/blob/master/docker/docker-compose.yml#L71-L94) service section in -docker-compose to see how service is deployed. - - -To start the service outside of the container, execute the following shell script: - -```bash -# download the latest version of the service -go get github.com/mainflux/mainflux - -cd $GOPATH/src/github.com/mainflux/mainflux - -# compile the service -make auth - -# copy binary to bin -make install - -# set the environment variables and run the service -MF_AUTH_LOG_LEVEL=[Service log level] MF_AUTH_DB_HOST=[Database host address] MF_AUTH_DB_PORT=[Database host port] MF_AUTH_DB_USER=[Database user] MF_AUTH_DB_PASS=[Database password] MF_AUTH_DB=[Name of the database used by the service] MF_AUTH_DB_SSL_MODE=[SSL mode to connect to the database with] MF_AUTH_DB_SSL_CERT=[Path to the PEM encoded certificate file] MF_AUTH_DB_SSL_KEY=[Path to the PEM encoded key file] MF_AUTH_DB_SSL_ROOT_CERT=[Path to the PEM encoded root certificate file] MF_AUTH_HTTP_PORT=[Service HTTP port] MF_AUTH_GRPC_PORT=[Service gRPC port] MF_AUTH_SECRET=[String used for signing tokens] MF_AUTH_SERVER_CERT=[Path to server certificate] MF_AUTH_SERVER_KEY=[Path to server key] MF_JAEGER_URL=[Jaeger server URL] MF_AUTH_LOGIN_TOKEN_DURATION=[The login token expiration period] $GOBIN/mainflux-auth -``` - -If `MF_EMAIL_TEMPLATE` doesn't point to any file service will function but password reset functionality will not work. - -## Usage - -For more information about service capabilities and its usage, please check out -the [API documentation](https://api.mainflux.io/?urls.primaryName=auth-openapi.yml). 
- -[doc]: https://docs.mainflux.io diff --git a/auth/api/doc.go b/auth/api/doc.go deleted file mode 100644 index 93a6c7a2c1..0000000000 --- a/auth/api/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package api contains implementation of Auth service HTTP API. -package api diff --git a/auth/api/grpc/client.go b/auth/api/grpc/client.go deleted file mode 100644 index eccf5aafa7..0000000000 --- a/auth/api/grpc/client.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import ( - "context" - "time" - - "github.com/go-kit/kit/endpoint" - kitot "github.com/go-kit/kit/tracing/opentracing" - kitgrpc "github.com/go-kit/kit/transport/grpc" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" - opentracing "github.com/opentracing/opentracing-go" - "google.golang.org/grpc" -) - -const ( - svcName = "mainflux.AuthService" -) - -var _ mainflux.AuthServiceClient = (*grpcClient)(nil) - -type grpcClient struct { - issue endpoint.Endpoint - identify endpoint.Endpoint - authorize endpoint.Endpoint - addPolicy endpoint.Endpoint - deletePolicy endpoint.Endpoint - listPolicies endpoint.Endpoint - assign endpoint.Endpoint - members endpoint.Endpoint - timeout time.Duration -} - -// NewClient returns new gRPC client instance. 
-func NewClient(tracer opentracing.Tracer, conn *grpc.ClientConn, timeout time.Duration) mainflux.AuthServiceClient { - return &grpcClient{ - issue: kitot.TraceClient(tracer, "issue")(kitgrpc.NewClient( - conn, - svcName, - "Issue", - encodeIssueRequest, - decodeIssueResponse, - mainflux.UserIdentity{}, - ).Endpoint()), - identify: kitot.TraceClient(tracer, "identify")(kitgrpc.NewClient( - conn, - svcName, - "Identify", - encodeIdentifyRequest, - decodeIdentifyResponse, - mainflux.UserIdentity{}, - ).Endpoint()), - authorize: kitot.TraceClient(tracer, "authorize")(kitgrpc.NewClient( - conn, - svcName, - "Authorize", - encodeAuthorizeRequest, - decodeAuthorizeResponse, - mainflux.AuthorizeRes{}, - ).Endpoint()), - addPolicy: kitot.TraceClient(tracer, "add_policy")(kitgrpc.NewClient( - conn, - svcName, - "AddPolicy", - encodeAddPolicyRequest, - decodeAddPolicyResponse, - mainflux.AddPolicyRes{}, - ).Endpoint()), - deletePolicy: kitot.TraceClient(tracer, "delete_policy")(kitgrpc.NewClient( - conn, - svcName, - "DeletePolicy", - encodeDeletePolicyRequest, - decodeDeletePolicyResponse, - mainflux.DeletePolicyRes{}, - ).Endpoint()), - listPolicies: kitot.TraceClient(tracer, "list_policies")(kitgrpc.NewClient( - conn, - svcName, - "ListPolicies", - encodeListPoliciesRequest, - decodeListPoliciesResponse, - mainflux.ListPoliciesRes{}, - ).Endpoint()), - assign: kitot.TraceClient(tracer, "assign")(kitgrpc.NewClient( - conn, - svcName, - "Assign", - encodeAssignRequest, - decodeAssignResponse, - mainflux.AuthorizeRes{}, - ).Endpoint()), - members: kitot.TraceClient(tracer, "members")(kitgrpc.NewClient( - conn, - svcName, - "Members", - encodeMembersRequest, - decodeMembersResponse, - mainflux.MembersRes{}, - ).Endpoint()), - - timeout: timeout, - } -} - -func (client grpcClient) Issue(ctx context.Context, req *mainflux.IssueReq, _ ...grpc.CallOption) (*mainflux.Token, error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := 
client.issue(ctx, issueReq{id: req.GetId(), email: req.GetEmail(), keyType: req.Type}) - if err != nil { - return nil, err - } - - ir := res.(identityRes) - return &mainflux.Token{Value: ir.id}, nil -} - -func encodeIssueRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(issueReq) - return &mainflux.IssueReq{Id: req.id, Email: req.email, Type: req.keyType}, nil -} - -func decodeIssueResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.UserIdentity) - return identityRes{id: res.GetId(), email: res.GetEmail()}, nil -} - -func (client grpcClient) Identify(ctx context.Context, token *mainflux.Token, _ ...grpc.CallOption) (*mainflux.UserIdentity, error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := client.identify(ctx, identityReq{token: token.GetValue()}) - if err != nil { - return nil, err - } - - ir := res.(identityRes) - return &mainflux.UserIdentity{Id: ir.id, Email: ir.email}, nil -} - -func encodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(identityReq) - return &mainflux.Token{Value: req.token}, nil -} - -func decodeIdentifyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.UserIdentity) - return identityRes{id: res.GetId(), email: res.GetEmail()}, nil -} - -func (client grpcClient) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := client.authorize(ctx, authReq{Act: req.GetAct(), Obj: req.GetObj(), Sub: req.GetSub()}) - if err != nil { - return &mainflux.AuthorizeRes{}, err - } - - ar := res.(authorizeRes) - return &mainflux.AuthorizeRes{Authorized: ar.authorized}, err -} - -func decodeAuthorizeResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := 
grpcRes.(*mainflux.AuthorizeRes) - return authorizeRes{authorized: res.Authorized}, nil -} - -func encodeAuthorizeRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(authReq) - return &mainflux.AuthorizeReq{ - Sub: req.Sub, - Obj: req.Obj, - Act: req.Act, - }, nil -} - -func (client grpcClient) AddPolicy(ctx context.Context, in *mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := client.addPolicy(ctx, policyReq{Act: in.GetAct(), Obj: in.GetObj(), Sub: in.GetSub()}) - if err != nil { - return &mainflux.AddPolicyRes{}, err - } - - apr := res.(addPolicyRes) - return &mainflux.AddPolicyRes{Authorized: apr.authorized}, err -} - -func decodeAddPolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.AddPolicyRes) - return addPolicyRes{authorized: res.Authorized}, nil -} - -func encodeAddPolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(policyReq) - return &mainflux.AddPolicyReq{ - Sub: req.Sub, - Obj: req.Obj, - Act: req.Act, - }, nil -} - -func (client grpcClient) DeletePolicy(ctx context.Context, in *mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := client.deletePolicy(ctx, policyReq{Act: in.GetAct(), Obj: in.GetObj(), Sub: in.GetSub()}) - if err != nil { - return &mainflux.DeletePolicyRes{}, err - } - - dpr := res.(deletePolicyRes) - return &mainflux.DeletePolicyRes{Deleted: dpr.deleted}, err -} - -func decodeDeletePolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.DeletePolicyRes) - return deletePolicyRes{deleted: res.GetDeleted()}, nil -} - -func encodeDeletePolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := 
grpcReq.(policyReq) - return &mainflux.DeletePolicyReq{ - Sub: req.Sub, - Obj: req.Obj, - Act: req.Act, - }, nil -} - -func (client grpcClient) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := client.listPolicies(ctx, listPoliciesReq{Obj: in.GetObj(), Act: in.GetAct(), Sub: in.GetSub()}) - if err != nil { - return &mainflux.ListPoliciesRes{}, err - } - - lpr := res.(listPoliciesRes) - return &mainflux.ListPoliciesRes{Policies: lpr.policies}, err -} - -func decodeListPoliciesResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.ListPoliciesRes) - return listPoliciesRes{policies: res.GetPolicies()}, nil -} - -func encodeListPoliciesRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(listPoliciesReq) - return &mainflux.ListPoliciesReq{ - Sub: req.Sub, - Obj: req.Obj, - Act: req.Act, - }, nil -} - -func (client grpcClient) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - res, err := client.members(ctx, membersReq{ - token: req.GetToken(), - groupID: req.GetGroupID(), - memberType: req.GetType(), - offset: req.GetOffset(), - limit: req.GetLimit(), - }) - if err != nil { - return &mainflux.MembersRes{}, err - } - - mr := res.(membersRes) - - return &mainflux.MembersRes{ - Offset: mr.offset, - Limit: mr.limit, - Total: mr.total, - Type: mr.groupType, - Members: mr.members, - }, err -} - -func encodeMembersRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(membersReq) - return &mainflux.MembersReq{ - Token: req.token, - Offset: req.offset, - Limit: req.limit, - GroupID: req.groupID, - Type: req.memberType, - }, nil -} - -func decodeMembersResponse(_ 
context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.MembersRes) - return membersRes{ - offset: res.Offset, - limit: res.Limit, - total: res.Total, - members: res.Members, - }, nil -} - -func (client grpcClient) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, err error) { - ctx, close := context.WithTimeout(ctx, client.timeout) - defer close() - - _, err = client.assign(ctx, assignReq{token: req.GetToken(), groupID: req.GetGroupID(), memberID: req.GetMemberID()}) - if err != nil { - return &empty.Empty{}, err - } - - return &empty.Empty{}, err -} - -func encodeAssignRequest(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.AuthorizeRes) - return authorizeRes{authorized: res.Authorized}, nil -} - -func decodeAssignResponse(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(authReq) - return &mainflux.AuthorizeReq{ - Sub: req.Sub, - Obj: req.Obj, - Act: req.Act, - }, nil -} diff --git a/auth/api/grpc/doc.go b/auth/api/grpc/doc.go deleted file mode 100644 index 331229c21c..0000000000 --- a/auth/api/grpc/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package grpc contains implementation of Auth service gRPC API. 
-package grpc diff --git a/auth/api/grpc/endpoint.go b/auth/api/grpc/endpoint.go deleted file mode 100644 index c0e09c3621..0000000000 --- a/auth/api/grpc/endpoint.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import ( - "context" - "time" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/auth" -) - -func issueEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(issueReq) - if err := req.validate(); err != nil { - return issueRes{}, err - } - - key := auth.Key{ - Type: req.keyType, - Subject: req.email, - IssuerID: req.id, - IssuedAt: time.Now().UTC(), - } - - _, secret, err := svc.Issue(ctx, "", key) - if err != nil { - return issueRes{}, err - } - - return issueRes{secret}, nil - } -} - -func identifyEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(identityReq) - if err := req.validate(); err != nil { - return identityRes{}, err - } - - id, err := svc.Identify(ctx, req.token) - if err != nil { - return identityRes{}, err - } - - ret := identityRes{ - id: id.ID, - email: id.Email, - } - return ret, nil - } -} - -func authorizeEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(authReq) - - if err := req.validate(); err != nil { - return authorizeRes{}, err - } - - err := svc.Authorize(ctx, auth.PolicyReq{Subject: req.Sub, Object: req.Obj, Relation: req.Act}) - if err != nil { - return authorizeRes{}, err - } - return authorizeRes{authorized: true}, err - } -} - -func addPolicyEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(policyReq) - if err := req.validate(); err != nil { - return addPolicyRes{}, err - 
} - - err := svc.AddPolicy(ctx, auth.PolicyReq{Subject: req.Sub, Object: req.Obj, Relation: req.Act}) - if err != nil { - return addPolicyRes{}, err - } - return addPolicyRes{authorized: true}, err - } -} - -func deletePolicyEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(policyReq) - if err := req.validate(); err != nil { - return deletePolicyRes{}, err - } - - err := svc.DeletePolicy(ctx, auth.PolicyReq{Subject: req.Sub, Object: req.Obj, Relation: req.Act}) - if err != nil { - return deletePolicyRes{}, err - } - return deletePolicyRes{deleted: true}, nil - } -} - -func listPoliciesEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listPoliciesReq) - - page, err := svc.ListPolicies(ctx, auth.PolicyReq{Subject: req.Sub, Object: req.Obj, Relation: req.Act}) - if err != nil { - return deletePolicyRes{}, err - } - return listPoliciesRes{policies: page.Policies}, nil - } -} - -func assignEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(assignReq) - - if err := req.validate(); err != nil { - return emptyRes{}, err - } - - _, err := svc.Identify(ctx, req.token) - if err != nil { - return emptyRes{}, err - } - - err = svc.Assign(ctx, req.token, req.memberID, req.groupID, req.groupType) - if err != nil { - return emptyRes{}, err - } - return emptyRes{}, nil - - } -} - -func membersEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(membersReq) - if err := req.validate(); err != nil { - return membersRes{}, err - } - - pm := auth.PageMetadata{ - Offset: req.offset, - Limit: req.limit, - } - mp, err := svc.ListMembers(ctx, req.token, req.groupID, req.memberType, pm) - if err != nil { - return membersRes{}, 
err - } - var members []string - for _, m := range mp.Members { - members = append(members, m.ID) - } - return membersRes{ - offset: req.offset, - limit: req.limit, - total: mp.PageMetadata.Total, - members: members, - }, nil - } -} diff --git a/auth/api/grpc/endpoint_test.go b/auth/api/grpc/endpoint_test.go deleted file mode 100644 index ff99439ea2..0000000000 --- a/auth/api/grpc/endpoint_test.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - grpcapi "github.com/mainflux/mainflux/auth/api/grpc" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" -) - -const ( - port = 7001 - secret = "secret" - email = "test@example.com" - id = "testID" - thingsType = "things" - usersType = "users" - description = "Description" - - numOfThings = 5 - numOfUsers = 5 - - authoritiesObj = "authorities" - memberRelation = "member" - loginDuration = 30 * time.Minute -) - -var svc auth.Service - -func TestIssue(t *testing.T) { - authAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(authAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("got unexpected error while creating client connection: %s", err)) - - client := grpcapi.NewClient(mocktracer.New(), conn, time.Second) - - cases := []struct { - desc string - id string - email string - kind uint32 - err error - code codes.Code - }{ - { - desc: "issue for user with valid token", - id: id, - email: email, - kind: auth.LoginKey, - err: nil, - code: codes.OK, - }, - { - desc: "issue recovery key", - id: id, - email: email, 
- kind: auth.RecoveryKey, - err: nil, - code: codes.OK, - }, - { - desc: "issue API key unauthenticated", - id: id, - email: email, - kind: auth.APIKey, - err: nil, - code: codes.Unauthenticated, - }, - { - desc: "issue for invalid key type", - id: id, - email: email, - kind: 32, - err: status.Error(codes.InvalidArgument, "received invalid token request"), - code: codes.InvalidArgument, - }, - { - desc: "issue for user that exist", - id: "", - email: "", - kind: auth.APIKey, - err: status.Error(codes.Unauthenticated, "unauthenticated access"), - code: codes.Unauthenticated, - }, - } - - for _, tc := range cases { - _, err := client.Issue(context.Background(), &mainflux.IssueReq{Id: tc.id, Email: tc.email, Type: tc.kind}) - e, ok := status.FromError(err) - assert.True(t, ok, "gRPC status can't be extracted from the error") - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.code, e.Code())) - } -} - -func TestIdentify(t *testing.T) { - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - _, recoverySecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.RecoveryKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing recovery key expected to succeed: %s", err)) - - _, apiSecret, err := svc.Issue(context.Background(), loginSecret, auth.Key{Type: auth.APIKey, IssuedAt: time.Now(), ExpiresAt: time.Now().Add(time.Minute), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing API key expected to succeed: %s", err)) - - authAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(authAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("got unexpected error while creating client connection: %s", err)) - - client := 
grpcapi.NewClient(mocktracer.New(), conn, time.Second) - - cases := []struct { - desc string - token string - idt *mainflux.UserIdentity - err error - code codes.Code - }{ - { - desc: "identify user with user token", - token: loginSecret, - idt: &mainflux.UserIdentity{Email: email, Id: id}, - err: nil, - code: codes.OK, - }, - { - desc: "identify user with recovery token", - token: recoverySecret, - idt: &mainflux.UserIdentity{Email: email, Id: id}, - err: nil, - code: codes.OK, - }, - { - desc: "identify user with API token", - token: apiSecret, - idt: &mainflux.UserIdentity{Email: email, Id: id}, - err: nil, - code: codes.OK, - }, - { - desc: "identify user with invalid user token", - token: "invalid", - idt: &mainflux.UserIdentity{}, - err: status.Error(codes.Unauthenticated, "unauthenticated access"), - code: codes.Unauthenticated, - }, - { - desc: "identify user with empty token", - token: "", - idt: &mainflux.UserIdentity{}, - err: status.Error(codes.InvalidArgument, "received invalid token request"), - code: codes.Unauthenticated, - }, - } - - for _, tc := range cases { - idt, err := client.Identify(context.Background(), &mainflux.Token{Value: tc.token}) - if idt != nil { - assert.Equal(t, tc.idt, idt, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.idt, idt)) - } - e, ok := status.FromError(err) - assert.True(t, ok, "gRPC status can't be extracted from the error") - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.code, e.Code())) - } -} - -func TestAuthorize(t *testing.T) { - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - authAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(authAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("got unexpected error while creating client 
connection: %s", err)) - - client := grpcapi.NewClient(mocktracer.New(), conn, time.Second) - - cases := []struct { - desc string - token string - subject string - object string - relation string - ar *mainflux.AuthorizeRes - err error - code codes.Code - }{ - { - desc: "authorize user with authorized token", - token: loginSecret, - subject: id, - object: authoritiesObj, - relation: memberRelation, - ar: &mainflux.AuthorizeRes{Authorized: true}, - err: nil, - code: codes.OK, - }, - { - desc: "authorize user with unauthorized relation", - token: loginSecret, - subject: id, - object: authoritiesObj, - relation: "unauthorizedRelation", - ar: &mainflux.AuthorizeRes{Authorized: false}, - err: nil, - code: codes.PermissionDenied, - }, - { - desc: "authorize user with unauthorized object", - token: loginSecret, - subject: id, - object: "unauthorizedobject", - relation: memberRelation, - ar: &mainflux.AuthorizeRes{Authorized: false}, - err: nil, - code: codes.PermissionDenied, - }, - { - desc: "authorize user with unauthorized subject", - token: loginSecret, - subject: "unauthorizedSubject", - object: authoritiesObj, - relation: memberRelation, - ar: &mainflux.AuthorizeRes{Authorized: false}, - err: nil, - code: codes.PermissionDenied, - }, - { - desc: "authorize user with invalid ACL", - token: loginSecret, - subject: "", - object: "", - relation: "", - ar: &mainflux.AuthorizeRes{Authorized: false}, - err: nil, - code: codes.InvalidArgument, - }, - } - for _, tc := range cases { - ar, err := client.Authorize(context.Background(), &mainflux.AuthorizeReq{Sub: tc.subject, Obj: tc.object, Act: tc.relation}) - if ar != nil { - assert.Equal(t, tc.ar, ar, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.ar, ar)) - } - - e, ok := status.FromError(err) - assert.True(t, ok, "gRPC status can't be extracted from the error") - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.code, e.Code())) - } -} - -func TestAddPolicy(t *testing.T) { - _, 
loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - authAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(authAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("got unexpected error while creating client connection: %s", err)) - - client := grpcapi.NewClient(mocktracer.New(), conn, time.Second) - - groupAdminObj := "groupadmin" - - cases := []struct { - desc string - token string - subject string - object string - relation string - ar *mainflux.AddPolicyRes - err error - code codes.Code - }{ - { - desc: "add groupadmin policy to user", - token: loginSecret, - subject: id, - object: groupAdminObj, - relation: memberRelation, - ar: &mainflux.AddPolicyRes{Authorized: true}, - err: nil, - code: codes.OK, - }, - { - desc: "add policy to user with invalid ACL", - token: loginSecret, - subject: "", - object: "", - relation: "", - ar: &mainflux.AddPolicyRes{Authorized: false}, - err: nil, - code: codes.InvalidArgument, - }, - } - for _, tc := range cases { - apr, err := client.AddPolicy(context.Background(), &mainflux.AddPolicyReq{Sub: tc.subject, Obj: tc.object, Act: tc.relation}) - if apr != nil { - assert.Equal(t, tc.ar, apr, fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.ar, apr)) - } - - e, ok := status.FromError(err) - assert.True(t, ok, "gRPC status can't be extracted from the error") - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.code, e.Code())) - } -} - -func TestDeletePolicy(t *testing.T) { - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - authAddr := fmt.Sprintf("localhost:%d", port) - conn, err := 
grpc.Dial(authAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("got unexpected error while creating client connection: %s", err)) - - client := grpcapi.NewClient(mocktracer.New(), conn, time.Second) - - readRelation := "read" - thingID := "thing" - - apr, err := client.AddPolicy(context.Background(), &mainflux.AddPolicyReq{Sub: id, Obj: thingID, Act: readRelation}) - require.Nil(t, err, fmt.Sprintf("Adding read policy to user expected to succeed: %s", err)) - require.True(t, apr.GetAuthorized(), fmt.Sprintf("Adding read policy expected to make user authorized, expected %v got %v", true, apr.GetAuthorized())) - - cases := []struct { - desc string - token string - subject string - object string - relation string - dpr *mainflux.DeletePolicyRes - code codes.Code - }{ - { - desc: "delete valid policy", - token: loginSecret, - subject: id, - object: thingID, - relation: readRelation, - dpr: &mainflux.DeletePolicyRes{Deleted: true}, - code: codes.OK, - }, - { - desc: "delete invalid policy", - token: loginSecret, - subject: "", - object: "", - relation: "", - dpr: &mainflux.DeletePolicyRes{Deleted: false}, - code: codes.InvalidArgument, - }, - } - for _, tc := range cases { - dpr, err := client.DeletePolicy(context.Background(), &mainflux.DeletePolicyReq{Sub: tc.subject, Obj: tc.object, Act: tc.relation}) - e, ok := status.FromError(err) - assert.True(t, ok, "gRPC status can't be extracted from the error") - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.code, e.Code())) - assert.Equal(t, tc.dpr.GetDeleted(), dpr.GetDeleted(), fmt.Sprintf("%s: expected %v got %v", tc.desc, tc.dpr.GetDeleted(), dpr.GetDeleted())) - } -} - -func TestMembers(t *testing.T) { - _, token, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - group := 
auth.Group{ - Name: "Mainflux", - Description: description, - } - - var things []string - for i := 0; i < numOfThings; i++ { - thID, err := uuid.New().ID() - require.Nil(t, err, fmt.Sprintf("Generate thing id expected to succeed: %s", err)) - - err = svc.AddPolicy(context.Background(), auth.PolicyReq{Subject: id, Object: thID, Relation: "owner"}) - require.Nil(t, err, fmt.Sprintf("Adding a policy expected to succeed: %s", err)) - - things = append(things, thID) - } - - var users []string - for i := 0; i < numOfUsers; i++ { - id, err := uuid.New().ID() - require.Nil(t, err, fmt.Sprintf("Generate thing id expected to succeed: %s", err)) - - users = append(users, id) - } - - group, err = svc.CreateGroup(context.Background(), token, group) - require.Nil(t, err, fmt.Sprintf("Creating group expected to succeed: %s", err)) - err = svc.AddPolicy(context.Background(), auth.PolicyReq{Subject: id, Object: group.ID, Relation: "groupadmin"}) - require.Nil(t, err, fmt.Sprintf("Adding a policy expected to succeed: %s", err)) - - err = svc.Assign(context.Background(), token, group.ID, thingsType, things...) - require.Nil(t, err, fmt.Sprintf("Assign members to expected to succeed: %s", err)) - - err = svc.Assign(context.Background(), token, group.ID, usersType, users...) 
- require.Nil(t, err, fmt.Sprintf("Assign members to group expected to succeed: %s", err)) - - cases := []struct { - desc string - token string - groupID string - groupType string - size int - err error - code codes.Code - }{ - { - desc: "get all things with user token", - groupID: group.ID, - token: token, - groupType: thingsType, - size: numOfThings, - err: nil, - code: codes.OK, - }, - { - desc: "get all users with user token", - groupID: group.ID, - token: token, - groupType: usersType, - size: numOfUsers, - err: nil, - code: codes.OK, - }, - } - - authAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(authAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("got unexpected error while creating client connection: %s", err)) - - client := grpcapi.NewClient(mocktracer.New(), conn, time.Second) - - for _, tc := range cases { - m, err := client.Members(context.Background(), &mainflux.MembersReq{Token: tc.token, GroupID: tc.groupID, Type: tc.groupType, Offset: 0, Limit: 10}) - e, ok := status.FromError(err) - assert.Equal(t, tc.size, len(m.Members), fmt.Sprintf("%s: expected %d got %d", tc.desc, tc.size, len(m.Members))) - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.code, e.Code())) - assert.True(t, ok, "OK expected to be true") - } -} diff --git a/auth/api/grpc/requests.go b/auth/api/grpc/requests.go deleted file mode 100644 index 269c950d8d..0000000000 --- a/auth/api/grpc/requests.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import ( - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" -) - -type identityReq struct { - token string - kind uint32 -} - -func (req identityReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - if req.kind != auth.LoginKey && - req.kind != auth.APIKey && - req.kind != auth.RecoveryKey { - return 
apiutil.ErrInvalidAuthKey - } - - return nil -} - -type issueReq struct { - id string - email string - keyType uint32 -} - -func (req issueReq) validate() error { - if req.email == "" { - return apiutil.ErrMissingEmail - } - if req.keyType != auth.LoginKey && - req.keyType != auth.APIKey && - req.keyType != auth.RecoveryKey { - return apiutil.ErrInvalidAuthKey - } - - return nil -} - -type assignReq struct { - token string - groupID string - memberID string - groupType string -} - -func (req assignReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - if req.groupID == "" || req.memberID == "" { - return apiutil.ErrMissingID - } - return nil -} - -type membersReq struct { - token string - groupID string - offset uint64 - limit uint64 - memberType string -} - -func (req membersReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - if req.groupID == "" { - return apiutil.ErrMissingID - } - if req.memberType == "" { - return apiutil.ErrMissingMemberType - } - return nil -} - -// authReq represents authorization request. It contains: -// 1. subject - an action invoker -// 2. object - an entity over which action will be executed -// 3. 
action - type of action that will be executed (read/write) -type authReq struct { - Sub string - Obj string - Act string -} - -func (req authReq) validate() error { - if req.Sub == "" { - return apiutil.ErrMissingPolicySub - } - - if req.Obj == "" { - return apiutil.ErrMissingPolicyObj - } - - if req.Act == "" { - return apiutil.ErrMissingPolicyAct - } - - return nil -} - -type policyReq struct { - Sub string - Obj string - Act string -} - -func (req policyReq) validate() error { - if req.Sub == "" { - return apiutil.ErrMissingPolicySub - } - - if req.Obj == "" { - return apiutil.ErrMissingPolicyObj - } - - if req.Act == "" { - return apiutil.ErrMissingPolicyAct - } - - return nil -} - -type listPoliciesReq struct { - Sub string - Obj string - Act string -} diff --git a/auth/api/grpc/responses.go b/auth/api/grpc/responses.go deleted file mode 100644 index fd21f4df9a..0000000000 --- a/auth/api/grpc/responses.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -type identityRes struct { - id string - email string -} - -type issueRes struct { - value string -} - -type authorizeRes struct { - authorized bool -} - -type addPolicyRes struct { - authorized bool -} - -type deletePolicyRes struct { - deleted bool -} - -type listPoliciesRes struct { - policies []string -} - -type membersRes struct { - total uint64 - offset uint64 - limit uint64 - groupType string - members []string -} -type emptyRes struct { - err error -} diff --git a/auth/api/grpc/server.go b/auth/api/grpc/server.go deleted file mode 100644 index c2a6caa8a6..0000000000 --- a/auth/api/grpc/server.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import ( - "context" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kitgrpc "github.com/go-kit/kit/transport/grpc" - "github.com/golang/protobuf/ptypes/empty" - mainflux "github.com/mainflux/mainflux" - 
"github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/pkg/errors" - opentracing "github.com/opentracing/opentracing-go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var _ mainflux.AuthServiceServer = (*grpcServer)(nil) - -type grpcServer struct { - issue kitgrpc.Handler - identify kitgrpc.Handler - authorize kitgrpc.Handler - addPolicy kitgrpc.Handler - deletePolicy kitgrpc.Handler - listPolicies kitgrpc.Handler - assign kitgrpc.Handler - members kitgrpc.Handler - mainflux.UnimplementedAuthServiceServer -} - -// NewServer returns new AuthServiceServer instance. -func NewServer(tracer opentracing.Tracer, svc auth.Service) mainflux.AuthServiceServer { - return &grpcServer{ - issue: kitgrpc.NewServer( - kitot.TraceServer(tracer, "issue")(issueEndpoint(svc)), - decodeIssueRequest, - encodeIssueResponse, - ), - identify: kitgrpc.NewServer( - kitot.TraceServer(tracer, "identify")(identifyEndpoint(svc)), - decodeIdentifyRequest, - encodeIdentifyResponse, - ), - authorize: kitgrpc.NewServer( - kitot.TraceServer(tracer, "authorize")(authorizeEndpoint(svc)), - decodeAuthorizeRequest, - encodeAuthorizeResponse, - ), - addPolicy: kitgrpc.NewServer( - kitot.TraceServer(tracer, "add_policy")(addPolicyEndpoint(svc)), - decodeAddPolicyRequest, - encodeAddPolicyResponse, - ), - deletePolicy: kitgrpc.NewServer( - kitot.TraceServer(tracer, "delete_policy")(deletePolicyEndpoint(svc)), - decodeDeletePolicyRequest, - encodeDeletePolicyResponse, - ), - listPolicies: kitgrpc.NewServer( - kitot.TraceServer(tracer, "list_policies")(listPoliciesEndpoint(svc)), - decodeListPoliciesRequest, - encodeListPoliciesResponse, - ), - assign: kitgrpc.NewServer( - kitot.TraceServer(tracer, "assign")(assignEndpoint(svc)), - decodeAssignRequest, - encodeEmptyResponse, - ), - members: kitgrpc.NewServer( - kitot.TraceServer(tracer, "members")(membersEndpoint(svc)), - decodeMembersRequest, - encodeMembersResponse, - 
), - } -} - -func (s *grpcServer) Issue(ctx context.Context, req *mainflux.IssueReq) (*mainflux.Token, error) { - _, res, err := s.issue.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.Token), nil -} - -func (s *grpcServer) Identify(ctx context.Context, token *mainflux.Token) (*mainflux.UserIdentity, error) { - _, res, err := s.identify.ServeGRPC(ctx, token) - if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.UserIdentity), nil -} - -func (s *grpcServer) Authorize(ctx context.Context, req *mainflux.AuthorizeReq) (*mainflux.AuthorizeRes, error) { - _, res, err := s.authorize.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.AuthorizeRes), nil -} - -func (s *grpcServer) AddPolicy(ctx context.Context, req *mainflux.AddPolicyReq) (*mainflux.AddPolicyRes, error) { - _, res, err := s.addPolicy.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.AddPolicyRes), nil -} - -func (s *grpcServer) DeletePolicy(ctx context.Context, req *mainflux.DeletePolicyReq) (*mainflux.DeletePolicyRes, error) { - _, res, err := s.deletePolicy.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.DeletePolicyRes), nil -} - -func (s *grpcServer) ListPolicies(ctx context.Context, req *mainflux.ListPoliciesReq) (*mainflux.ListPoliciesRes, error) { - _, res, err := s.listPolicies.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.ListPoliciesRes), nil -} - -func (s *grpcServer) Assign(ctx context.Context, token *mainflux.Assignment) (*empty.Empty, error) { - _, res, err := s.assign.ServeGRPC(ctx, token) - if err != nil { - return nil, encodeError(err) - } - return res.(*empty.Empty), nil -} - -func (s *grpcServer) Members(ctx context.Context, req *mainflux.MembersReq) (*mainflux.MembersRes, error) { - _, res, err := s.members.ServeGRPC(ctx, req) - 
if err != nil { - return nil, encodeError(err) - } - return res.(*mainflux.MembersRes), nil -} - -func decodeIssueRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.IssueReq) - return issueReq{id: req.GetId(), email: req.GetEmail(), keyType: req.GetType()}, nil -} - -func encodeIssueResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(issueRes) - return &mainflux.Token{Value: res.value}, nil -} - -func decodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.Token) - return identityReq{token: req.GetValue()}, nil -} - -func encodeIdentifyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(identityRes) - return &mainflux.UserIdentity{Id: res.id, Email: res.email}, nil -} - -func decodeAuthorizeRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.AuthorizeReq) - return authReq{Act: req.GetAct(), Obj: req.GetObj(), Sub: req.GetSub()}, nil -} - -func encodeAuthorizeResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(authorizeRes) - return &mainflux.AuthorizeRes{Authorized: res.authorized}, nil -} - -func decodeAddPolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.AddPolicyReq) - return policyReq{Sub: req.GetSub(), Obj: req.GetObj(), Act: req.GetAct()}, nil -} - -func encodeAddPolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(addPolicyRes) - return &mainflux.AddPolicyRes{Authorized: res.authorized}, nil -} - -func decodeAssignRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.Token) - return assignReq{token: req.GetValue()}, nil -} - -func decodeDeletePolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := 
grpcReq.(*mainflux.DeletePolicyReq) - return policyReq{Sub: req.GetSub(), Obj: req.GetObj(), Act: req.GetAct()}, nil -} - -func encodeDeletePolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(deletePolicyRes) - return &mainflux.DeletePolicyRes{Deleted: res.deleted}, nil -} - -func decodeListPoliciesRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.ListPoliciesReq) - return listPoliciesReq{Sub: req.GetSub(), Obj: req.GetObj(), Act: req.GetAct()}, nil -} - -func encodeListPoliciesResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(listPoliciesRes) - return &mainflux.ListPoliciesRes{Policies: res.policies}, nil -} - -func decodeMembersRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.MembersReq) - return membersReq{ - token: req.GetToken(), - groupID: req.GetGroupID(), - memberType: req.GetType(), - offset: req.Offset, - limit: req.Limit, - }, nil -} - -func encodeMembersResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(membersRes) - return &mainflux.MembersRes{ - Total: res.total, - Offset: res.offset, - Limit: res.limit, - Type: res.groupType, - Members: res.members, - }, nil -} - -func encodeEmptyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(emptyRes) - return &empty.Empty{}, encodeError(res.err) -} - -func encodeError(err error) error { - switch { - case errors.Contains(err, nil): - return nil - case errors.Contains(err, errors.ErrMalformedEntity), - err == apiutil.ErrInvalidAuthKey, - err == apiutil.ErrMissingID, - err == apiutil.ErrMissingMemberType, - err == apiutil.ErrMissingPolicySub, - err == apiutil.ErrMissingPolicyObj, - err == apiutil.ErrMissingPolicyAct: - return status.Error(codes.InvalidArgument, err.Error()) - case errors.Contains(err, errors.ErrAuthentication), - errors.Contains(err, 
auth.ErrKeyExpired), - err == apiutil.ErrMissingEmail, - err == apiutil.ErrBearerToken: - return status.Error(codes.Unauthenticated, err.Error()) - case errors.Contains(err, errors.ErrAuthorization): - return status.Error(codes.PermissionDenied, err.Error()) - default: - return status.Error(codes.Internal, "internal server error") - } -} diff --git a/auth/api/grpc/setup_test.go b/auth/api/grpc/setup_test.go deleted file mode 100644 index e490469f8b..0000000000 --- a/auth/api/grpc/setup_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc_test - -import ( - "fmt" - "log" - "net" - "os" - "testing" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - grpcapi "github.com/mainflux/mainflux/auth/api/grpc" - "github.com/mainflux/mainflux/auth/jwt" - "github.com/mainflux/mainflux/auth/mocks" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go/mocktracer" - "google.golang.org/grpc" -) - -func TestMain(m *testing.M) { - serverErr := make(chan error) - - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - if err != nil { - log.Fatalf("got unexpected error while creating new listerner: %s", err) - } - - svc = newService() - server := grpc.NewServer() - mainflux.RegisterAuthServiceServer(server, grpcapi.NewServer(mocktracer.New(), svc)) - - // Start gRPC server in detached mode. 
- go func() { - serverErr <- server.Serve(listener) - }() - - code := m.Run() - - server.GracefulStop() - err = <-serverErr - if err != nil { - log.Fatalln("gPRC Server Terminated : ", err) - } - close(serverErr) - os.Exit(code) -} - -func newService() auth.Service { - repo := mocks.NewKeyRepository() - groupRepo := mocks.NewGroupRepository() - idProvider := uuid.NewMock() - - mockAuthzDB := map[string][]mocks.MockSubjectSet{} - mockAuthzDB[id] = append(mockAuthzDB[id], mocks.MockSubjectSet{Object: authoritiesObj, Relation: memberRelation}) - ketoMock := mocks.NewKetoMock(mockAuthzDB) - - tokenizer := jwt.New(secret) - - return auth.New(repo, groupRepo, idProvider, tokenizer, ketoMock, loginDuration) -} diff --git a/auth/api/http/doc.go b/auth/api/http/doc.go deleted file mode 100644 index 4a86453534..0000000000 --- a/auth/api/http/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 -package http diff --git a/auth/api/http/groups/endpoint.go b/auth/api/http/groups/endpoint.go deleted file mode 100644 index 5315bb4ba1..0000000000 --- a/auth/api/http/groups/endpoint.go +++ /dev/null @@ -1,370 +0,0 @@ -package groups - -import ( - "context" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/auth" -) - -func createGroupEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createGroupReq) - if err := req.validate(); err != nil { - return groupRes{}, err - } - - group := auth.Group{ - Name: req.Name, - Description: req.Description, - ParentID: req.ParentID, - Metadata: req.Metadata, - } - - group, err := svc.CreateGroup(ctx, req.token, group) - if err != nil { - return groupRes{}, err - } - - return groupRes{created: true, id: group.ID}, nil - } -} - -func viewGroupEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := 
request.(groupReq) - if err := req.validate(); err != nil { - return viewGroupRes{}, err - } - - group, err := svc.ViewGroup(ctx, req.token, req.id) - if err != nil { - return viewGroupRes{}, err - } - - res := viewGroupRes{ - ID: group.ID, - Name: group.Name, - Description: group.Description, - Metadata: group.Metadata, - ParentID: group.ParentID, - OwnerID: group.OwnerID, - CreatedAt: group.CreatedAt, - UpdatedAt: group.UpdatedAt, - } - - return res, nil - } -} - -func updateGroupEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(updateGroupReq) - if err := req.validate(); err != nil { - return groupRes{}, err - } - - group := auth.Group{ - ID: req.id, - Name: req.Name, - Description: req.Description, - Metadata: req.Metadata, - } - - _, err := svc.UpdateGroup(ctx, req.token, group) - if err != nil { - return groupRes{}, err - } - - res := groupRes{created: false} - return res, nil - } -} - -func deleteGroupEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(groupReq) - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.RemoveGroup(ctx, req.token, req.id); err != nil { - return nil, err - } - - return deleteRes{}, nil - } -} - -func listGroupsEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listGroupsReq) - if err := req.validate(); err != nil { - return groupPageRes{}, err - } - pm := auth.PageMetadata{ - Level: req.level, - Metadata: req.metadata, - } - page, err := svc.ListGroups(ctx, req.token, pm) - if err != nil { - return groupPageRes{}, err - } - - if req.tree { - return buildGroupsResponseTree(page), nil - } - - return buildGroupsResponse(page), nil - } -} - -func listMemberships(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, 
request interface{}) (interface{}, error) { - req := request.(listMembershipsReq) - if err := req.validate(); err != nil { - return memberPageRes{}, err - } - - pm := auth.PageMetadata{ - Offset: req.offset, - Limit: req.limit, - Metadata: req.metadata, - } - - page, err := svc.ListMemberships(ctx, req.token, req.id, pm) - if err != nil { - return memberPageRes{}, err - } - - return buildGroupsResponse(page), nil - } -} - -func shareGroupAccessEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(shareGroupAccessReq) - if err := req.validate(); err != nil { - return shareGroupRes{}, err - } - - if err := svc.AssignGroupAccessRights(ctx, req.token, req.ThingGroupID, req.userGroupID); err != nil { - return shareGroupRes{}, err - } - return shareGroupRes{}, nil - } -} - -func listChildrenEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listGroupsReq) - if err := req.validate(); err != nil { - return groupPageRes{}, err - } - - pm := auth.PageMetadata{ - Level: req.level, - Metadata: req.metadata, - } - page, err := svc.ListChildren(ctx, req.token, req.id, pm) - if err != nil { - return groupPageRes{}, err - } - - if req.tree { - return buildGroupsResponseTree(page), nil - } - - return buildGroupsResponse(page), nil - } -} - -func listParentsEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listGroupsReq) - if err := req.validate(); err != nil { - return groupPageRes{}, err - } - pm := auth.PageMetadata{ - Level: req.level, - Metadata: req.metadata, - } - - page, err := svc.ListParents(ctx, req.token, req.id, pm) - if err != nil { - return groupPageRes{}, err - } - - if req.tree { - return buildGroupsResponseTree(page), nil - } - - return buildGroupsResponse(page), nil - } -} - -func 
assignEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(assignReq) - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.Assign(ctx, req.token, req.groupID, req.Type, req.Members...); err != nil { - return nil, err - } - - return assignRes{}, nil - } -} - -func unassignEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(unassignReq) - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.Unassign(ctx, req.token, req.groupID, req.Members...); err != nil { - return nil, err - } - - return unassignRes{}, nil - } -} - -func listMembersEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listMembersReq) - if err := req.validate(); err != nil { - return memberPageRes{}, err - } - - pm := auth.PageMetadata{ - Offset: req.offset, - Limit: req.limit, - Metadata: req.metadata, - } - page, err := svc.ListMembers(ctx, req.token, req.id, req.groupType, pm) - if err != nil { - return memberPageRes{}, err - } - - return buildUsersResponse(page, req.groupType), nil - } -} - -func buildGroupsResponseTree(page auth.GroupPage) groupPageRes { - groupsMap := map[string]*auth.Group{} - // Parents' map keeps its array of children. 
- parentsMap := map[string][]*auth.Group{} - for i := range page.Groups { - if _, ok := groupsMap[page.Groups[i].ID]; !ok { - groupsMap[page.Groups[i].ID] = &page.Groups[i] - parentsMap[page.Groups[i].ID] = make([]*auth.Group, 0) - } - } - - for _, group := range groupsMap { - if children, ok := parentsMap[group.ParentID]; ok { - children = append(children, group) - parentsMap[group.ParentID] = children - } - } - - res := groupPageRes{ - pageRes: pageRes{ - Limit: page.Limit, - Offset: page.Offset, - Total: page.Total, - Level: page.Level, - }, - Groups: []viewGroupRes{}, - } - - for _, group := range groupsMap { - if children, ok := parentsMap[group.ID]; ok { - group.Children = children - } - - } - - for _, group := range groupsMap { - view := toViewGroupRes(*group) - if children, ok := parentsMap[group.ParentID]; len(children) == 0 || !ok { - res.Groups = append(res.Groups, view) - } - } - - return res -} - -func toViewGroupRes(group auth.Group) viewGroupRes { - view := viewGroupRes{ - ID: group.ID, - ParentID: group.ParentID, - OwnerID: group.OwnerID, - Name: group.Name, - Description: group.Description, - Metadata: group.Metadata, - Level: group.Level, - Path: group.Path, - Children: make([]*viewGroupRes, 0), - CreatedAt: group.CreatedAt, - UpdatedAt: group.UpdatedAt, - } - - for _, ch := range group.Children { - child := toViewGroupRes(*ch) - view.Children = append(view.Children, &child) - } - - return view -} - -func buildGroupsResponse(gp auth.GroupPage) groupPageRes { - res := groupPageRes{ - pageRes: pageRes{ - Total: gp.Total, - Level: gp.Level, - }, - Groups: []viewGroupRes{}, - } - - for _, group := range gp.Groups { - view := viewGroupRes{ - ID: group.ID, - ParentID: group.ParentID, - OwnerID: group.OwnerID, - Name: group.Name, - Description: group.Description, - Metadata: group.Metadata, - Level: group.Level, - Path: group.Path, - CreatedAt: group.CreatedAt, - UpdatedAt: group.UpdatedAt, - } - res.Groups = append(res.Groups, view) - } - - return res 
-} - -func buildUsersResponse(mp auth.MemberPage, groupType string) memberPageRes { - res := memberPageRes{ - pageRes: pageRes{ - Total: mp.Total, - Offset: mp.Offset, - Limit: mp.Limit, - Name: mp.Name, - }, - Type: groupType, - Members: []string{}, - } - - for _, m := range mp.Members { - res.Members = append(res.Members, m.ID) - } - - return res -} diff --git a/auth/api/http/groups/endpoint_test.go b/auth/api/http/groups/endpoint_test.go deleted file mode 100644 index ad0f625d34..0000000000 --- a/auth/api/http/groups/endpoint_test.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package groups_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - httpapi "github.com/mainflux/mainflux/auth/api/http" - "github.com/mainflux/mainflux/auth/jwt" - "github.com/mainflux/mainflux/auth/mocks" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - contentType = "application/json" - email = "user@example.com" - secret = "secret" - id = "testID" - loginDuration = 30 * time.Minute -) - -type testRequest struct { - client *http.Client - method string - url string - contentType string - token string - body io.Reader -} - -func (tr testRequest) make() (*http.Response, error) { - req, err := http.NewRequest(tr.method, tr.url, tr.body) - req.Close = true - if err != nil { - return nil, err - } - if tr.token != "" { - req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) - } - if tr.contentType != "" { - req.Header.Set("Content-Type", tr.contentType) - } - return tr.client.Do(req) -} - -func newService() auth.Service { - keys := mocks.NewKeyRepository() - groups := 
mocks.NewGroupRepository() - idProvider := uuid.NewMock() - t := jwt.New(secret) - policies := mocks.NewKetoMock(map[string][]mocks.MockSubjectSet{}) - return auth.New(keys, groups, idProvider, t, policies, loginDuration) -} - -func newServer(svc auth.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(svc, mocktracer.New(), logger) - return httptest.NewServer(mux) -} - -func toJSON(data interface{}) string { - jsonData, _ := json.Marshal(data) - return string(jsonData) -} - -func TestShareGroupAccess(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - require.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - type shareGroupAccessReq struct { - token string - userGroupID string - ThingGroupID string `json:"thing_group_id"` - } - data := shareGroupAccessReq{token: apiToken, userGroupID: "ug", ThingGroupID: "tg"} - invalidData := shareGroupAccessReq{token: apiToken, userGroupID: "ug", ThingGroupID: ""} - - cases := []struct { - desc string - req string - contentType string - auth string - userGroupID string - status int - }{ - { - desc: "share a user group with thing group", - req: toJSON(data), - contentType: contentType, - auth: apiToken, - userGroupID: "ug", - status: http.StatusOK, - }, - { - desc: "share a user group with invalid thing group", - req: toJSON(invalidData), - contentType: contentType, - auth: apiToken, - userGroupID: "ug", - status: http.StatusBadRequest, - }, - { - desc: "share an invalid user group with thing group", - req: toJSON(data), - 
contentType: contentType, - auth: apiToken, - userGroupID: "", - status: http.StatusBadRequest, - }, - { - desc: "share an invalid user group with invalid thing group", - req: toJSON(invalidData), - contentType: contentType, - auth: apiToken, - userGroupID: "", - status: http.StatusBadRequest, - }, - { - desc: "share a user group with thing group with invalid content type", - req: toJSON(data), - contentType: "", - auth: apiToken, - userGroupID: "ug", - status: http.StatusUnsupportedMediaType, - }, - { - desc: "share a user group with thing group with invalid token", - req: toJSON(data), - contentType: contentType, - auth: "token", - userGroupID: "ug", - status: http.StatusUnauthorized, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/groups/%s/share", ts.URL, tc.userGroupID), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} diff --git a/auth/api/http/groups/requests.go b/auth/api/http/groups/requests.go deleted file mode 100644 index b95c22ebc2..0000000000 --- a/auth/api/http/groups/requests.go +++ /dev/null @@ -1,191 +0,0 @@ -package groups - -import ( - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" -) - -type createGroupReq struct { - token string - Name string `json:"name,omitempty"` - ParentID string `json:"parent_id,omitempty"` - Description string `json:"description,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (req createGroupReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - if len(req.Name) > maxNameSize || req.Name == "" { - return apiutil.ErrNameSize - } - - return nil -} - -type 
updateGroupReq struct { - token string - id string - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (req updateGroupReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type listGroupsReq struct { - token string - id string - level uint64 - // - `true` - result is JSON tree representing groups hierarchy, - // - `false` - result is JSON array of groups. - tree bool - metadata auth.GroupMetadata -} - -func (req listGroupsReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.level > auth.MaxLevel || req.level < auth.MinLevel { - return apiutil.ErrMaxLevelExceeded - } - - return nil -} - -type listMembersReq struct { - token string - id string - groupType string - offset uint64 - limit uint64 - tree bool - metadata auth.GroupMetadata -} - -func (req listMembersReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type listMembershipsReq struct { - token string - id string - offset uint64 - limit uint64 - metadata auth.GroupMetadata -} - -func (req listMembershipsReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type assignReq struct { - token string - groupID string - Type string `json:"type,omitempty"` - Members []string `json:"members"` -} - -func (req assignReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.Type == "" { - return apiutil.ErrMissingMemberType - } - - if req.groupID == "" { - return apiutil.ErrMissingID - } - - if len(req.Members) == 0 { - return apiutil.ErrEmptyList - } - - return nil -} - -type shareGroupAccessReq struct { - token string - 
userGroupID string - ThingGroupID string `json:"thing_group_id"` -} - -func (req shareGroupAccessReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.ThingGroupID == "" || req.userGroupID == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type unassignReq struct { - assignReq -} - -func (req unassignReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.groupID == "" { - return apiutil.ErrMissingID - } - - if len(req.Members) == 0 { - return apiutil.ErrEmptyList - } - - return nil -} - -type groupReq struct { - token string - id string -} - -func (req groupReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - return nil -} diff --git a/auth/api/http/groups/responses.go b/auth/api/http/groups/responses.go deleted file mode 100644 index bf364bf36b..0000000000 --- a/auth/api/http/groups/responses.go +++ /dev/null @@ -1,174 +0,0 @@ -package groups - -import ( - "fmt" - "net/http" - "time" - - "github.com/mainflux/mainflux" -) - -var ( - _ mainflux.Response = (*memberPageRes)(nil) - _ mainflux.Response = (*groupRes)(nil) - _ mainflux.Response = (*deleteRes)(nil) - _ mainflux.Response = (*assignRes)(nil) - _ mainflux.Response = (*unassignRes)(nil) -) - -type memberPageRes struct { - pageRes - Type string `json:"type"` - Members []string `json:"members"` -} - -func (res memberPageRes) Code() int { - return http.StatusOK -} - -func (res memberPageRes) Headers() map[string]string { - return map[string]string{} -} - -func (res memberPageRes) Empty() bool { - return false -} - -type shareGroupRes struct { -} - -func (res shareGroupRes) Code() int { - return http.StatusOK -} - -func (res shareGroupRes) Headers() map[string]string { - return map[string]string{} -} - -func (res shareGroupRes) Empty() bool { - return false -} - -type viewGroupRes struct { - ID string `json:"id"` - Name string 
`json:"name"` - OwnerID string `json:"owner_id"` - ParentID string `json:"parent_id,omitempty"` - Description string `json:"description,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - // Indicates a level in tree hierarchy from first group node - root. - Level int `json:"level"` - // Path in a tree consisting of group ids - // parentID1.parentID2.childID1 - // e.g. 01EXPM5Z8HRGFAEWTETR1X1441.01EXPKW2TVK74S5NWQ979VJ4PJ.01EXPKW2TVK74S5NWQ979VJ4PJ - Path string `json:"path"` - Children []*viewGroupRes `json:"children,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -func (res viewGroupRes) Code() int { - return http.StatusOK -} - -func (res viewGroupRes) Headers() map[string]string { - return map[string]string{} -} - -func (res viewGroupRes) Empty() bool { - return false -} - -type groupRes struct { - id string - created bool -} - -func (res groupRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res groupRes) Headers() map[string]string { - if res.created { - return map[string]string{ - "Location": fmt.Sprintf("/groups/%s", res.id), - } - } - - return map[string]string{} -} - -func (res groupRes) Empty() bool { - return true -} - -type groupPageRes struct { - pageRes - Groups []viewGroupRes `json:"groups"` -} - -type pageRes struct { - Limit uint64 `json:"limit,omitempty"` - Offset uint64 `json:"offset,omitempty"` - Total uint64 `json:"total"` - Level uint64 `json:"level"` - Name string `json:"name"` -} - -func (res groupPageRes) Code() int { - return http.StatusOK -} - -func (res groupPageRes) Headers() map[string]string { - return map[string]string{} -} - -func (res groupPageRes) Empty() bool { - return false -} - -type deleteRes struct{} - -func (res deleteRes) Code() int { - return http.StatusNoContent -} - -func (res deleteRes) Headers() map[string]string { - return map[string]string{} -} - -func (res deleteRes) Empty() bool { - 
return true -} - -type assignRes struct{} - -func (res assignRes) Code() int { - return http.StatusOK -} - -func (res assignRes) Headers() map[string]string { - return map[string]string{} -} - -func (res assignRes) Empty() bool { - return true -} - -type unassignRes struct{} - -func (res unassignRes) Code() int { - return http.StatusNoContent -} - -func (res unassignRes) Headers() map[string]string { - return map[string]string{} -} - -func (res unassignRes) Empty() bool { - return true -} diff --git a/auth/api/http/groups/transport.go b/auth/api/http/groups/transport.go deleted file mode 100644 index 0f2af5ddf6..0000000000 --- a/auth/api/http/groups/transport.go +++ /dev/null @@ -1,354 +0,0 @@ -package groups - -import ( - "context" - "encoding/json" - "net/http" - "strings" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/opentracing/opentracing-go" -) - -const ( - contentType = "application/json" - maxNameSize = 254 - offsetKey = "offset" - limitKey = "limit" - levelKey = "level" - metadataKey = "metadata" - treeKey = "tree" - groupType = "type" - defOffset = 0 - defLimit = 10 - defLevel = 1 -) - -// MakeHandler returns a HTTP handler for API endpoints. 
-func MakeHandler(svc auth.Service, mux *bone.Mux, tracer opentracing.Tracer, logger logger.Logger) *bone.Mux { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), - } - mux.Post("/groups", kithttp.NewServer( - kitot.TraceServer(tracer, "create_group")(createGroupEndpoint(svc)), - decodeGroupCreate, - encodeResponse, - opts..., - )) - - mux.Get("/groups/:groupID", kithttp.NewServer( - kitot.TraceServer(tracer, "view_group")(viewGroupEndpoint(svc)), - decodeGroupRequest, - encodeResponse, - opts..., - )) - - mux.Put("/groups/:groupID", kithttp.NewServer( - kitot.TraceServer(tracer, "update_group")(updateGroupEndpoint(svc)), - decodeGroupUpdate, - encodeResponse, - opts..., - )) - - mux.Delete("/groups/:groupID", kithttp.NewServer( - kitot.TraceServer(tracer, "delete_group")(deleteGroupEndpoint(svc)), - decodeGroupRequest, - encodeResponse, - opts..., - )) - - mux.Post("/groups/:groupID/share", kithttp.NewServer( - kitot.TraceServer(tracer, "share_group_access")(shareGroupAccessEndpoint(svc)), - decodeShareGroupRequest, - encodeResponse, - opts..., - )) - - mux.Get("/groups", kithttp.NewServer( - kitot.TraceServer(tracer, "list_groups")(listGroupsEndpoint(svc)), - decodeListGroupsRequest, - encodeResponse, - opts..., - )) - - mux.Get("/groups/:groupID/children", kithttp.NewServer( - kitot.TraceServer(tracer, "list_children")(listChildrenEndpoint(svc)), - decodeListGroupsRequest, - encodeResponse, - opts..., - )) - - mux.Get("/groups/:groupID/parents", kithttp.NewServer( - kitot.TraceServer(tracer, "list_parents_groups")(listParentsEndpoint(svc)), - decodeListGroupsRequest, - encodeResponse, - opts..., - )) - - mux.Post("/groups/:groupID/members/assign", kithttp.NewServer( - kitot.TraceServer(tracer, "assign")(assignEndpoint(svc)), - decodeAssignRequest, - encodeResponse, - opts..., - )) - - mux.Post("/groups/:groupID/members/unassign", kithttp.NewServer( - kitot.TraceServer(tracer, 
"unassign")(unassignEndpoint(svc)), - decodeUnassignRequest, - encodeResponse, - opts..., - )) - - mux.Get("/groups/:groupID/members", kithttp.NewServer( - kitot.TraceServer(tracer, "list_members")(listMembersEndpoint(svc)), - decodeListMembersRequest, - encodeResponse, - opts..., - )) - - mux.Get("/members/:memberID/groups", kithttp.NewServer( - kitot.TraceServer(tracer, "list_memberships")(listMemberships(svc)), - decodeListMembershipsRequest, - encodeResponse, - opts..., - )) - - return mux -} - -func decodeShareGroupRequest(ctx context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := shareGroupAccessReq{ - token: apiutil.ExtractBearerToken(r), - userGroupID: bone.GetValue(r, "groupID"), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeListGroupsRequest(_ context.Context, r *http.Request) (interface{}, error) { - l, err := apiutil.ReadUintQuery(r, levelKey, defLevel) - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - - t, err := apiutil.ReadBoolQuery(r, treeKey, false) - if err != nil { - return nil, err - } - - req := listGroupsReq{ - token: apiutil.ExtractBearerToken(r), - level: l, - metadata: m, - tree: t, - id: bone.GetValue(r, "groupID"), - } - return req, nil -} - -func decodeListMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - - tree, err := apiutil.ReadBoolQuery(r, treeKey, false) - if err != nil { - 
return nil, err - } - - t, err := apiutil.ReadStringQuery(r, groupType, "") - if err != nil { - return nil, err - } - - req := listMembersReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "groupID"), - groupType: t, - offset: o, - limit: l, - metadata: m, - tree: tree, - } - return req, nil -} - -func decodeListMembershipsRequest(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - - req := listMembershipsReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "memberID"), - offset: o, - limit: l, - metadata: m, - } - - return req, nil -} - -func decodeGroupCreate(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := createGroupReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeGroupUpdate(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := updateGroupReq{ - id: bone.GetValue(r, "groupID"), - token: apiutil.ExtractBearerToken(r), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeGroupRequest(_ context.Context, r *http.Request) (interface{}, error) { - req := groupReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "groupID"), - } - - return req, nil -} - -func decodeAssignRequest(_ 
context.Context, r *http.Request) (interface{}, error) { - req := assignReq{ - token: apiutil.ExtractBearerToken(r), - groupID: bone.GetValue(r, "groupID"), - } - - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeUnassignRequest(_ context.Context, r *http.Request) (interface{}, error) { - req := unassignReq{ - assignReq{ - token: apiutil.ExtractBearerToken(r), - groupID: bone.GetValue(r, "groupID"), - }, - } - - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - w.Header().Set("Content-Type", contentType) - - if ar, ok := response.(mainflux.Response); ok { - for k, v := range ar.Headers() { - w.Header().Set(k, v) - } - - w.WriteHeader(ar.Code()) - - if ar.Empty() { - return nil - } - } - - return json.NewEncoder(w).Encode(response) -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch { - case errors.Contains(err, errors.ErrMalformedEntity), - err == apiutil.ErrMissingID, - err == apiutil.ErrEmptyList, - err == apiutil.ErrMissingMemberType, - err == apiutil.ErrNameSize: - w.WriteHeader(http.StatusBadRequest) - case errors.Contains(err, errors.ErrAuthentication): - w.WriteHeader(http.StatusUnauthorized) - case errors.Contains(err, errors.ErrNotFound): - w.WriteHeader(http.StatusNotFound) - case errors.Contains(err, errors.ErrConflict): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrAuthorization): - w.WriteHeader(http.StatusForbidden) - case errors.Contains(err, auth.ErrMemberAlreadyAssigned): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrUnsupportedContentType): - w.WriteHeader(http.StatusUnsupportedMediaType) - - case errors.Contains(err, errors.ErrCreateEntity), - 
errors.Contains(err, errors.ErrUpdateEntity), - errors.Contains(err, errors.ErrViewEntity), - errors.Contains(err, errors.ErrRemoveEntity): - w.WriteHeader(http.StatusInternalServerError) - - default: - w.WriteHeader(http.StatusInternalServerError) - } - - if errorVal, ok := err.(errors.Error); ok { - w.Header().Set("Content-Type", contentType) - if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { - w.WriteHeader(http.StatusInternalServerError) - } - } -} diff --git a/auth/api/http/keys/endpoint.go b/auth/api/http/keys/endpoint.go deleted file mode 100644 index 9233afbb4b..0000000000 --- a/auth/api/http/keys/endpoint.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "context" - "time" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/auth" -) - -func issueEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(issueKeyReq) - if err := req.validate(); err != nil { - return nil, err - } - - now := time.Now().UTC() - newKey := auth.Key{ - IssuedAt: now, - Type: req.Type, - } - - duration := time.Duration(req.Duration * time.Second) - if duration != 0 { - exp := now.Add(duration) - newKey.ExpiresAt = exp - } - - key, secret, err := svc.Issue(ctx, req.token, newKey) - if err != nil { - return nil, err - } - - res := issueKeyRes{ - ID: key.ID, - Value: secret, - IssuedAt: key.IssuedAt, - } - if !key.ExpiresAt.IsZero() { - res.ExpiresAt = &key.ExpiresAt - } - return res, nil - } -} - -func retrieveEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(keyReq) - - if err := req.validate(); err != nil { - return nil, err - } - - key, err := svc.RetrieveKey(ctx, req.token, req.id) - - if err != nil { - return nil, err - } - ret := retrieveKeyRes{ - ID: key.ID, - 
IssuerID: key.IssuerID, - Subject: key.Subject, - Type: key.Type, - IssuedAt: key.IssuedAt, - } - if !key.ExpiresAt.IsZero() { - ret.ExpiresAt = &key.ExpiresAt - } - - return ret, nil - } -} - -func retrieveKeysEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listKeysReq) - - if err := req.validate(); err != nil { - return nil, err - } - pm := auth.PageMetadata{ - Offset: req.offset, - Limit: req.limit, - Subject: req.subject, - Type: req.keyType, - } - kp, err := svc.RetrieveKeys(ctx, req.token, pm) - if err != nil { - return nil, err - } - - res := keyPageRes{ - pageRes: pageRes{ - Limit: kp.Limit, - Offset: kp.Offset, - Total: kp.Total, - }, - Keys: []retrieveKeyRes{}, - } - - for _, key := range kp.Keys { - view := retrieveKeyRes{ - ID: key.ID, - IssuerID: key.IssuerID, - Subject: key.Subject, - Type: key.Type, - IssuedAt: key.IssuedAt, - ExpiresAt: &key.ExpiresAt, - } - res.Keys = append(res.Keys, view) - } - - return res, nil - } -} - -func revokeEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(keyReq) - - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.Revoke(ctx, req.token, req.id); err != nil { - return nil, err - } - - return revokeKeyRes{}, nil - } -} diff --git a/auth/api/http/keys/endpoint_test.go b/auth/api/http/keys/endpoint_test.go deleted file mode 100644 index c03057b602..0000000000 --- a/auth/api/http/keys/endpoint_test.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package keys_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - httpapi "github.com/mainflux/mainflux/auth/api/http" - "github.com/mainflux/mainflux/auth/jwt" - 
"github.com/mainflux/mainflux/auth/mocks" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - secret = "secret" - contentType = "application/json" - id = "123e4567-e89b-12d3-a456-000000000001" - email = "user@example.com" - loginDuration = 30 * time.Minute -) - -type issueRequest struct { - Duration time.Duration `json:"duration,omitempty"` - Type uint32 `json:"type,omitempty"` -} - -type testRequest struct { - client *http.Client - method string - url string - contentType string - token string - body io.Reader -} - -func (tr testRequest) make() (*http.Response, error) { - req, err := http.NewRequest(tr.method, tr.url, tr.body) - if err != nil { - return nil, err - } - if tr.token != "" { - req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) - } - if tr.contentType != "" { - req.Header.Set("Content-Type", tr.contentType) - } - - req.Header.Set("Referer", "http://localhost") - return tr.client.Do(req) -} - -func newService() auth.Service { - repo := mocks.NewKeyRepository() - groupRepo := mocks.NewGroupRepository() - idProvider := uuid.NewMock() - t := jwt.New(secret) - - mockAuthzDB := map[string][]mocks.MockSubjectSet{} - mockAuthzDB[id] = append(mockAuthzDB[id], mocks.MockSubjectSet{Object: "authorities", Relation: "member"}) - ketoMock := mocks.NewKetoMock(mockAuthzDB) - - return auth.New(repo, groupRepo, idProvider, t, ketoMock, loginDuration) -} - -func newServer(svc auth.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(svc, mocktracer.New(), logger) - return httptest.NewServer(mux) -} - -func toJSON(data interface{}) string { - jsonData, _ := json.Marshal(data) - return string(jsonData) -} - -func TestIssue(t *testing.T) { - svc := newService() - _, loginSecret, err := 
svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - lk := issueRequest{Type: auth.LoginKey} - ak := issueRequest{Type: auth.APIKey, Duration: time.Hour} - rk := issueRequest{Type: auth.RecoveryKey} - - cases := []struct { - desc string - req string - ct string - token string - status int - }{ - { - desc: "issue login key with empty token", - req: toJSON(lk), - ct: contentType, - token: "", - status: http.StatusUnauthorized, - }, - { - desc: "issue API key", - req: toJSON(ak), - ct: contentType, - token: loginSecret, - status: http.StatusCreated, - }, - { - desc: "issue recovery key", - req: toJSON(rk), - ct: contentType, - token: loginSecret, - status: http.StatusCreated, - }, - { - desc: "issue login key wrong content type", - req: toJSON(lk), - ct: "", - token: loginSecret, - status: http.StatusUnsupportedMediaType, - }, - { - desc: "issue recovery key wrong content type", - req: toJSON(rk), - ct: "", - token: loginSecret, - status: http.StatusUnsupportedMediaType, - }, - { - desc: "issue key with an invalid token", - req: toJSON(ak), - ct: contentType, - token: "wrong", - status: http.StatusUnauthorized, - }, - { - desc: "issue recovery key with empty token", - req: toJSON(rk), - ct: contentType, - token: "", - status: http.StatusUnauthorized, - }, - { - desc: "issue key with invalid request", - req: "{", - ct: contentType, - token: loginSecret, - status: http.StatusBadRequest, - }, - { - desc: "issue key with invalid JSON", - req: "{invalid}", - ct: contentType, - token: loginSecret, - status: http.StatusBadRequest, - }, - { - desc: "issue key with invalid JSON content", - req: `{"Type":{"key":"value"}}`, - ct: contentType, - token: loginSecret, - status: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - req := testRequest{ - 
client: client, - method: http.MethodPost, - url: fmt.Sprintf("%s/keys", ts.URL), - contentType: tc.ct, - token: tc.token, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} - -func TestRetrieve(t *testing.T) { - svc := newService() - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - key := auth.Key{Type: auth.APIKey, IssuedAt: time.Now(), IssuerID: id, Subject: email} - - k, _, err := svc.Issue(context.Background(), loginSecret, key) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - cases := []struct { - desc string - id string - token string - status int - }{ - { - desc: "retrieve an existing key", - id: k.ID, - token: loginSecret, - status: http.StatusOK, - }, - { - desc: "retrieve a non-existing key", - id: "non-existing", - token: loginSecret, - status: http.StatusNotFound, - }, - { - desc: "retrieve a key with an invalid token", - id: k.ID, - token: "wrong", - status: http.StatusUnauthorized, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodGet, - url: fmt.Sprintf("%s/keys/%s", ts.URL, tc.id), - token: tc.token, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} - -func TestRetrieveAll(t *testing.T) { - svc := newService() - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, 
IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - n := uint64(100) - var data []auth.Key - for i := uint64(0); i < n; i++ { - key := auth.Key{Type: auth.APIKey, IssuedAt: time.Now(), IssuerID: id, Subject: fmt.Sprintf("user_%d@example.com", i)} - - k, _, err := svc.Issue(context.Background(), loginSecret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - k.ExpiresAt = time.Time{} - data = append(data, k) - } - - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - cases := []struct { - desc string - url string - auth string - status int - res []auth.Key - }{ - { - desc: "get a list of keys", - auth: loginSecret, - status: http.StatusOK, - url: fmt.Sprintf("?offset=%d&limit=%d", 0, 5), - res: data[0:5], - }, - { - desc: "get a list of keys with invalid token", - auth: "wrongValue", - status: http.StatusUnauthorized, - url: fmt.Sprintf("?offset=%d&limit=%d", 0, 1), - res: nil, - }, - { - desc: "get a list of keys with empty token", - auth: "", - status: http.StatusUnauthorized, - url: fmt.Sprintf("?offset=%d&limit=%d", 0, 1), - res: nil, - }, - { - desc: "get a list of keys with negative offset", - auth: loginSecret, - status: http.StatusBadRequest, - url: fmt.Sprintf("?offset=%d&limit=%d", -1, 5), - res: nil, - }, - { - desc: "get a list of keys with negative limit", - auth: loginSecret, - status: http.StatusBadRequest, - url: fmt.Sprintf("?offset=%d&limit=%d", 1, -5), - res: nil, - }, - { - desc: "get a list of keys with zero limit and offset 1", - auth: loginSecret, - status: http.StatusBadRequest, - url: fmt.Sprintf("?offset=%d&limit=%d", 1, 0), - res: nil, - }, - { - desc: "get a list of keys without offset", - auth: loginSecret, - status: http.StatusOK, - url: fmt.Sprintf("?limit=%d", 5), - res: data[0:5], - }, - { - desc: "get a list of keys without limit", - auth: loginSecret, - status: http.StatusOK, - url: 
fmt.Sprintf("?offset=%d", 1), - res: data[1:11], - }, - { - desc: "get a list of keys with redundant query params", - auth: loginSecret, - status: http.StatusOK, - url: fmt.Sprintf("?offset=%d&limit=%d&value=something", 0, 5), - res: data[0:5], - }, - { - desc: "get a list of keys with default URL", - auth: loginSecret, - status: http.StatusOK, - url: "", - res: data[0:10], - }, - { - desc: "get a list of keys with invalid number of params", - auth: loginSecret, - status: http.StatusBadRequest, - url: "?offset=4&limit=4&limit=5&offset=5", - res: nil, - }, - { - desc: "get a list of keys with invalid offset", - auth: loginSecret, - status: http.StatusBadRequest, - url: "?offset=e&limit=5", - res: nil, - }, - { - desc: "get a list of keys with invalid limit", - auth: loginSecret, - status: http.StatusBadRequest, - url: "?offset=5&limit=e", - res: nil, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodGet, - url: fmt.Sprintf("%s/keys%s", ts.URL, tc.url), - token: tc.auth, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} - -func TestRevoke(t *testing.T) { - svc := newService() - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - key := auth.Key{Type: auth.APIKey, IssuedAt: time.Now(), IssuerID: id, Subject: email} - - k, _, err := svc.Issue(context.Background(), loginSecret, key) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - cases := []struct { - desc string - id string - token string - status int - }{ - { - desc: "revoke an existing key", - id: 
k.ID, - token: loginSecret, - status: http.StatusNoContent, - }, - { - desc: "revoke a non-existing key", - id: "non-existing", - token: loginSecret, - status: http.StatusNoContent, - }, - { - desc: "revoke key with invalid token", - id: k.ID, - token: "wrong", - status: http.StatusUnauthorized}, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodDelete, - url: fmt.Sprintf("%s/keys/%s", ts.URL, tc.id), - token: tc.token, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} diff --git a/auth/api/http/keys/requests.go b/auth/api/http/keys/requests.go deleted file mode 100644 index 8f2ef0b32e..0000000000 --- a/auth/api/http/keys/requests.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" -) - -type issueKeyReq struct { - token string - Type uint32 `json:"type,omitempty"` - Duration time.Duration `json:"duration,omitempty"` -} - -// It is not possible to issue Reset key using HTTP API. 
-func (req issueKeyReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.Type != auth.LoginKey && - req.Type != auth.RecoveryKey && - req.Type != auth.APIKey { - return apiutil.ErrInvalidAPIKey - } - - return nil -} - -type keyReq struct { - token string - id string -} - -func (req keyReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - return nil -} - -type listKeysReq struct { - token string - subject string - keyType uint32 - offset uint64 - limit uint64 -} - -func (req listKeysReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.limit < 1 { - return apiutil.ErrLimitSize - } - - return nil -} diff --git a/auth/api/http/keys/responses.go b/auth/api/http/keys/responses.go deleted file mode 100644 index 31fd279341..0000000000 --- a/auth/api/http/keys/responses.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "net/http" - "time" - - "github.com/mainflux/mainflux" -) - -var ( - _ mainflux.Response = (*issueKeyRes)(nil) - _ mainflux.Response = (*revokeKeyRes)(nil) -) - -type issueKeyRes struct { - ID string `json:"id,omitempty"` - Value string `json:"value,omitempty"` - IssuedAt time.Time `json:"issued_at,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` -} - -func (res issueKeyRes) Code() int { - return http.StatusCreated -} - -func (res issueKeyRes) Headers() map[string]string { - return map[string]string{} -} - -func (res issueKeyRes) Empty() bool { - return res.Value == "" -} - -type retrieveKeyRes struct { - ID string `json:"id,omitempty"` - IssuerID string `json:"issuer_id,omitempty"` - Subject string `json:"subject,omitempty"` - Type uint32 `json:"type,omitempty"` - IssuedAt time.Time `json:"issued_at,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` -} - -func (res retrieveKeyRes) 
Code() int { - return http.StatusOK -} - -func (res retrieveKeyRes) Headers() map[string]string { - return map[string]string{} -} - -func (res retrieveKeyRes) Empty() bool { - return false -} - -type keyPageRes struct { - pageRes - Keys []retrieveKeyRes `json:"keys"` -} - -type pageRes struct { - Limit uint64 `json:"limit,omitempty"` - Offset uint64 `json:"offset,omitempty"` - Total uint64 `json:"total"` -} - -type revokeKeyRes struct { -} - -func (res revokeKeyRes) Code() int { - return http.StatusNoContent -} - -func (res revokeKeyRes) Headers() map[string]string { - return map[string]string{} -} - -func (res revokeKeyRes) Empty() bool { - return true -} diff --git a/auth/api/http/keys/transport.go b/auth/api/http/keys/transport.go deleted file mode 100644 index 1de1cdff3f..0000000000 --- a/auth/api/http/keys/transport.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package keys - -import ( - "context" - "encoding/json" - "net/http" - "strings" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/opentracing/opentracing-go" -) - -const ( - contentType = "application/json" - offsetKey = "offset" - limitKey = "limit" - subjectKey = "subject" - typeKey = "type" - defOffset = 0 - defLimit = 10 - defType = 2 -) - -// MakeHandler returns a HTTP handler for API endpoints. 
-func MakeHandler(svc auth.Service, mux *bone.Mux, tracer opentracing.Tracer, logger logger.Logger) *bone.Mux { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), - } - mux.Post("/keys", kithttp.NewServer( - kitot.TraceServer(tracer, "issue")(issueEndpoint(svc)), - decodeIssue, - encodeResponse, - opts..., - )) - mux.Get("/keys", kithttp.NewServer( - kitot.TraceServer(tracer, "issue")(retrieveKeysEndpoint(svc)), - decodeListKeysRequest, - encodeResponse, - opts..., - )) - - mux.Get("/keys/:keyID", kithttp.NewServer( - kitot.TraceServer(tracer, "retrieve")(retrieveEndpoint(svc)), - decodeKeyReq, - encodeResponse, - opts..., - )) - - mux.Delete("/keys/:keyID", kithttp.NewServer( - kitot.TraceServer(tracer, "revoke")(revokeEndpoint(svc)), - decodeKeyReq, - encodeResponse, - opts..., - )) - - return mux -} - -func decodeIssue(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := issueKeyReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeKeyReq(_ context.Context, r *http.Request) (interface{}, error) { - req := keyReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "keyID"), - } - return req, nil -} - -func decodeListKeysRequest(_ context.Context, r *http.Request) (interface{}, error) { - s, err := apiutil.ReadStringQuery(r, subjectKey, "") - if err != nil { - return nil, err - } - - t, err := apiutil.ReadUintQuery(r, typeKey, defType) - if err != nil { - return nil, err - } - - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - req := listKeysReq{ - token: 
apiutil.ExtractBearerToken(r), - subject: s, - keyType: uint32(t), - offset: o, - limit: l, - } - return req, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - w.Header().Set("Content-Type", contentType) - - if ar, ok := response.(mainflux.Response); ok { - for k, v := range ar.Headers() { - w.Header().Set(k, v) - } - - w.WriteHeader(ar.Code()) - - if ar.Empty() { - return nil - } - } - - return json.NewEncoder(w).Encode(response) -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch { - case errors.Contains(err, errors.ErrMalformedEntity), - err == apiutil.ErrMissingID, - err == apiutil.ErrInvalidAPIKey: - w.WriteHeader(http.StatusBadRequest) - case errors.Contains(err, errors.ErrAuthentication), - err == apiutil.ErrBearerToken: - w.WriteHeader(http.StatusUnauthorized) - case errors.Contains(err, errors.ErrNotFound): - w.WriteHeader(http.StatusNotFound) - case errors.Contains(err, errors.ErrInvalidQueryParams), - errors.Contains(err, errors.ErrMalformedEntity), - err == apiutil.ErrMissingID, - err == apiutil.ErrBearerKey, - err == apiutil.ErrLimitSize, - err == apiutil.ErrOffsetSize, - err == apiutil.ErrInvalidIDFormat: - w.WriteHeader(http.StatusBadRequest) - case errors.Contains(err, errors.ErrConflict): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrUnsupportedContentType): - w.WriteHeader(http.StatusUnsupportedMediaType) - default: - w.WriteHeader(http.StatusInternalServerError) - } - - if errorVal, ok := err.(errors.Error); ok { - w.Header().Set("Content-Type", contentType) - if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { - w.WriteHeader(http.StatusInternalServerError) - } - } -} diff --git a/auth/api/http/policies/endpoint.go b/auth/api/http/policies/endpoint.go deleted file mode 100644 index e6a79617eb..0000000000 --- a/auth/api/http/policies/endpoint.go +++ /dev/null @@ -1,38 +0,0 @@ -package policies 
- -import ( - "context" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/auth" -) - -func createPolicyEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(policiesReq) - if err := req.validate(); err != nil { - return createPolicyRes{}, err - } - - if err := svc.AddPolicies(ctx, req.token, req.Object, req.SubjectIDs, req.Policies); err != nil { - return createPolicyRes{}, err - } - - return createPolicyRes{created: true}, nil - } -} - -func deletePoliciesEndpoint(svc auth.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(policiesReq) - if err := req.validate(); err != nil { - return deletePoliciesRes{}, err - } - - if err := svc.DeletePolicies(ctx, req.token, req.Object, req.SubjectIDs, req.Policies); err != nil { - return deletePoliciesRes{}, err - } - - return deletePoliciesRes{deleted: true}, nil - } -} diff --git a/auth/api/http/policies/endpoint_test.go b/auth/api/http/policies/endpoint_test.go deleted file mode 100644 index 1189316c5b..0000000000 --- a/auth/api/http/policies/endpoint_test.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package policies_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - httpapi "github.com/mainflux/mainflux/auth/api/http" - "github.com/mainflux/mainflux/auth/jwt" - "github.com/mainflux/mainflux/auth/mocks" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - secret = "secret" - contentType = "application/json" - id = uuid.Prefix + "-000000000001" 
- email = "user@example.com" - unauthzID = uuid.Prefix + "-000000000002" - unauthzEmail = "unauthz@example.com" - loginDuration = 30 * time.Minute -) - -type testRequest struct { - client *http.Client - method string - url string - contentType string - token string - body io.Reader -} - -func (tr testRequest) make() (*http.Response, error) { - req, err := http.NewRequest(tr.method, tr.url, tr.body) - if err != nil { - return nil, err - } - if tr.token != "" { - req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) - } - if tr.contentType != "" { - req.Header.Set("Content-Type", tr.contentType) - } - - req.Header.Set("Referer", "http://localhost") - return tr.client.Do(req) -} - -func newService() auth.Service { - repo := mocks.NewKeyRepository() - groupRepo := mocks.NewGroupRepository() - idProvider := uuid.NewMock() - t := jwt.New(secret) - - mockAuthzDB := map[string][]mocks.MockSubjectSet{} - mockAuthzDB[id] = append(mockAuthzDB[id], mocks.MockSubjectSet{Object: "authorities", Relation: "member"}) - mockAuthzDB[unauthzID] = append(mockAuthzDB[unauthzID], mocks.MockSubjectSet{Object: "users", Relation: "member"}) - ketoMock := mocks.NewKetoMock(mockAuthzDB) - - return auth.New(repo, groupRepo, idProvider, t, ketoMock, loginDuration) -} - -func newServer(svc auth.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(svc, mocktracer.New(), logger) - return httptest.NewServer(mux) -} - -func toJSON(data interface{}) string { - jsonData, _ := json.Marshal(data) - return string(jsonData) -} - -type addPolicyRequest struct { - SubjectIDs []string `json:"subjects"` - Policies []string `json:"policies"` - Object string `json:"object"` -} - -func TestAddPolicies(t *testing.T) { - svc := newService() - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - _, 
userLoginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: unauthzID, Subject: unauthzEmail}) - require.Nil(t, err, fmt.Sprintf("Issuing unauthorized user's key expected to succeed: %s", err)) - - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - valid := addPolicyRequest{Object: "obj", Policies: []string{"read"}, SubjectIDs: []string{"user1", "user2"}} - multipleValid := addPolicyRequest{Object: "obj", Policies: []string{"write", "delete"}, SubjectIDs: []string{"user1", "user2"}} - invalidObject := addPolicyRequest{Object: "", Policies: []string{"read"}, SubjectIDs: []string{"user1", "user2"}} - invalidPolicies := addPolicyRequest{Object: "obj", Policies: []string{"read", "invalid"}, SubjectIDs: []string{"user1", "user2"}} - invalidSubjects := addPolicyRequest{Object: "obj", Policies: []string{"read", "access"}, SubjectIDs: []string{"", "user2"}} - - cases := []struct { - desc string - token string - ct string - status int - req string - }{ - { - desc: "Add policies with authorized access", - token: loginSecret, - ct: contentType, - status: http.StatusCreated, - req: toJSON(valid), - }, - { - desc: "Add multiple policies to multiple user", - token: loginSecret, - ct: contentType, - status: http.StatusCreated, - req: toJSON(multipleValid), - }, - { - desc: "Add policies with unauthorized access", - token: userLoginSecret, - ct: contentType, - status: http.StatusForbidden, - req: toJSON(valid), - }, - { - desc: "Add policies with invalid token", - token: "invalid", - ct: contentType, - status: http.StatusUnauthorized, - req: toJSON(valid), - }, - { - desc: "Add policies with empty token", - token: "", - ct: contentType, - status: http.StatusUnauthorized, - req: toJSON(valid), - }, - { - desc: "Add policies with invalid content type", - token: loginSecret, - ct: "text/html", - status: http.StatusUnsupportedMediaType, - req: toJSON(valid), - }, - { - desc: "Add policies with empty content 
type", - token: loginSecret, - ct: "", - status: http.StatusUnsupportedMediaType, - req: toJSON(valid), - }, - { - desc: "Add policies with invalid object field in request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: toJSON(invalidObject), - }, - { - desc: "Add policies with invalid policies field in request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: toJSON(invalidPolicies), - }, - { - desc: "Add policies with invalid subjects field in request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: toJSON(invalidSubjects), - }, - { - desc: "Add policies with empty request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: "", - }, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPost, - url: fmt.Sprintf("%s/policies", ts.URL), - contentType: tc.ct, - token: tc.token, - body: strings.NewReader(tc.req), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} - -func TestDeletePolicies(t *testing.T) { - svc := newService() - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing user key expected to succeed: %s", err)) - - _, userLoginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: unauthzID, Subject: unauthzEmail}) - require.Nil(t, err, fmt.Sprintf("Issuing unauthorized user's key expected to succeed: %s", err)) - - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - policies := addPolicyRequest{Object: "obj", Policies: []string{"read", "write", "delete"}, SubjectIDs: 
[]string{"user1", "user2", "user3"}} - err = svc.AddPolicies(context.Background(), loginSecret, policies.Object, policies.SubjectIDs, policies.Policies) - require.Nil(t, err, fmt.Sprintf("Adding policies expected to succeed: %s", err)) - - validSingleDeleteReq := addPolicyRequest{Object: "obj", Policies: []string{"read"}, SubjectIDs: []string{"user1"}} - validMultipleDeleteReq := addPolicyRequest{Object: "obj", Policies: []string{"write", "delete"}, SubjectIDs: []string{"user2", "user3"}} - invalidObject := addPolicyRequest{Object: "", Policies: []string{"read"}, SubjectIDs: []string{"user1", "user2"}} - invalidPolicies := addPolicyRequest{Object: "obj", Policies: []string{"read", "invalid"}, SubjectIDs: []string{"user1", "user2"}} - invalidSubjects := addPolicyRequest{Object: "obj", Policies: []string{"read", "access"}, SubjectIDs: []string{"", "user2"}} - - cases := []struct { - desc string - token string - ct string - req string - status int - }{ - { - desc: "Delete policies with unauthorized access", - token: userLoginSecret, - ct: contentType, - status: http.StatusForbidden, - req: toJSON(validMultipleDeleteReq), - }, - { - desc: "Delete policies with invalid token", - token: "invalid", - ct: contentType, - status: http.StatusUnauthorized, - req: toJSON(validSingleDeleteReq), - }, - { - desc: "Delete policies with empty token", - token: "", - ct: contentType, - status: http.StatusUnauthorized, - req: toJSON(validSingleDeleteReq), - }, - { - desc: "Delete policies with authorized access", - token: loginSecret, - ct: contentType, - status: http.StatusNoContent, - req: toJSON(validSingleDeleteReq), - }, - { - desc: "Delete multiple policies to multiple user", - token: loginSecret, - ct: contentType, - status: http.StatusNoContent, - req: toJSON(validMultipleDeleteReq), - }, - { - desc: "Delete policies with invalid content type", - token: loginSecret, - ct: "text/html", - status: http.StatusUnsupportedMediaType, - req: toJSON(validMultipleDeleteReq), - }, - { - 
desc: "Delete policies with empty content type", - token: loginSecret, - ct: "", - status: http.StatusUnsupportedMediaType, - req: toJSON(validMultipleDeleteReq), - }, - { - desc: "Delete policies with invalid object field in request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: toJSON(invalidObject), - }, - { - desc: "Delete policies with invalid policies field in request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: toJSON(invalidPolicies), - }, - { - desc: "Delete policies with invalid subjects field in request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: toJSON(invalidSubjects), - }, - { - desc: "Delete policies with empty request body", - token: loginSecret, - ct: contentType, - status: http.StatusBadRequest, - req: "", - }, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPut, - url: fmt.Sprintf("%s/policies", ts.URL), - contentType: tc.ct, - token: tc.token, - body: strings.NewReader(tc.req), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - } -} diff --git a/auth/api/http/policies/requests.go b/auth/api/http/policies/requests.go deleted file mode 100644 index a97de7163b..0000000000 --- a/auth/api/http/policies/requests.go +++ /dev/null @@ -1,64 +0,0 @@ -package policies - -import "github.com/mainflux/mainflux/internal/apiutil" - -// Action represents an enum for the policies used in the Mainflux. 
-type Action int - -const ( - Create Action = iota - Read - Write - Delete - Access - Member - Unknown -) - -var actions = map[string]Action{ - "create": Create, - "read": Read, - "write": Write, - "delete": Delete, - "access": Access, - "member": Member, -} - -type policiesReq struct { - token string - SubjectIDs []string `json:"subjects"` - Policies []string `json:"policies"` - Object string `json:"object"` -} - -func (req policiesReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if len(req.SubjectIDs) == 0 { - return apiutil.ErrEmptyList - } - - if len(req.Policies) == 0 { - return apiutil.ErrEmptyList - } - - if req.Object == "" { - return apiutil.ErrMissingPolicyObj - } - - for _, policy := range req.Policies { - if _, ok := actions[policy]; !ok { - return apiutil.ErrMalformedPolicy - } - } - - for _, subID := range req.SubjectIDs { - if subID == "" { - return apiutil.ErrMissingPolicySub - } - } - - return nil -} diff --git a/auth/api/http/policies/responses.go b/auth/api/http/policies/responses.go deleted file mode 100644 index 27fd596ed8..0000000000 --- a/auth/api/http/policies/responses.go +++ /dev/null @@ -1,43 +0,0 @@ -package policies - -import "net/http" - -type createPolicyRes struct { - created bool -} - -func (res createPolicyRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res createPolicyRes) Headers() map[string]string { - return map[string]string{} -} - -func (res createPolicyRes) Empty() bool { - return false -} - -type deletePoliciesRes struct { - deleted bool -} - -func (res deletePoliciesRes) Code() int { - if res.deleted { - return http.StatusNoContent - } - - return http.StatusOK -} - -func (res deletePoliciesRes) Headers() map[string]string { - return map[string]string{} -} - -func (res deletePoliciesRes) Empty() bool { - return true -} diff --git a/auth/api/http/policies/transport.go b/auth/api/http/policies/transport.go deleted file mode 100644 
index 423d95104c..0000000000 --- a/auth/api/http/policies/transport.go +++ /dev/null @@ -1,107 +0,0 @@ -package policies - -import ( - "context" - "encoding/json" - "net/http" - "strings" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/opentracing/opentracing-go" -) - -const contentType = "application/json" - -// MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(svc auth.Service, mux *bone.Mux, tracer opentracing.Tracer, logger logger.Logger) *bone.Mux { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), - } - - mux.Post("/policies", kithttp.NewServer( - kitot.TraceServer(tracer, "create_policy_bulk")(createPolicyEndpoint(svc)), - decodePoliciesRequest, - encodeResponse, - opts..., - )) - - mux.Put("/policies", kithttp.NewServer( - kitot.TraceServer(tracer, "delete_policies")(deletePoliciesEndpoint(svc)), - decodePoliciesRequest, - encodeResponse, - opts..., - )) - - return mux -} - -func decodePoliciesRequest(ctx context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := policiesReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - w.Header().Set("Content-Type", contentType) - - if ar, ok := response.(mainflux.Response); ok { - for k, v := range ar.Headers() { - w.Header().Set(k, v) - } - - w.WriteHeader(ar.Code()) - - if 
ar.Empty() { - return nil - } - } - - return json.NewEncoder(w).Encode(response) -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch { - case errors.Contains(err, errors.ErrMalformedEntity), - err == apiutil.ErrEmptyList, - err == apiutil.ErrMissingPolicyObj, - err == apiutil.ErrMissingPolicySub, - err == apiutil.ErrMalformedPolicy: - w.WriteHeader(http.StatusBadRequest) - case errors.Contains(err, errors.ErrAuthentication), - err == apiutil.ErrBearerToken: - w.WriteHeader(http.StatusUnauthorized) - case errors.Contains(err, errors.ErrNotFound): - w.WriteHeader(http.StatusNotFound) - case errors.Contains(err, errors.ErrConflict): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrAuthorization): - w.WriteHeader(http.StatusForbidden) - case errors.Contains(err, auth.ErrMemberAlreadyAssigned): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrUnsupportedContentType): - w.WriteHeader(http.StatusUnsupportedMediaType) - default: - w.WriteHeader(http.StatusInternalServerError) - } - - if errorVal, ok := err.(errors.Error); ok { - w.Header().Set("Content-Type", contentType) - if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { - w.WriteHeader(http.StatusInternalServerError) - } - } -} diff --git a/auth/api/http/transport.go b/auth/api/http/transport.go deleted file mode 100644 index 8e1f0a68fe..0000000000 --- a/auth/api/http/transport.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 -package http - -import ( - "net/http" - - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/auth/api/http/groups" - "github.com/mainflux/mainflux/auth/api/http/keys" - "github.com/mainflux/mainflux/auth/api/http/policies" - "github.com/mainflux/mainflux/logger" - "github.com/opentracing/opentracing-go" - 
"github.com/prometheus/client_golang/prometheus/promhttp" -) - -// MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(svc auth.Service, tracer opentracing.Tracer, logger logger.Logger) http.Handler { - mux := bone.New() - mux = keys.MakeHandler(svc, mux, tracer, logger) - mux = groups.MakeHandler(svc, mux, tracer, logger) - mux = policies.MakeHandler(svc, mux, tracer, logger) - mux.GetFunc("/health", mainflux.Health("auth")) - mux.Handle("/metrics", promhttp.Handler()) - return mux -} diff --git a/auth/api/logging.go b/auth/api/logging.go deleted file mode 100644 index cd6dda73cd..0000000000 --- a/auth/api/logging.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -//go:build !test - -package api - -import ( - "context" - "fmt" - "time" - - "github.com/mainflux/mainflux/auth" - log "github.com/mainflux/mainflux/logger" -) - -var _ auth.Service = (*loggingMiddleware)(nil) - -type loggingMiddleware struct { - logger log.Logger - svc auth.Service -} - -// LoggingMiddleware adds logging facilities to the core service. 
-func LoggingMiddleware(svc auth.Service, logger log.Logger) auth.Service { - return &loggingMiddleware{logger, svc} -} - -func (lm *loggingMiddleware) ListPolicies(ctx context.Context, pr auth.PolicyReq) (p auth.PolicyPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_policies took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListPolicies(ctx, pr) -} - -func (lm *loggingMiddleware) Issue(ctx context.Context, token string, newKey auth.Key) (key auth.Key, secret string, err error) { - defer func(begin time.Time) { - d := "infinite duration" - if !key.ExpiresAt.IsZero() { - d = fmt.Sprintf("the key with expiration date %v", key.ExpiresAt) - } - message := fmt.Sprintf("Method issue for %s took %s to complete", d, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Issue(ctx, token, newKey) -} - -func (lm *loggingMiddleware) Revoke(ctx context.Context, token, id string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method revoke for key %s took %s to complete", id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Revoke(ctx, token, id) -} - -func (lm *loggingMiddleware) RetrieveKey(ctx context.Context, token, id string) (key auth.Key, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method retrieve for key %s took %s to complete", id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without 
errors.", message)) - }(time.Now()) - - return lm.svc.RetrieveKey(ctx, token, id) -} - -func (lm *loggingMiddleware) RetrieveKeys(ctx context.Context, token string, pm auth.PageMetadata) (kp auth.KeyPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method retrieve for token %s took %s to complete", token, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.RetrieveKeys(ctx, token, pm) -} - -func (lm *loggingMiddleware) Identify(ctx context.Context, key string) (id auth.Identity, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method identify took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Identify(ctx, key) -} - -func (lm *loggingMiddleware) Authorize(ctx context.Context, pr auth.PolicyReq) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method authorize took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - return lm.svc.Authorize(ctx, pr) -} - -func (lm *loggingMiddleware) AddPolicy(ctx context.Context, pr auth.PolicyReq) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method add_policy took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - return lm.svc.AddPolicy(ctx, pr) -} - -func (lm *loggingMiddleware) AddPolicies(ctx context.Context, token, object string, subjectIDs, relations []string) (err error) { - 
defer func(begin time.Time) { - message := fmt.Sprintf("Method create_policy_bulk took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.AddPolicies(ctx, token, object, subjectIDs, relations) -} - -func (lm *loggingMiddleware) DeletePolicy(ctx context.Context, pr auth.PolicyReq) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method delete_policy took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - return lm.svc.DeletePolicy(ctx, pr) -} - -func (lm *loggingMiddleware) DeletePolicies(ctx context.Context, token, object string, subjectIDs, relations []string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method delete_policies took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - return lm.svc.DeletePolicies(ctx, token, object, subjectIDs, relations) -} - -func (lm *loggingMiddleware) CreateGroup(ctx context.Context, token string, group auth.Group) (g auth.Group, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method create_group for token %s and name %s took %s to complete", token, group.Name, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.CreateGroup(ctx, token, group) -} - -func (lm *loggingMiddleware) UpdateGroup(ctx context.Context, token string, group auth.Group) (gr auth.Group, err error) { - defer func(begin time.Time) { - message 
:= fmt.Sprintf("Method update_group for token %s and name %s took %s to complete", token, group.Name, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.UpdateGroup(ctx, token, group) -} - -func (lm *loggingMiddleware) RemoveGroup(ctx context.Context, token string, id string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method remove_group for token %s and id %s took %s to complete", token, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.RemoveGroup(ctx, token, id) -} - -func (lm *loggingMiddleware) ViewGroup(ctx context.Context, token, id string) (group auth.Group, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method view_group for token %s and id %s took %s to complete", token, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ViewGroup(ctx, token, id) -} - -func (lm *loggingMiddleware) ListGroups(ctx context.Context, token string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_groups for token %s took %s to complete", token, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListGroups(ctx, token, pm) -} - -func (lm *loggingMiddleware) ListChildren(ctx context.Context, token, parentID string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - message := 
fmt.Sprintf("Method list_children for token %s and parent %s took %s to complete", token, parentID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListChildren(ctx, token, parentID, pm) -} - -func (lm *loggingMiddleware) ListParents(ctx context.Context, token, childID string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_parents for token %s and child %s took for child %s to complete", token, childID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListParents(ctx, token, childID, pm) -} - -func (lm *loggingMiddleware) ListMembers(ctx context.Context, token, groupID, groupType string, pm auth.PageMetadata) (gp auth.MemberPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_members for token %s and group id %s took %s to complete", token, groupID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListMembers(ctx, token, groupID, groupType, pm) -} - -func (lm *loggingMiddleware) ListMemberships(ctx context.Context, token, memberID string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_memberships for token %s and member id %s took %s to complete", token, memberID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListMemberships(ctx, token, 
memberID, pm) -} - -func (lm *loggingMiddleware) Assign(ctx context.Context, token, groupID, groupType string, memberIDs ...string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method assign for token %s and member %s group id %s took %s to complete", token, memberIDs, groupID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Assign(ctx, token, groupID, groupType, memberIDs...) -} - -func (lm *loggingMiddleware) Unassign(ctx context.Context, token string, groupID string, memberIDs ...string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method unassign for token %s and member %s group id %s took %s to complete", token, memberIDs, groupID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Unassign(ctx, token, groupID, memberIDs...) 
-} - -func (lm *loggingMiddleware) AssignGroupAccessRights(ctx context.Context, token, thingGroupID, userGroupID string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method share_group_access took %s to complete", time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.AssignGroupAccessRights(ctx, token, thingGroupID, userGroupID) -} diff --git a/auth/api/metrics.go b/auth/api/metrics.go deleted file mode 100644 index 7cad9f770a..0000000000 --- a/auth/api/metrics.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -//go:build !test - -package api - -import ( - "context" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/mainflux/mainflux/auth" -) - -var _ auth.Service = (*metricsMiddleware)(nil) - -type metricsMiddleware struct { - counter metrics.Counter - latency metrics.Histogram - svc auth.Service -} - -// MetricsMiddleware instruments core service by tracking request count and latency. 
-func MetricsMiddleware(svc auth.Service, counter metrics.Counter, latency metrics.Histogram) auth.Service { - return &metricsMiddleware{ - counter: counter, - latency: latency, - svc: svc, - } -} - -func (ms *metricsMiddleware) ListPolicies(ctx context.Context, pr auth.PolicyReq) (p auth.PolicyPage, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_policies").Add(1) - ms.latency.With("method", "list_policies").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListPolicies(ctx, pr) -} - -func (ms *metricsMiddleware) Issue(ctx context.Context, token string, key auth.Key) (auth.Key, string, error) { - defer func(begin time.Time) { - ms.counter.With("method", "issue_key").Add(1) - ms.latency.With("method", "issue_key").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Issue(ctx, token, key) -} - -func (ms *metricsMiddleware) Revoke(ctx context.Context, token, id string) error { - defer func(begin time.Time) { - ms.counter.With("method", "revoke_key").Add(1) - ms.latency.With("method", "revoke_key").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Revoke(ctx, token, id) -} - -func (ms *metricsMiddleware) RetrieveKey(ctx context.Context, token, id string) (auth.Key, error) { - defer func(begin time.Time) { - ms.counter.With("method", "retrieve_key").Add(1) - ms.latency.With("method", "retrieve_key").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.RetrieveKey(ctx, token, id) -} - -func (ms *metricsMiddleware) RetrieveKeys(ctx context.Context, token string, pm auth.PageMetadata) (auth.KeyPage, error) { - defer func(begin time.Time) { - ms.counter.With("method", "retrieve_keys").Add(1) - ms.latency.With("method", "retrieve_keys").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.RetrieveKeys(ctx, token, pm) -} - -func (ms *metricsMiddleware) Identify(ctx context.Context, token string) (auth.Identity, error) { - defer func(begin time.Time) 
{ - ms.counter.With("method", "identify").Add(1) - ms.latency.With("method", "identify").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Identify(ctx, token) -} - -func (ms *metricsMiddleware) Authorize(ctx context.Context, pr auth.PolicyReq) error { - defer func(begin time.Time) { - ms.counter.With("method", "authorize").Add(1) - ms.latency.With("method", "authorize").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.Authorize(ctx, pr) -} - -func (ms *metricsMiddleware) AddPolicy(ctx context.Context, pr auth.PolicyReq) error { - defer func(begin time.Time) { - ms.counter.With("method", "add_policy").Add(1) - ms.latency.With("method", "add_policy").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.AddPolicy(ctx, pr) -} - -func (ms *metricsMiddleware) AddPolicies(ctx context.Context, token, object string, subjectIDs, relations []string) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", "create_policy_bulk").Add(1) - ms.latency.With("method", "create_policy_bulk").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.AddPolicies(ctx, token, object, subjectIDs, relations) -} - -func (ms *metricsMiddleware) DeletePolicy(ctx context.Context, pr auth.PolicyReq) error { - defer func(begin time.Time) { - ms.counter.With("method", "delete_policy").Add(1) - ms.latency.With("method", "delete_policy").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.DeletePolicy(ctx, pr) -} - -func (ms *metricsMiddleware) DeletePolicies(ctx context.Context, token, object string, subjectIDs, relations []string) error { - defer func(begin time.Time) { - ms.counter.With("method", "delete_policies").Add(1) - ms.latency.With("method", "delete_policies").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.DeletePolicies(ctx, token, object, subjectIDs, relations) -} - -func (ms *metricsMiddleware) CreateGroup(ctx context.Context, token string, group auth.Group) 
(gr auth.Group, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "create_group").Add(1) - ms.latency.With("method", "create_group").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.CreateGroup(ctx, token, group) -} - -func (ms *metricsMiddleware) UpdateGroup(ctx context.Context, token string, group auth.Group) (gr auth.Group, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "update_group").Add(1) - ms.latency.With("method", "update_group").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.UpdateGroup(ctx, token, group) -} - -func (ms *metricsMiddleware) RemoveGroup(ctx context.Context, token string, id string) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", "remove_group").Add(1) - ms.latency.With("method", "remove_group").Observe(time.Since(begin).Seconds()) - }(time.Now()) - return ms.svc.RemoveGroup(ctx, token, id) -} - -func (ms *metricsMiddleware) ViewGroup(ctx context.Context, token, id string) (group auth.Group, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "view_group").Add(1) - ms.latency.With("method", "view_group").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ViewGroup(ctx, token, id) -} - -func (ms *metricsMiddleware) ListGroups(ctx context.Context, token string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_groups").Add(1) - ms.latency.With("method", "list_groups").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListGroups(ctx, token, pm) -} - -func (ms *metricsMiddleware) ListParents(ctx context.Context, token, childID string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "parents").Add(1) - ms.latency.With("method", "parents").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListParents(ctx, token, 
childID, pm) -} - -func (ms *metricsMiddleware) ListChildren(ctx context.Context, token, parentID string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_children").Add(1) - ms.latency.With("method", "list_children").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListChildren(ctx, token, parentID, pm) -} - -func (ms *metricsMiddleware) ListMembers(ctx context.Context, token, groupID, groupType string, pm auth.PageMetadata) (gp auth.MemberPage, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_members").Add(1) - ms.latency.With("method", "list_members").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListMembers(ctx, token, groupID, groupType, pm) -} - -func (ms *metricsMiddleware) ListMemberships(ctx context.Context, token, memberID string, pm auth.PageMetadata) (gp auth.GroupPage, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_memberships").Add(1) - ms.latency.With("method", "list_memberships").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListMemberships(ctx, token, memberID, pm) -} - -func (ms *metricsMiddleware) Assign(ctx context.Context, token, groupID, groupType string, memberIDs ...string) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", "assign").Add(1) - ms.latency.With("method", "assign").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Assign(ctx, token, groupID, groupType, memberIDs...) -} - -func (ms *metricsMiddleware) Unassign(ctx context.Context, token, groupID string, memberIDs ...string) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", "unassign").Add(1) - ms.latency.With("method", "unassign").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Unassign(ctx, token, groupID, memberIDs...) 
-} - -func (ms *metricsMiddleware) AssignGroupAccessRights(ctx context.Context, token, thingGroupID, userGroupID string) error { - defer func(begin time.Time) { - ms.counter.With("method", "share_group_access").Add(1) - ms.latency.With("method", "share_group_access").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.AssignGroupAccessRights(ctx, token, thingGroupID, userGroupID) -} diff --git a/auth/groups.go b/auth/groups.go deleted file mode 100644 index ca7b93547b..0000000000 --- a/auth/groups.go +++ /dev/null @@ -1,163 +0,0 @@ -package auth - -import ( - "context" - "errors" - "time" -) - -const ( - // MaxLevel represents the maximum group hierarchy level. - MaxLevel = uint64(5) - // MinLevel represents the minimum group hierarchy level. - MinLevel = uint64(1) -) - -var ( - // ErrAssignToGroup indicates failure to assign member to a group. - ErrAssignToGroup = errors.New("failed to assign member to a group") - - // ErrUnassignFromGroup indicates failure to unassign member from a group. - ErrUnassignFromGroup = errors.New("failed to unassign member from a group") - - // ErrMissingParent indicates that parent can't be found - ErrMissingParent = errors.New("failed to retrieve parent") - - // ErrGroupNotEmpty indicates group is not empty, can't be deleted. - ErrGroupNotEmpty = errors.New("group is not empty") - - // ErrMemberAlreadyAssigned indicates that members is already assigned. - ErrMemberAlreadyAssigned = errors.New("member is already assigned") -) - -// GroupMetadata defines the Metadata type. -type GroupMetadata map[string]interface{} - -// Member represents the member information. -type Member struct { - ID string - Type string -} - -// Group represents the group information. -type Group struct { - ID string - OwnerID string - ParentID string - Name string - Description string - Metadata GroupMetadata - // Indicates a level in tree hierarchy. - // Root node is level 1. 
- Level int - // Path in a tree consisting of group ids - // parentID1.parentID2.childID1 - // e.g. 01EXPM5Z8HRGFAEWTETR1X1441.01EXPKW2TVK74S5NWQ979VJ4PJ.01EXPKW2TVK74S5NWQ979VJ4PJ - Path string - Children []*Group - CreatedAt time.Time - UpdatedAt time.Time -} - -// PageMetadata contains page metadata that helps navigation. -type PageMetadata struct { - Total uint64 - Offset uint64 - Limit uint64 - Size uint64 - Level uint64 - Name string - Type uint32 - Subject string - Metadata GroupMetadata -} - -// GroupPage contains page related metadata as well as list of groups that -// belong to this page. -type GroupPage struct { - PageMetadata - Groups []Group -} - -// MemberPage contains page related metadata as well as list of members that -// belong to this page. -type MemberPage struct { - PageMetadata - Members []Member -} - -// GroupService specifies an API that must be fullfiled by the domain service -// implementation, and all of its decorators (e.g. logging & metrics). -type GroupService interface { - // CreateGroup creates new group. - CreateGroup(ctx context.Context, token string, g Group) (Group, error) - - // UpdateGroup updates the group identified by the provided ID. - UpdateGroup(ctx context.Context, token string, g Group) (Group, error) - - // ViewGroup retrieves data about the group identified by ID. - ViewGroup(ctx context.Context, token, id string) (Group, error) - - // ListGroups retrieves groups. - ListGroups(ctx context.Context, token string, pm PageMetadata) (GroupPage, error) - - // ListChildren retrieves groups that are children to group identified by parentID - ListChildren(ctx context.Context, token, parentID string, pm PageMetadata) (GroupPage, error) - - // ListParents retrieves groups that are parent to group identified by childID. - ListParents(ctx context.Context, token, childID string, pm PageMetadata) (GroupPage, error) - - // ListMembers retrieves everything that is assigned to a group identified by groupID. 
- ListMembers(ctx context.Context, token, groupID, groupType string, pm PageMetadata) (MemberPage, error) - - // ListMemberships retrieves all groups for member that is identified with memberID belongs to. - ListMemberships(ctx context.Context, token, memberID string, pm PageMetadata) (GroupPage, error) - - // RemoveGroup removes the group identified with the provided ID. - RemoveGroup(ctx context.Context, token, id string) error - - // Assign adds a member with memberID into the group identified by groupID. - Assign(ctx context.Context, token, groupID, groupType string, memberIDs ...string) error - - // Unassign removes member with memberID from group identified by groupID. - Unassign(ctx context.Context, token, groupID string, memberIDs ...string) error - - // AssignGroupAccessRights adds access rights on thing groups to user group. - AssignGroupAccessRights(ctx context.Context, token, thingGroupID, userGroupID string) error -} - -// GroupRepository specifies a group persistence API. -type GroupRepository interface { - // Save group - Save(ctx context.Context, g Group) (Group, error) - - // Update a group - Update(ctx context.Context, g Group) (Group, error) - - // Delete a group - Delete(ctx context.Context, id string) error - - // RetrieveByID retrieves group by its id - RetrieveByID(ctx context.Context, id string) (Group, error) - - // RetrieveAll retrieves all groups. - RetrieveAll(ctx context.Context, pm PageMetadata) (GroupPage, error) - - // RetrieveAllParents retrieves all groups that are ancestors to the group with given groupID. - RetrieveAllParents(ctx context.Context, groupID string, pm PageMetadata) (GroupPage, error) - - // RetrieveAllChildren retrieves all children from group with given groupID up to the hierarchy level. 
- RetrieveAllChildren(ctx context.Context, groupID string, pm PageMetadata) (GroupPage, error) - - // Retrieves list of groups that member belongs to - Memberships(ctx context.Context, memberID string, pm PageMetadata) (GroupPage, error) - - // Members retrieves everything that is assigned to a group identified by groupID. - Members(ctx context.Context, groupID, groupType string, pm PageMetadata) (MemberPage, error) - - // Assign adds a member to group. - Assign(ctx context.Context, groupID, groupType string, memberIDs ...string) error - - // Unassign removes a member from a group - Unassign(ctx context.Context, groupID string, memberIDs ...string) error -} diff --git a/auth/jwt/token_test.go b/auth/jwt/token_test.go deleted file mode 100644 index ada6dae880..0000000000 --- a/auth/jwt/token_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package jwt_test - -import ( - "fmt" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/auth/jwt" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const secret = "test" - -func key() auth.Key { - exp := time.Now().UTC().Add(10 * time.Minute).Round(time.Second) - return auth.Key{ - ID: "id", - Type: auth.LoginKey, - Subject: "user@email.com", - IssuerID: "", - IssuedAt: time.Now().UTC().Add(-10 * time.Second).Round(time.Second), - ExpiresAt: exp, - } -} - -func TestIssue(t *testing.T) { - tokenizer := jwt.New(secret) - - cases := []struct { - desc string - key auth.Key - err error - }{ - { - desc: "issue new token", - key: key(), - err: nil, - }, - } - - for _, tc := range cases { - _, err := tokenizer.Issue(tc.key) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s, got %s", tc.desc, tc.err, err)) - } -} - -func TestParse(t *testing.T) { - tokenizer := jwt.New(secret) - - token, err := tokenizer.Issue(key()) - require.Nil(t, 
err, fmt.Sprintf("issuing key expected to succeed: %s", err)) - - apiKey := key() - apiKey.Type = auth.APIKey - apiKey.ExpiresAt = time.Now().UTC().Add(-1 * time.Minute).Round(time.Second) - apiToken, err := tokenizer.Issue(apiKey) - require.Nil(t, err, fmt.Sprintf("issuing user key expected to succeed: %s", err)) - - expKey := key() - expKey.ExpiresAt = time.Now().UTC().Add(-1 * time.Minute).Round(time.Second) - expToken, err := tokenizer.Issue(expKey) - require.Nil(t, err, fmt.Sprintf("issuing expired key expected to succeed: %s", err)) - - cases := []struct { - desc string - key auth.Key - token string - err error - }{ - { - desc: "parse valid key", - key: key(), - token: token, - err: nil, - }, - { - desc: "parse ivalid key", - key: auth.Key{}, - token: "invalid", - err: errors.ErrAuthentication, - }, - { - desc: "parse expired key", - key: auth.Key{}, - token: expToken, - err: auth.ErrKeyExpired, - }, - { - desc: "parse expired API key", - key: apiKey, - token: apiToken, - err: auth.ErrAPIKeyExpired, - }, - } - - for _, tc := range cases { - key, err := tokenizer.Parse(tc.token) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.key, key, fmt.Sprintf("%s expected %v, got %v", tc.desc, tc.key, key)) - } -} diff --git a/auth/jwt/tokenizer.go b/auth/jwt/tokenizer.go deleted file mode 100644 index c39dd37784..0000000000 --- a/auth/jwt/tokenizer.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package jwt - -import ( - "time" - - "github.com/golang-jwt/jwt/v4" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" -) - -const issuerName = "mainflux.auth" - -type claims struct { - jwt.RegisteredClaims - IssuerID string `json:"issuer_id,omitempty"` - Type *uint32 `json:"type,omitempty"` -} - -func (c claims) Valid() error { - if c.Type == nil || *c.Type > auth.APIKey || c.Issuer != issuerName { - return 
errors.ErrMalformedEntity - } - - return c.RegisteredClaims.Valid() -} - -type tokenizer struct { - secret string -} - -// New returns new JWT Tokenizer. -func New(secret string) auth.Tokenizer { - return tokenizer{secret: secret} -} - -func (svc tokenizer) Issue(key auth.Key) (string, error) { - claims := claims{ - RegisteredClaims: jwt.RegisteredClaims{ - Issuer: issuerName, - Subject: key.Subject, - IssuedAt: &jwt.NumericDate{Time: key.IssuedAt.UTC()}, - }, - IssuerID: key.IssuerID, - Type: &key.Type, - } - - if !key.ExpiresAt.IsZero() { - claims.ExpiresAt = &jwt.NumericDate{Time: key.ExpiresAt.UTC()} - } - if key.ID != "" { - claims.ID = key.ID - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - return token.SignedString([]byte(svc.secret)) -} - -func (svc tokenizer) Parse(token string) (auth.Key, error) { - c := claims{} - _, err := jwt.ParseWithClaims(token, &c, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, errors.ErrAuthentication - } - return []byte(svc.secret), nil - }) - - if err != nil { - if e, ok := err.(*jwt.ValidationError); ok && e.Errors == jwt.ValidationErrorExpired { - // Expired User key needs to be revoked. - - if c.Type != nil && *c.Type == auth.APIKey { - return c.toKey(), auth.ErrAPIKeyExpired - } - return auth.Key{}, errors.Wrap(auth.ErrKeyExpired, err) - } - return auth.Key{}, errors.Wrap(errors.ErrAuthentication, err) - } - - return c.toKey(), nil -} - -func (c claims) toKey() auth.Key { - key := auth.Key{ - ID: c.ID, - IssuerID: c.IssuerID, - Subject: c.Subject, - IssuedAt: c.IssuedAt.Time.UTC(), - } - - key.ExpiresAt = time.Time{} - if c.ExpiresAt != nil && c.ExpiresAt.Time.UTC().Unix() != 0 { - key.ExpiresAt = c.ExpiresAt.Time.UTC() - } - - // Default type is 0. 
- if c.Type != nil { - key.Type = *(c.Type) - } - - return key -} diff --git a/auth/keto/doc.go b/auth/keto/doc.go deleted file mode 100644 index bbb95a7df5..0000000000 --- a/auth/keto/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package keto contains PolicyAgent implementation using Keto. -package keto diff --git a/auth/keto/policies.go b/auth/keto/policies.go deleted file mode 100644 index d2ce28b67f..0000000000 --- a/auth/keto/policies.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package keto - -import ( - "context" - "regexp" - "strings" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" - acl "github.com/ory/keto/proto/ory/keto/acl/v1alpha1" -) - -const ( - subjectSetRegex = "^.{1,}:.{1,}#.{1,}$" // expected subject set structure is :# - ketoNamespace = "members" -) - -type policyAgent struct { - writer acl.WriteServiceClient - checker acl.CheckServiceClient - reader acl.ReadServiceClient -} - -// NewPolicyAgent returns a gRPC communication functionalities -// to communicate with ORY Keto. 
-func NewPolicyAgent(checker acl.CheckServiceClient, writer acl.WriteServiceClient, reader acl.ReadServiceClient) auth.PolicyAgent { - return policyAgent{checker: checker, writer: writer, reader: reader} -} - -func (pa policyAgent) CheckPolicy(ctx context.Context, pr auth.PolicyReq) error { - res, err := pa.checker.Check(context.Background(), &acl.CheckRequest{ - Namespace: ketoNamespace, - Object: pr.Object, - Relation: pr.Relation, - Subject: getSubject(pr), - }) - if err != nil { - return errors.Wrap(err, errors.ErrAuthorization) - } - if !res.GetAllowed() { - return errors.ErrAuthorization - } - return nil -} - -func (pa policyAgent) AddPolicy(ctx context.Context, pr auth.PolicyReq) error { - var ss *acl.Subject - switch isSubjectSet(pr.Subject) { - case true: - namespace, object, relation := parseSubjectSet(pr.Subject) - ss = &acl.Subject{ - Ref: &acl.Subject_Set{Set: &acl.SubjectSet{Namespace: namespace, Object: object, Relation: relation}}, - } - default: - ss = &acl.Subject{Ref: &acl.Subject_Id{Id: pr.Subject}} - } - - trt := pa.writer.TransactRelationTuples - _, err := trt(context.Background(), &acl.TransactRelationTuplesRequest{ - RelationTupleDeltas: []*acl.RelationTupleDelta{ - { - Action: acl.RelationTupleDelta_INSERT, - RelationTuple: &acl.RelationTuple{ - Namespace: ketoNamespace, - Object: pr.Object, - Relation: pr.Relation, - Subject: ss, - }, - }, - }, - }) - return err -} - -func (pa policyAgent) DeletePolicy(ctx context.Context, pr auth.PolicyReq) error { - trt := pa.writer.TransactRelationTuples - _, err := trt(context.Background(), &acl.TransactRelationTuplesRequest{ - RelationTupleDeltas: []*acl.RelationTupleDelta{ - { - Action: acl.RelationTupleDelta_DELETE, - RelationTuple: &acl.RelationTuple{ - Namespace: ketoNamespace, - Object: pr.Object, - Relation: pr.Relation, - Subject: &acl.Subject{Ref: &acl.Subject_Id{ - Id: pr.Subject, - }}, - }, - }, - }, - }) - return err -} - -func (pa policyAgent) RetrievePolicies(ctx context.Context, pr 
auth.PolicyReq) ([]*acl.RelationTuple, error) { - var ss *acl.Subject - switch isSubjectSet(pr.Subject) { - case true: - namespace, object, relation := parseSubjectSet(pr.Subject) - ss = &acl.Subject{ - Ref: &acl.Subject_Set{Set: &acl.SubjectSet{Namespace: namespace, Object: object, Relation: relation}}, - } - default: - ss = &acl.Subject{Ref: &acl.Subject_Id{Id: pr.Subject}} - } - - res, err := pa.reader.ListRelationTuples(ctx, &acl.ListRelationTuplesRequest{ - Query: &acl.ListRelationTuplesRequest_Query{ - Namespace: ketoNamespace, - Relation: pr.Relation, - Subject: ss, - }, - }) - if err != nil { - return []*acl.RelationTuple{}, err - } - - tuple := res.GetRelationTuples() - for res.NextPageToken != "" { - tuple = append(tuple, res.GetRelationTuples()...) - } - - return tuple, nil -} - -// getSubject returns a 'subject' field for ACL(access control lists). -// If the given PolicyReq argument contains a subject as subject set, -// it returns subject set; otherwise, it returns a subject. -func getSubject(pr auth.PolicyReq) *acl.Subject { - if isSubjectSet(pr.Subject) { - return &acl.Subject{ - Ref: &acl.Subject_Set{Set: &acl.SubjectSet{ - Namespace: ketoNamespace, - Object: pr.Object, - Relation: pr.Relation, - }}, - } - } - - return &acl.Subject{Ref: &acl.Subject_Id{Id: pr.Subject}} -} - -// isSubjectSet returns true when given subject is subject set. -// Otherwise, it returns false. 
-func isSubjectSet(subject string) bool { - r, err := regexp.Compile(subjectSetRegex) - if err != nil { - return false - } - return r.MatchString(subject) -} - -func parseSubjectSet(subjectSet string) (namespace, object, relation string) { - r := strings.Split(subjectSet, ":") - if len(r) != 2 { - return - } - namespace = r[0] - - r = strings.Split(r[1], "#") - if len(r) != 2 { - return - } - - object = r[0] - relation = r[1] - - return -} diff --git a/auth/keto/policies_test.go b/auth/keto/policies_test.go deleted file mode 100644 index aab65aaac8..0000000000 --- a/auth/keto/policies_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package keto - -import ( - "fmt" - "testing" - - "github.com/mainflux/mainflux/auth" - acl "github.com/ory/keto/proto/ory/keto/acl/v1alpha1" - "github.com/stretchr/testify/assert" -) - -func TestIsSubjectSet(t *testing.T) { - cases := []struct { - desc string - subjectSet string - result bool - }{ - { - desc: "check valid subject set", - subjectSet: "namespace:object#relation", - result: true, - }, - { - desc: "check invalid subject set, missing namespace field", - subjectSet: ":object#relation", - result: false, - }, - { - desc: "check invalid subject set, missing object field", - subjectSet: "namespace:#relation", - result: false, - }, - { - desc: "check invalid subject set, missing relation field", - subjectSet: "namespace:object#", - result: false, - }, - { - desc: "check invalid subject set, empty subject set", - subjectSet: ":#", - result: false, - }, - { - desc: "check invalid subject set, missing subject set identifier", - subjectSet: "namespace:#relation", - result: false, - }, - { - desc: "check invalid subject set, missing object field", - subjectSet: "namespace:object", - result: false, - }, - { - desc: "check invalid subject set, unexpected object field", - subjectSet: "namespace:object@relation", - result: false, - }, - } - - for _, tc := range cases { - iss := isSubjectSet(tc.subjectSet) - assert.Equal(t, iss, tc.result, 
fmt.Sprintf("%s expected to be %v, got %v\n", tc.desc, tc.result, iss)) - } - -} - -func TestGetSubject(t *testing.T) { - p1 := auth.PolicyReq{Subject: "subject", Object: "object", Relation: "relation"} - s1 := getSubject(p1) - ref1 := s1.GetRef() - _, ok := ref1.(*acl.Subject_Id) - assert.True(t, ok, fmt.Errorf("subject reference of %#v is expected to be (*acl.Subject_Id), got %T", p1, ref1)) - - p2 := auth.PolicyReq{Subject: "members:group#access", Object: "object", Relation: "relation"} - s2 := getSubject(p2) - ref2 := s2.GetRef() - _, ok = ref2.(*acl.Subject_Set) - assert.True(t, ok, fmt.Errorf("subject reference of %#v is expected to be (*acl.Subject_Set), got %T", p2, ref2)) -} diff --git a/auth/keys.go b/auth/keys.go deleted file mode 100644 index 7ea069d9cf..0000000000 --- a/auth/keys.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package auth - -import ( - "context" - "errors" - "time" -) - -var ( - // ErrInvalidKeyIssuedAt indicates that the Key is being used before it's issued. - ErrInvalidKeyIssuedAt = errors.New("invalid issue time") - - // ErrKeyExpired indicates that the Key is expired. - ErrKeyExpired = errors.New("use of expired key") - - // ErrAPIKeyExpired indicates that the Key is expired - // and that the key type is API key. - ErrAPIKeyExpired = errors.New("use of expired API key") -) - -const ( - // LoginKey is temporary User key received on successful login. - LoginKey uint32 = iota - // RecoveryKey represents a key for resseting password. - RecoveryKey - // APIKey enables the one to act on behalf of the user. - APIKey -) - -// Key represents API key. -type Key struct { - ID string - Type uint32 - IssuerID string - Subject string - IssuedAt time.Time - ExpiresAt time.Time -} - -// KeyPage contains a page of keys. -type KeyPage struct { - PageMetadata - Keys []Key -} - -// Identity contains ID and Email. 
-type Identity struct { - ID string - Email string -} - -// Expired verifies if the key is expired. -func (k Key) Expired() bool { - if k.Type == APIKey && k.ExpiresAt.IsZero() { - return false - } - return k.ExpiresAt.UTC().Before(time.Now().UTC()) -} - -// KeyRepository specifies Key persistence API. -type KeyRepository interface { - // Save persists the Key. A non-nil error is returned to indicate - // operation failure - Save(context.Context, Key) (string, error) - - // RetrieveByID retrieves Key by its unique identifier. - RetrieveByID(context.Context, string, string) (Key, error) - - // RetrieveAll retrieves all keys for given user ID. - RetrieveAll(context.Context, string, PageMetadata) (KeyPage, error) - - // Remove removes Key with provided ID. - Remove(context.Context, string, string) error -} diff --git a/auth/keys_test.go b/auth/keys_test.go deleted file mode 100644 index 916cc07fe0..0000000000 --- a/auth/keys_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package auth_test - -import ( - "fmt" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/stretchr/testify/assert" -) - -func TestExpired(t *testing.T) { - exp := time.Now().Add(5 * time.Minute) - exp1 := time.Now() - cases := []struct { - desc string - key auth.Key - expired bool - }{ - { - desc: "not expired key", - key: auth.Key{ - IssuedAt: time.Now(), - ExpiresAt: exp, - }, - expired: false, - }, - { - desc: "expired key", - key: auth.Key{ - IssuedAt: time.Now().UTC().Add(2 * time.Minute), - ExpiresAt: exp1, - }, - expired: true, - }, - { - desc: "user key with no expiration date", - key: auth.Key{ - IssuedAt: time.Now(), - }, - expired: true, - }, - { - desc: "API key with no expiration date", - key: auth.Key{ - IssuedAt: time.Now(), - Type: auth.APIKey, - }, - expired: false, - }, - } - - for _, tc := range cases { - res := tc.key.Expired() - assert.Equal(t, tc.expired, res, fmt.Sprintf("%s: expected %t got 
%t\n", tc.desc, tc.expired, res)) - } -} diff --git a/auth/mocks/groups.go b/auth/mocks/groups.go deleted file mode 100644 index 4f2de21636..0000000000 --- a/auth/mocks/groups.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" -) - -var _ auth.GroupRepository = (*groupRepositoryMock)(nil) - -type groupRepositoryMock struct { - mu sync.Mutex - // Map of groups, group id as a key. - // groups map[GroupID]auth.Group - groups map[string]auth.Group - // Map of groups with group id as key that are - // children (i.e. has same parent id) is element - // in children's map where parent id is key. - // children map[ParentID]map[GroupID]auth.Group - children map[string]map[string]auth.Group - // Map of parents' id with child group id as key. - // Each child has one parent. - // parents map[ChildID]ParentID - parents map[string]string - // Map of groups (with group id as key) which - // represent memberships is element in - // memberships' map where member id is a key. - // memberships map[MemberID]map[GroupID]auth.Group - memberships map[string]map[string]auth.Group - // Map of group members where member id is a key - // is an element in the map members where group id is a key. 
- // members map[type][GroupID]map[MemberID]MemberID - members map[string]map[string]map[string]string -} - -// NewGroupRepository creates in-memory user repository -func NewGroupRepository() auth.GroupRepository { - return &groupRepositoryMock{ - groups: make(map[string]auth.Group), - children: make(map[string]map[string]auth.Group), - parents: make(map[string]string), - memberships: make(map[string]map[string]auth.Group), - members: make(map[string]map[string]map[string]string), - } -} - -func (grm *groupRepositoryMock) Save(ctx context.Context, group auth.Group) (auth.Group, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - if _, ok := grm.groups[group.ID]; ok { - return auth.Group{}, errors.ErrConflict - } - path := group.ID - - if group.ParentID != "" { - parent, ok := grm.groups[group.ParentID] - if !ok { - return auth.Group{}, errors.ErrCreateEntity - } - if _, ok := grm.children[group.ParentID]; !ok { - grm.children[group.ParentID] = make(map[string]auth.Group) - } - grm.children[group.ParentID][group.ID] = group - grm.parents[group.ID] = group.ParentID - path = fmt.Sprintf("%s.%s", parent.Path, path) - } - - group.Path = path - group.Level = len(strings.Split(path, ".")) - - grm.groups[group.ID] = group - return group, nil -} - -func (grm *groupRepositoryMock) Update(ctx context.Context, group auth.Group) (auth.Group, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - up, ok := grm.groups[group.ID] - if !ok { - return auth.Group{}, errors.ErrNotFound - } - up.Name = group.Name - up.Description = group.Description - up.Metadata = group.Metadata - up.UpdatedAt = time.Now() - - grm.groups[group.ID] = up - return up, nil -} - -func (grm *groupRepositoryMock) Delete(ctx context.Context, id string) error { - grm.mu.Lock() - defer grm.mu.Unlock() - if _, ok := grm.groups[id]; !ok { - return errors.ErrNotFound - } - - if len(grm.members[id]) > 0 { - return auth.ErrGroupNotEmpty - } - - // This is not quite exact, it should go in depth - for _, ch := range 
grm.children[id] { - if len(grm.members[ch.ID]) > 0 { - return auth.ErrGroupNotEmpty - } - } - - // This is not quite exact, it should go in depth - delete(grm.groups, id) - for _, ch := range grm.children[id] { - delete(grm.members, ch.ID) - } - - delete(grm.children, id) - - return nil - -} - -func (grm *groupRepositoryMock) RetrieveByID(ctx context.Context, id string) (auth.Group, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - - val, ok := grm.groups[id] - if !ok { - return auth.Group{}, errors.ErrNotFound - } - return val, nil -} - -func (grm *groupRepositoryMock) RetrieveAll(ctx context.Context, pm auth.PageMetadata) (auth.GroupPage, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - var items []auth.Group - for _, g := range grm.groups { - items = append(items, g) - } - return auth.GroupPage{ - Groups: items, - PageMetadata: auth.PageMetadata{ - Total: uint64(len(items)), - }, - }, nil -} - -func (grm *groupRepositoryMock) Unassign(ctx context.Context, groupID string, memberIDs ...string) error { - grm.mu.Lock() - defer grm.mu.Unlock() - if _, ok := grm.groups[groupID]; !ok { - return errors.ErrNotFound - } - for _, memberID := range memberIDs { - for typ, m := range grm.members[groupID] { - _, ok := m[memberID] - if !ok { - return errors.ErrNotFound - } - delete(grm.members[groupID][typ], memberID) - delete(grm.memberships[memberID], groupID) - } - - } - return nil -} - -func (grm *groupRepositoryMock) Assign(ctx context.Context, groupID, groupType string, memberIDs ...string) error { - grm.mu.Lock() - defer grm.mu.Unlock() - if _, ok := grm.groups[groupID]; !ok { - return errors.ErrNotFound - } - - if _, ok := grm.members[groupID]; !ok { - grm.members[groupID] = make(map[string]map[string]string) - } - - for _, memberID := range memberIDs { - if _, ok := grm.members[groupID][groupType]; !ok { - grm.members[groupID][groupType] = make(map[string]string) - } - if _, ok := grm.memberships[memberID]; !ok { - grm.memberships[memberID] = 
make(map[string]auth.Group) - } - - grm.members[groupID][groupType][memberID] = memberID - grm.memberships[memberID][groupID] = grm.groups[groupID] - } - return nil - -} - -func (grm *groupRepositoryMock) Memberships(ctx context.Context, memberID string, pm auth.PageMetadata) (auth.GroupPage, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - var items []auth.Group - - first := uint64(pm.Offset) - last := first + uint64(pm.Limit) - - i := uint64(0) - for _, g := range grm.memberships[memberID] { - if i >= first && i < last { - items = append(items, g) - } - i++ - } - - return auth.GroupPage{ - Groups: items, - PageMetadata: auth.PageMetadata{ - Limit: pm.Limit, - Offset: pm.Offset, - Total: uint64(len(items)), - }, - }, nil -} - -func (grm *groupRepositoryMock) Members(ctx context.Context, groupID, groupType string, pm auth.PageMetadata) (auth.MemberPage, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - var items []auth.Member - members, ok := grm.members[groupID][groupType] - if !ok { - return auth.MemberPage{}, errors.ErrNotFound - } - - first := uint64(pm.Offset) - last := first + uint64(pm.Limit) - - i := uint64(0) - for _, g := range members { - if i >= first && i < last { - items = append(items, auth.Member{ID: g, Type: groupType}) - } - i++ - } - return auth.MemberPage{ - Members: items, - PageMetadata: auth.PageMetadata{ - Total: uint64(len(items)), - }, - }, nil -} - -func (grm *groupRepositoryMock) RetrieveAllParents(ctx context.Context, groupID string, pm auth.PageMetadata) (auth.GroupPage, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - if groupID == "" { - return auth.GroupPage{}, nil - } - - group, ok := grm.groups[groupID] - if !ok { - return auth.GroupPage{}, errors.ErrNotFound - } - - groups := make([]auth.Group, 0) - groups, err := grm.getParents(groups, group) - if err != nil { - return auth.GroupPage{}, err - } - - return auth.GroupPage{ - Groups: groups, - PageMetadata: auth.PageMetadata{ - Total: uint64(len(groups)), - }, - }, nil -} - 
-func (grm *groupRepositoryMock) getParents(groups []auth.Group, group auth.Group) ([]auth.Group, error) { - groups = append(groups, group) - parentID, ok := grm.parents[group.ID] - if !ok && parentID == "" { - return groups, nil - } - parent, ok := grm.groups[parentID] - if !ok { - panic(fmt.Sprintf("parent with id: %s not found", parentID)) - } - return grm.getParents(groups, parent) -} - -func (grm *groupRepositoryMock) RetrieveAllChildren(ctx context.Context, groupID string, pm auth.PageMetadata) (auth.GroupPage, error) { - grm.mu.Lock() - defer grm.mu.Unlock() - group, ok := grm.groups[groupID] - if !ok { - return auth.GroupPage{}, nil - } - - groups := make([]auth.Group, 0) - groups = append(groups, group) - for ch := range grm.parents { - g, ok := grm.groups[ch] - if !ok { - panic(fmt.Sprintf("child with id %s not found", ch)) - } - groups = append(groups, g) - } - - return auth.GroupPage{ - Groups: groups, - PageMetadata: auth.PageMetadata{ - Total: uint64(len(groups)), - Offset: pm.Offset, - Limit: pm.Limit, - }, - }, nil -} diff --git a/auth/mocks/keys.go b/auth/mocks/keys.go deleted file mode 100644 index d8a0bc9f41..0000000000 --- a/auth/mocks/keys.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - "sync" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" -) - -var _ auth.KeyRepository = (*keyRepositoryMock)(nil) - -type keyRepositoryMock struct { - mu sync.Mutex - keys map[string]auth.Key -} - -// NewKeyRepository creates in-memory user repository -func NewKeyRepository() auth.KeyRepository { - return &keyRepositoryMock{ - keys: make(map[string]auth.Key), - } -} - -func (krm *keyRepositoryMock) Save(ctx context.Context, key auth.Key) (string, error) { - krm.mu.Lock() - defer krm.mu.Unlock() - - if _, ok := krm.keys[key.ID]; ok { - return "", errors.ErrConflict - } - - krm.keys[key.ID] = key - return key.ID, nil -} -func (krm 
*keyRepositoryMock) RetrieveByID(ctx context.Context, issuerID, id string) (auth.Key, error) { - krm.mu.Lock() - defer krm.mu.Unlock() - - if key, ok := krm.keys[id]; ok && key.IssuerID == issuerID { - return key, nil - } - - return auth.Key{}, errors.ErrNotFound -} - -func (krm *keyRepositoryMock) RetrieveAll(ctx context.Context, issuerID string, pm auth.PageMetadata) (auth.KeyPage, error) { - krm.mu.Lock() - defer krm.mu.Unlock() - - kp := auth.KeyPage{} - i := uint64(0) - - for _, k := range krm.keys { - if i >= pm.Offset && i < (pm.Limit+pm.Offset) { - kp.Keys = append(kp.Keys, k) - } - i++ - } - - kp.Offset = pm.Offset - kp.Limit = pm.Limit - kp.Total = uint64(i) - - return kp, nil -} - -func (krm *keyRepositoryMock) Remove(ctx context.Context, issuerID, id string) error { - krm.mu.Lock() - defer krm.mu.Unlock() - if key, ok := krm.keys[id]; ok && key.IssuerID == issuerID { - delete(krm.keys, id) - } - return nil -} diff --git a/auth/mocks/policies.go b/auth/mocks/policies.go deleted file mode 100644 index 6213c03b9a..0000000000 --- a/auth/mocks/policies.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - "sync" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" - acl "github.com/ory/keto/proto/ory/keto/acl/v1alpha1" -) - -type MockSubjectSet struct { - Object string - Relation string -} - -type policyAgentMock struct { - mu sync.Mutex - // authzDb stores 'subject' as a key, and subject policies as a value. - authzDB map[string][]MockSubjectSet -} - -// NewKetoMock returns a mock service for Keto. -// This mock is not implemented yet. 
-func NewKetoMock(db map[string][]MockSubjectSet) auth.PolicyAgent { - return &policyAgentMock{authzDB: db} -} - -func (pa *policyAgentMock) CheckPolicy(ctx context.Context, pr auth.PolicyReq) error { - pa.mu.Lock() - defer pa.mu.Unlock() - - ssList := pa.authzDB[pr.Subject] - for _, ss := range ssList { - if ss.Object == pr.Object && ss.Relation == pr.Relation { - return nil - } - } - return errors.ErrAuthorization -} - -func (pa *policyAgentMock) AddPolicy(ctx context.Context, pr auth.PolicyReq) error { - pa.mu.Lock() - defer pa.mu.Unlock() - - pa.authzDB[pr.Subject] = append(pa.authzDB[pr.Subject], MockSubjectSet{Object: pr.Object, Relation: pr.Relation}) - return nil -} - -func (pa *policyAgentMock) DeletePolicy(ctx context.Context, pr auth.PolicyReq) error { - pa.mu.Lock() - defer pa.mu.Unlock() - - ssList := pa.authzDB[pr.Subject] - for k, ss := range ssList { - if ss.Object == pr.Object && ss.Relation == pr.Relation { - ssList[k] = MockSubjectSet{} - } - } - return nil -} - -func (pa *policyAgentMock) RetrievePolicies(ctx context.Context, pr auth.PolicyReq) ([]*acl.RelationTuple, error) { - pa.mu.Lock() - defer pa.mu.Unlock() - - ssList := pa.authzDB[pr.Subject] - tuple := []*acl.RelationTuple{} - for _, ss := range ssList { - if ss.Relation == pr.Relation { - tuple = append(tuple, &acl.RelationTuple{Object: ss.Object, Relation: ss.Relation}) - } - } - return tuple, nil -} diff --git a/auth/policies.go b/auth/policies.go deleted file mode 100644 index 4a7ab1e53c..0000000000 --- a/auth/policies.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package auth - -import ( - "context" - - acl "github.com/ory/keto/proto/ory/keto/acl/v1alpha1" -) - -// PolicyReq represents an argument struct for making a policy related -// function calls. -type PolicyReq struct { - Subject string - Object string - Relation string -} - -type PolicyPage struct { - Policies []string -} - -// Authz represents a authorization service. 
It exposes -// functionalities through `auth` to perform authorization. -type Authz interface { - // Authorize checks authorization of the given `subject`. Basically, - // Authorize verifies that Is `subject` allowed to `relation` on - // `object`. Authorize returns a non-nil error if the subject has - // no relation on the object (which simply means the operation is - // denied). - Authorize(ctx context.Context, pr PolicyReq) error - - // AddPolicy creates a policy for the given subject, so that, after - // AddPolicy, `subject` has a `relation` on `object`. Returns a non-nil - // error in case of failures. - AddPolicy(ctx context.Context, pr PolicyReq) error - - // AddPolicies adds new policies for given subjects. This method is - // only allowed to use as an admin. - AddPolicies(ctx context.Context, token, object string, subjectIDs, relations []string) error - - // DeletePolicy removes a policy. - DeletePolicy(ctx context.Context, pr PolicyReq) error - - // DeletePolicies deletes policies for given subjects. This method is - // only allowed to use as an admin. - DeletePolicies(ctx context.Context, token, object string, subjectIDs, relations []string) error - - // ListPolicies lists policies based on the given PolicyReq structure. - ListPolicies(ctx context.Context, pr PolicyReq) (PolicyPage, error) -} - -// PolicyAgent facilitates the communication to authorization -// services and implements Authz functionalities for certain -// authorization services (e.g. ORY Keto). -type PolicyAgent interface { - // CheckPolicy checks if the subject has a relation on the object. - // It returns a non-nil error if the subject has no relation on - // the object (which simply means the operation is denied). - CheckPolicy(ctx context.Context, pr PolicyReq) error - - // AddPolicy creates a policy for the given subject, so that, after - // AddPolicy, `subject` has a `relation` on `object`. Returns a non-nil - // error in case of failures. 
- AddPolicy(ctx context.Context, pr PolicyReq) error - - // DeletePolicy removes a policy. - DeletePolicy(ctx context.Context, pr PolicyReq) error - - RetrievePolicies(ctx context.Context, pr PolicyReq) ([]*acl.RelationTuple, error) -} diff --git a/auth/postgres/doc.go b/auth/postgres/doc.go deleted file mode 100644 index 6bce8d090b..0000000000 --- a/auth/postgres/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package postgres contains Key repository implementations using -// PostgreSQL as the underlying database. -package postgres diff --git a/auth/postgres/groups.go b/auth/postgres/groups.go deleted file mode 100644 index 7068156810..0000000000 --- a/auth/postgres/groups.go +++ /dev/null @@ -1,752 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres - -import ( - "context" - "database/sql" - "database/sql/driver" - "encoding/json" - "fmt" - "time" - - "github.com/gofrs/uuid" - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" - "github.com/jmoiron/sqlx" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" -) - -var ( - errStringToUUID = errors.New("error converting string to uuid") - errGetTotal = errors.New("failed to get total number of groups") - errCreateMetadataQuery = errors.New("failed to create query for metadata") - groupIDFkeyy = "group_relations_group_id_fkey" -) - -var _ auth.GroupRepository = (*groupRepository)(nil) - -type groupRepository struct { - db Database -} - -// NewGroupRepo instantiates a PostgreSQL implementation of group -// repository. 
-func NewGroupRepo(db Database) auth.GroupRepository { - return &groupRepository{ - db: db, - } -} - -func (gr groupRepository) Save(ctx context.Context, g auth.Group) (auth.Group, error) { - // For root group path is initialized with id - q := `INSERT INTO groups (name, description, id, path, owner_id, metadata, created_at, updated_at) - VALUES (:name, :description, :id, :id, :owner_id, :metadata, :created_at, :updated_at) - RETURNING id, name, owner_id, parent_id, description, metadata, path, nlevel(path) as level, created_at, updated_at` - if g.ParentID != "" { - // Path is constructed in insert_group_tr - init.go - q = `INSERT INTO groups (name, description, id, owner_id, parent_id, metadata, created_at, updated_at) - VALUES ( :name, :description, :id, :owner_id, :parent_id, :metadata, :created_at, :updated_at) - RETURNING id, name, owner_id, parent_id, description, metadata, path, nlevel(path) as level, created_at, updated_at` - } - - dbg, err := toDBGroup(g) - if err != nil { - return auth.Group{}, err - } - - row, err := gr.db.NamedQueryContext(ctx, q, dbg) - if err != nil { - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return auth.Group{}, errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.ForeignKeyViolation: - return auth.Group{}, errors.Wrap(errors.ErrCreateEntity, err) - case pgerrcode.UniqueViolation: - return auth.Group{}, errors.Wrap(errors.ErrConflict, err) - case pgerrcode.StringDataRightTruncationDataException: - return auth.Group{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return auth.Group{}, errors.Wrap(errors.ErrCreateEntity, err) - } - - defer row.Close() - row.Next() - dbg = dbGroup{} - if err := row.StructScan(&dbg); err != nil { - return auth.Group{}, err - } - - return toGroup(dbg) -} - -func (gr groupRepository) Update(ctx context.Context, g auth.Group) (auth.Group, error) { - q := `UPDATE groups SET name = :name, description = :description, 
metadata = :metadata, updated_at = :updated_at WHERE id = :id - RETURNING id, name, owner_id, parent_id, description, metadata, path, nlevel(path) as level, created_at, updated_at` - - dbu, err := toDBGroup(g) - if err != nil { - return auth.Group{}, errors.Wrap(errors.ErrUpdateEntity, err) - } - - row, err := gr.db.NamedQueryContext(ctx, q, dbu) - if err != nil { - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return auth.Group{}, errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.UniqueViolation: - return auth.Group{}, errors.Wrap(errors.ErrConflict, err) - case pgerrcode.StringDataRightTruncationDataException: - return auth.Group{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - } - return auth.Group{}, errors.Wrap(errors.ErrUpdateEntity, err) - } - - defer row.Close() - row.Next() - dbu = dbGroup{} - if err := row.StructScan(&dbu); err != nil { - return g, errors.Wrap(errors.ErrUpdateEntity, err) - } - - return toGroup(dbu) -} - -func (gr groupRepository) Delete(ctx context.Context, groupID string) error { - qd := `DELETE FROM groups WHERE id = :id` - group := auth.Group{ - ID: groupID, - } - dbg, err := toDBGroup(group) - if err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - res, err := gr.db.NamedExecContext(ctx, qd, dbg) - if err != nil { - pqErr, ok := err.(*pgconn.PgError) - if ok { - switch pqErr.Code { - case pgerrcode.InvalidTextRepresentation: - return errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.ForeignKeyViolation: - switch pqErr.ConstraintName { - case groupIDFkeyy: - return errors.Wrap(auth.ErrGroupNotEmpty, err) - } - return errors.Wrap(errors.ErrConflict, err) - } - } - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - cnt, err := res.RowsAffected() - if err != nil { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - - if cnt != 1 { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - return nil -} - -func (gr 
groupRepository) RetrieveByID(ctx context.Context, id string) (auth.Group, error) { - dbu := dbGroup{ - ID: id, - } - q := `SELECT id, name, owner_id, parent_id, description, metadata, path, nlevel(path) as level, created_at, updated_at FROM groups WHERE id = $1` - if err := gr.db.QueryRowxContext(ctx, q, id).StructScan(&dbu); err != nil { - if err == sql.ErrNoRows { - return auth.Group{}, errors.Wrap(errors.ErrNotFound, err) - - } - return auth.Group{}, errors.Wrap(errors.ErrViewEntity, err) - } - return toGroup(dbu) -} - -func (gr groupRepository) RetrieveAll(ctx context.Context, pm auth.PageMetadata) (auth.GroupPage, error) { - _, metaQuery, err := getGroupsMetadataQuery("groups", pm.Metadata) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveAll, err) - } - - var mq string - if metaQuery != "" { - mq = fmt.Sprintf(" AND %s", metaQuery) - } - - q := fmt.Sprintf(`SELECT id, owner_id, parent_id, name, description, metadata, path, nlevel(path) as level, created_at, updated_at FROM groups - WHERE nlevel(path) <= :level %s ORDER BY path`, mq) - - dbPage, err := toDBGroupPage("", "", pm) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveAll, err) - } - - rows, err := gr.db.NamedQueryContext(ctx, q, dbPage) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveAll, err) - } - defer rows.Close() - - items, err := gr.processRows(rows) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveAll, err) - } - - cq := "SELECT COUNT(*) FROM groups" - if metaQuery != "" { - cq = fmt.Sprintf(" %s WHERE %s", cq, metaQuery) - } - - total, err := total(ctx, gr.db, cq, dbPage) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveAll, err) - } - - page := auth.GroupPage{ - Groups: items, - PageMetadata: auth.PageMetadata{ - Total: total, - Size: uint64(len(items)), - }, - } - - return page, nil -} - -func (gr groupRepository) 
RetrieveAllParents(ctx context.Context, groupID string, pm auth.PageMetadata) (auth.GroupPage, error) { - q := `SELECT g.id, g.name, g.owner_id, g.parent_id, g.description, g.metadata, g.path, nlevel(g.path) as level, g.created_at, g.updated_at - FROM groups parent, groups g - WHERE parent.id = :id AND g.path @> parent.path AND nlevel(parent.path) - nlevel(g.path) <= :level` - cq := `SELECT COUNT(*) FROM groups parent, groups g WHERE parent.id = :id AND g.path @> parent.path` - - gp, err := gr.retrieve(ctx, groupID, q, cq, pm) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveParents, err) - } - return gp, nil -} - -func (gr groupRepository) RetrieveAllChildren(ctx context.Context, groupID string, pm auth.PageMetadata) (auth.GroupPage, error) { - q := `SELECT g.id, g.name, g.owner_id, g.parent_id, g.description, g.metadata, g.path, nlevel(g.path) as level, g.created_at, g.updated_at - FROM groups parent, groups g - WHERE parent.id = :id AND g.path <@ parent.path AND nlevel(g.path) - nlevel(parent.path) < :level` - - cq := `SELECT COUNT(*) FROM groups parent, groups g WHERE parent.id = :id AND g.path <@ parent.path ` - gp, err := gr.retrieve(ctx, groupID, q, cq, pm) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveChildren, err) - } - return gp, nil -} - -func (gr groupRepository) retrieve(ctx context.Context, groupID, retQuery, cntQuery string, pm auth.PageMetadata) (auth.GroupPage, error) { - if groupID == "" { - return auth.GroupPage{}, nil - } - _, mq, err := getGroupsMetadataQuery("g", pm.Metadata) - if err != nil { - return auth.GroupPage{}, err - } - if mq != "" { - mq = fmt.Sprintf("AND %s", mq) - } - - retQuery = fmt.Sprintf(`%s %s`, retQuery, mq) - cntQuery = fmt.Sprintf(`%s %s`, cntQuery, mq) - - dbPage, err := toDBGroupPage(groupID, "", pm) - if err != nil { - return auth.GroupPage{}, err - } - - rows, err := gr.db.NamedQueryContext(ctx, retQuery, dbPage) - if err != nil { - return 
auth.GroupPage{}, err - } - defer rows.Close() - - items, err := gr.processRows(rows) - if err != nil { - return auth.GroupPage{}, err - } - - total, err := total(ctx, gr.db, cntQuery, dbPage) - if err != nil { - return auth.GroupPage{}, err - } - - page := auth.GroupPage{ - Groups: items, - PageMetadata: auth.PageMetadata{ - Level: pm.Level, - Total: total, - Size: uint64(len(items)), - }, - } - - return page, nil - -} - -func (gr groupRepository) Members(ctx context.Context, groupID, groupType string, pm auth.PageMetadata) (auth.MemberPage, error) { - _, mq, err := getGroupsMetadataQuery("groups", pm.Metadata) - if err != nil { - return auth.MemberPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembers, err) - } - - q := fmt.Sprintf(`SELECT gr.member_id, gr.group_id, gr.type, gr.created_at, gr.updated_at FROM group_relations gr - WHERE gr.group_id = :group_id AND gr.type = :type %s`, mq) - - if groupType == "" { - q = fmt.Sprintf(`SELECT gr.member_id, gr.group_id, gr.type, gr.created_at, gr.updated_at FROM group_relations gr - WHERE gr.group_id = :group_id %s`, mq) - } - - params, err := toDBMemberPage("", groupID, groupType, pm) - if err != nil { - return auth.MemberPage{}, err - } - - rows, err := gr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return auth.MemberPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembers, err) - } - defer rows.Close() - - var items []auth.Member - for rows.Next() { - member := dbMember{} - if err := rows.StructScan(&member); err != nil { - return auth.MemberPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembers, err) - } - - if err != nil { - return auth.MemberPage{}, err - } - - items = append(items, auth.Member{ID: member.MemberID, Type: member.Type}) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM groups g, group_relations gr - WHERE gr.group_id = :group_id AND gr.group_id = g.id AND gr.type = :type %s;`, mq) - - total, err := total(ctx, gr.db, cq, params) - if err != nil { - return auth.MemberPage{}, 
errors.Wrap(auth.ErrFailedToRetrieveMembers, err) - } - - page := auth.MemberPage{ - Members: items, - PageMetadata: auth.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - Size: uint64(len(items)), - }, - } - - return page, nil -} - -func (gr groupRepository) Memberships(ctx context.Context, memberID string, pm auth.PageMetadata) (auth.GroupPage, error) { - _, mq, err := getGroupsMetadataQuery("groups", pm.Metadata) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembership, err) - } - - if mq != "" { - mq = fmt.Sprintf("AND %s", mq) - } - q := fmt.Sprintf(`SELECT g.id, g.owner_id, g.parent_id, g.name, g.description, g.metadata - FROM group_relations gr, groups g - WHERE gr.group_id = g.id and gr.member_id = :member_id - %s ORDER BY id LIMIT :limit OFFSET :offset;`, mq) - - params, err := toDBMemberPage(memberID, "", "", pm) - if err != nil { - return auth.GroupPage{}, err - } - - rows, err := gr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembership, err) - } - defer rows.Close() - - var items []auth.Group - for rows.Next() { - dbg := dbGroup{} - if err := rows.StructScan(&dbg); err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembership, err) - } - gr, err := toGroup(dbg) - if err != nil { - return auth.GroupPage{}, err - } - items = append(items, gr) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM group_relations gr, groups g - WHERE gr.group_id = g.id and gr.member_id = :member_id %s `, mq) - - total, err := total(ctx, gr.db, cq, params) - if err != nil { - return auth.GroupPage{}, errors.Wrap(auth.ErrFailedToRetrieveMembership, err) - } - - page := auth.GroupPage{ - Groups: items, - PageMetadata: auth.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - Size: uint64(len(items)), - }, - } - - return page, nil -} - -func (gr groupRepository) Assign(ctx context.Context, groupID, groupType 
string, ids ...string) error { - tx, err := gr.db.BeginTxx(ctx, nil) - if err != nil { - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - qIns := `INSERT INTO group_relations (group_id, member_id, type, created_at, updated_at) - VALUES(:group_id, :member_id, :type, :created_at, :updated_at)` - - for _, id := range ids { - dbg, err := toDBGroupRelation(id, groupID, groupType) - if err != nil { - return errors.Wrap(auth.ErrAssignToGroup, err) - } - created := time.Now() - dbg.CreatedAt = created - dbg.UpdatedAt = created - - if _, err := tx.NamedExecContext(ctx, qIns, dbg); err != nil { - if rollbackErr := tx.Rollback(); rollbackErr != nil { - err = errors.Wrap(err, rollbackErr) - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.ForeignKeyViolation: - return errors.Wrap(errors.ErrConflict, errors.New(pgErr.Detail)) - case pgerrcode.UniqueViolation: - return errors.Wrap(auth.ErrMemberAlreadyAssigned, errors.New(pgErr.Detail)) - } - } - - return errors.Wrap(auth.ErrAssignToGroup, err) - } - } - - if err = tx.Commit(); err != nil { - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - return nil -} - -func (gr groupRepository) Unassign(ctx context.Context, groupID string, ids ...string) error { - tx, err := gr.db.BeginTxx(ctx, nil) - if err != nil { - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - qDel := `DELETE from group_relations WHERE group_id = :group_id AND member_id = :member_id` - - for _, id := range ids { - dbg, err := toDBGroupRelation(id, groupID, "") - if err != nil { - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - if _, err := tx.NamedExecContext(ctx, qDel, dbg); err != nil { - if rollbackErr := tx.Rollback(); rollbackErr != nil { - err = errors.Wrap(rollbackErr, err) - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - pgErr, ok := 
err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.UniqueViolation: - return errors.Wrap(errors.ErrConflict, err) - } - } - - return errors.Wrap(auth.ErrAssignToGroup, err) - } - } - - if err = tx.Commit(); err != nil { - return errors.Wrap(auth.ErrAssignToGroup, err) - } - - return nil -} - -type dbMember struct { - MemberID string `db:"member_id"` - GroupID string `db:"group_id"` - Type string `db:"type"` - CreatedAt time.Time `db:"created_at"` - UpdatedAt time.Time `db:"updated_at"` -} - -type dbGroup struct { - ID string `db:"id"` - ParentID sql.NullString `db:"parent_id"` - OwnerID uuid.NullUUID `db:"owner_id"` - Name string `db:"name"` - Description string `db:"description"` - Metadata dbMetadata `db:"metadata"` - Level int `db:"level"` - Path string `db:"path"` - CreatedAt time.Time `db:"created_at"` - UpdatedAt time.Time `db:"updated_at"` -} - -type dbGroupPage struct { - ID string `db:"id"` - ParentID string `db:"parent_id"` - OwnerID uuid.NullUUID `db:"owner_id"` - Metadata dbMetadata `db:"metadata"` - Path string `db:"path"` - Level uint64 `db:"level"` - Total uint64 `db:"total"` - Limit uint64 `db:"limit"` - Offset uint64 `db:"offset"` -} - -type dbMemberPage struct { - GroupID string `db:"group_id"` - MemberID string `db:"member_id"` - Type string `db:"type"` - Metadata dbMetadata `db:"metadata"` - Limit uint64 `db:"limit"` - Offset uint64 `db:"offset"` - Size uint64 -} - -func toUUID(id string) (uuid.NullUUID, error) { - var uid uuid.NullUUID - if id == "" { - return uuid.NullUUID{UUID: uuid.Nil, Valid: false}, nil - } - err := uid.Scan(id) - return uid, err -} - -func toString(id uuid.NullUUID) (string, error) { - if id.Valid { - return id.UUID.String(), nil - } - if id.UUID == uuid.Nil { - return "", nil - } - return "", errStringToUUID -} - -func toDBGroup(g auth.Group) (dbGroup, error) { - ownerID, err := toUUID(g.OwnerID) - if err 
!= nil { - return dbGroup{}, err - } - - var parentID sql.NullString - if g.ParentID != "" { - parentID = sql.NullString{String: g.ParentID, Valid: true} - } - - meta := dbMetadata(g.Metadata) - - return dbGroup{ - ID: g.ID, - Name: g.Name, - ParentID: parentID, - OwnerID: ownerID, - Description: g.Description, - Metadata: meta, - Path: g.Path, - CreatedAt: g.CreatedAt, - UpdatedAt: g.UpdatedAt, - }, nil -} - -func toDBGroupPage(id, path string, pm auth.PageMetadata) (dbGroupPage, error) { - level := auth.MaxLevel - if pm.Level < auth.MaxLevel { - level = pm.Level - } - return dbGroupPage{ - Metadata: dbMetadata(pm.Metadata), - ID: id, - Path: path, - Level: level, - Total: pm.Total, - Offset: pm.Offset, - Limit: pm.Limit, - }, nil -} - -func toDBMemberPage(memberID, groupID, groupType string, pm auth.PageMetadata) (dbMemberPage, error) { - return dbMemberPage{ - GroupID: groupID, - MemberID: memberID, - Type: groupType, - Metadata: dbMetadata(pm.Metadata), - Offset: pm.Offset, - Limit: pm.Limit, - }, nil -} - -func toGroup(dbu dbGroup) (auth.Group, error) { - ownerID, err := toString(dbu.OwnerID) - if err != nil { - return auth.Group{}, err - } - - return auth.Group{ - ID: dbu.ID, - Name: dbu.Name, - ParentID: dbu.ParentID.String, - OwnerID: ownerID, - Description: dbu.Description, - Metadata: auth.GroupMetadata(dbu.Metadata), - Level: dbu.Level, - Path: dbu.Path, - UpdatedAt: dbu.UpdatedAt, - CreatedAt: dbu.CreatedAt, - }, nil -} - -type dbGroupRelation struct { - GroupID sql.NullString `db:"group_id"` - MemberID sql.NullString `db:"member_id"` - CreatedAt time.Time `db:"created_at"` - UpdatedAt time.Time `db:"updated_at"` - Type string `db:"type"` -} - -func toDBGroupRelation(memberID, groupID, groupType string) (dbGroupRelation, error) { - var grID sql.NullString - if groupID != "" { - grID = sql.NullString{String: groupID, Valid: true} - } - - var mID sql.NullString - if memberID != "" { - mID = sql.NullString{String: memberID, Valid: true} - } - - return 
dbGroupRelation{ - GroupID: grID, - MemberID: mID, - Type: groupType, - }, nil -} - -func getGroupsMetadataQuery(db string, m auth.GroupMetadata) (mb []byte, mq string, err error) { - if len(m) > 0 { - mq = `metadata @> :metadata` - if db != "" { - mq = db + "." + mq - } - - b, err := json.Marshal(m) - if err != nil { - return nil, "", errors.Wrap(err, errCreateMetadataQuery) - } - mb = b - } - return mb, mq, nil -} - -func (gr groupRepository) processRows(rows *sqlx.Rows) ([]auth.Group, error) { - var items []auth.Group - for rows.Next() { - dbg := dbGroup{} - if err := rows.StructScan(&dbg); err != nil { - return items, err - } - group, err := toGroup(dbg) - if err != nil { - return items, err - } - items = append(items, group) - } - return items, nil -} - -func total(ctx context.Context, db Database, query string, params interface{}) (uint64, error) { - rows, err := db.NamedQueryContext(ctx, query, params) - if err != nil { - return 0, errors.Wrap(errGetTotal, err) - } - defer rows.Close() - total := uint64(0) - if rows.Next() { - if err := rows.Scan(&total); err != nil { - return 0, errors.Wrap(errGetTotal, err) - } - } - return total, nil -} - -// dbMetadata type for handling metadata properly in database/sql -type dbMetadata map[string]interface{} - -// Scan - Implement the database/sql scanner interface -func (m *dbMetadata) Scan(value interface{}) error { - if value == nil { - return nil - } - - b, ok := value.([]byte) - if !ok { - return errors.ErrScanMetadata - } - - if err := json.Unmarshal(b, m); err != nil { - return err - } - - return nil -} - -// Value Implements valuer -func (m dbMetadata) Value() (driver.Value, error) { - if len(m) == 0 { - return nil, nil - } - - b, err := json.Marshal(m) - if err != nil { - return nil, err - } - return b, err -} diff --git a/auth/postgres/groups_test.go b/auth/postgres/groups_test.go deleted file mode 100644 index e3db3aad11..0000000000 --- a/auth/postgres/groups_test.go +++ /dev/null @@ -1,777 +0,0 @@ -// 
Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres_test - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/auth/postgres" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - maxNameSize = 254 - maxDescSize = 1024 - groupName = "Mainflux" - description = "description" -) - -var ( - invalidName = strings.Repeat("m", maxNameSize+1) - invalidDesc = strings.Repeat("m", maxDescSize+1) - metadata = auth.GroupMetadata{ - "admin": "true", - } -) - -func generateGroupID(t *testing.T) string { - grpID, err := ulidProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - return grpID -} - -func TestGroupSave(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - usrID, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - wrongID, err := ulidProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - grpID := generateGroupID(t) - - cases := []struct { - desc string - group auth.Group - err error - }{ - { - desc: "create new group", - group: auth.Group{ - ID: grpID, - OwnerID: usrID, - Name: groupName, - }, - err: nil, - }, - { - desc: "create new group with existing name", - group: auth.Group{ - ID: grpID, - OwnerID: usrID, - Name: groupName, - }, - err: errors.ErrConflict, - }, - { - desc: "create group with invalid name", - group: auth.Group{ - ID: generateGroupID(t), - OwnerID: usrID, - Name: invalidName, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "create group with invalid description", - group: auth.Group{ - ID: generateGroupID(t), - OwnerID: usrID, - Name: groupName, - Description: invalidDesc, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "create group with 
parent", - group: auth.Group{ - ID: generateGroupID(t), - ParentID: grpID, - OwnerID: usrID, - Name: "withParent", - }, - err: nil, - }, - { - desc: "create group with parent and existing name", - group: auth.Group{ - ID: generateGroupID(t), - ParentID: grpID, - OwnerID: usrID, - Name: groupName, - }, - err: nil, - }, - { - desc: "create group with wrong parent", - group: auth.Group{ - ID: generateGroupID(t), - ParentID: wrongID, - OwnerID: usrID, - Name: "wrongParent", - }, - err: errors.ErrCreateEntity, - }, - } - - for _, tc := range cases { - _, err := groupRepo.Save(context.Background(), tc.group) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } - -} - -func TestGroupRetrieveByID(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - require.Nil(t, err, fmt.Sprintf("group id unexpected error: %s", err)) - group1 := auth.Group{ - ID: generateGroupID(t), - Name: groupName + "TestGroupRetrieveByID1", - OwnerID: uid, - } - - _, err = groupRepo.Save(context.Background(), group1) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - retrieved, err := groupRepo.RetrieveByID(context.Background(), group1.ID) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - assert.True(t, retrieved.ID == group1.ID, fmt.Sprintf("Save group, ID: expected %s got %s\n", group1.ID, retrieved.ID)) - - // Round to milliseconds as otherwise saving and retrieving from DB - // adds rounding error. 
- creationTime := time.Now().UTC().Round(time.Millisecond) - group2 := auth.Group{ - ID: generateGroupID(t), - Name: groupName + "TestGroupRetrieveByID", - OwnerID: uid, - ParentID: group1.ID, - CreatedAt: creationTime, - UpdatedAt: creationTime, - Description: description, - Metadata: metadata, - } - - _, err = groupRepo.Save(context.Background(), group2) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - retrieved, err = groupRepo.RetrieveByID(context.Background(), group2.ID) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - assert.True(t, retrieved.ID == group2.ID, fmt.Sprintf("Save group, ID: expected %s got %s\n", group2.ID, retrieved.ID)) - assert.True(t, retrieved.CreatedAt.Equal(creationTime), fmt.Sprintf("Save group, CreatedAt: expected %s got %s\n", creationTime, retrieved.CreatedAt)) - assert.True(t, retrieved.UpdatedAt.Equal(creationTime), fmt.Sprintf("Save group, UpdatedAt: expected %s got %s\n", creationTime, retrieved.UpdatedAt)) - assert.True(t, retrieved.Level == 2, fmt.Sprintf("Save group, Level: expected %d got %d\n", retrieved.Level, 2)) - assert.True(t, retrieved.ParentID == group1.ID, fmt.Sprintf("Save group, Level: expected %s got %s\n", group1.ID, retrieved.ParentID)) - assert.True(t, retrieved.Description == description, fmt.Sprintf("Save group, Description: expected %v got %v\n", retrieved.Description, description)) - assert.True(t, retrieved.Path == fmt.Sprintf("%s.%s", group1.ID, group2.ID), fmt.Sprintf("Save group, Path: expected %s got %s\n", fmt.Sprintf("%s.%s", group1.ID, group2.ID), retrieved.Path)) - - retrieved, err = groupRepo.RetrieveByID(context.Background(), generateGroupID(t)) - assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("Retrieve group: expected %s got %s\n", errors.ErrNotFound, err)) -} - -func TestGroupUpdate(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - 
uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - updateTime := time.Now().UTC() - groupID := generateGroupID(t) - - group := auth.Group{ - ID: groupID, - Name: groupName + "TestGroupUpdate", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - Description: description, - Metadata: metadata, - } - - _, err = groupRepo.Save(context.Background(), group) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - retrieved, err := groupRepo.RetrieveByID(context.Background(), group.ID) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - cases := []struct { - desc string - groupUpdate auth.Group - groupExpected auth.Group - err error - }{ - { - desc: "update group for existing id", - groupUpdate: auth.Group{ - ID: groupID, - Name: groupName + "Updated", - UpdatedAt: updateTime, - Metadata: auth.GroupMetadata{"admin": "false"}, - }, - groupExpected: auth.Group{ - Name: groupName + "Updated", - UpdatedAt: updateTime, - Metadata: auth.GroupMetadata{"admin": "false"}, - CreatedAt: retrieved.CreatedAt, - Path: retrieved.Path, - ParentID: retrieved.ParentID, - ID: retrieved.ID, - Level: retrieved.Level, - }, - err: nil, - }, - { - desc: "update group for non-existing id", - groupUpdate: auth.Group{ - ID: "wrong", - Name: groupName + "-2", - }, - err: errors.ErrUpdateEntity, - }, - { - desc: "update group for invalid name", - groupUpdate: auth.Group{ - ID: groupID, - Name: invalidName, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "update group for invalid description", - groupUpdate: auth.Group{ - ID: groupID, - Description: invalidDesc, - }, - err: errors.ErrMalformedEntity, - }, - } - - for _, tc := range cases { - updated, err := groupRepo.Update(context.Background(), tc.groupUpdate) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - if tc.desc == 
"update group for existing id" { - assert.True(t, updated.Level == tc.groupExpected.Level, fmt.Sprintf("%s:Level: expected %d got %d\n", tc.desc, tc.groupExpected.Level, updated.Level)) - assert.True(t, updated.Name == tc.groupExpected.Name, fmt.Sprintf("%s:Name: expected %s got %s\n", tc.desc, tc.groupExpected.Name, updated.Name)) - assert.True(t, updated.Metadata["admin"] == tc.groupExpected.Metadata["admin"], fmt.Sprintf("%s:Level: expected %d got %d\n", tc.desc, tc.groupExpected.Metadata["admin"], updated.Metadata["admin"])) - } - } -} - -func TestGroupDelete(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - groupParent := auth.Group{ - ID: generateGroupID(t), - Name: groupName + "Updated", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - groupParent, err = groupRepo.Save(context.Background(), groupParent) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - creationTime = time.Now().UTC() - groupChild1 := auth.Group{ - ID: generateGroupID(t), - ParentID: groupParent.ID, - Name: groupName + "child1", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - creationTime = time.Now().UTC() - groupChild2 := auth.Group{ - ID: generateGroupID(t), - ParentID: groupParent.ID, - Name: groupName + "child2", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - meta := auth.PageMetadata{ - Level: auth.MaxLevel, - } - - groupChild1, err = groupRepo.Save(context.Background(), groupChild1) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - groupChild2, err = groupRepo.Save(context.Background(), groupChild2) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - gp, err := 
groupRepo.RetrieveAllChildren(context.Background(), groupParent.ID, meta) - assert.True(t, errors.Contains(err, nil), fmt.Sprintf("Retrieve children for parent: expected %v got %v\n", nil, err)) - assert.True(t, gp.Total == 3, fmt.Sprintf("Number of children + parent: expected %d got %d\n", 3, gp.Total)) - - thingID, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("thing id create unexpected error: %s", err)) - - err = groupRepo.Assign(context.Background(), groupChild1.ID, "things", thingID) - require.Nil(t, err, fmt.Sprintf("thing assign got unexpected error: %s", err)) - - err = groupRepo.Delete(context.Background(), groupChild1.ID) - assert.True(t, errors.Contains(err, auth.ErrGroupNotEmpty), fmt.Sprintf("delete non empty group: expected %v got %v\n", auth.ErrGroupNotEmpty, err)) - - err = groupRepo.Delete(context.Background(), groupChild2.ID) - assert.True(t, errors.Contains(err, nil), fmt.Sprintf("delete empty group: expected %v got %v\n", nil, err)) - - err = groupRepo.Delete(context.Background(), groupParent.ID) - assert.True(t, errors.Contains(err, auth.ErrGroupNotEmpty), fmt.Sprintf("delete parent with children with members: expected %v got %v\n", auth.ErrGroupNotEmpty, err)) - - gp, err = groupRepo.RetrieveAllChildren(context.Background(), groupParent.ID, meta) - assert.True(t, errors.Contains(err, nil), fmt.Sprintf("retrieve children after one child removed: expected %v got %v\n", nil, err)) - assert.True(t, gp.Total == 2, fmt.Sprintf("number of children + parent: expected %d got %d\n", 2, gp.Total)) - - err = groupRepo.Unassign(context.Background(), groupChild1.ID, thingID) - require.Nil(t, err, fmt.Sprintf("failed to remove thing from a group error: %s", err)) - - err = groupRepo.Delete(context.Background(), groupParent.ID) - assert.True(t, errors.Contains(err, nil), fmt.Sprintf("delete parent with children with no members: expected %v got %v\n", nil, err)) - - _, err = groupRepo.RetrieveByID(context.Background(), groupChild1.ID) - 
assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("retrieve child after parent removed: expected %v got %v\n", nil, err)) -} - -func TestRetrieveAll(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - metadata := auth.PageMetadata{ - Metadata: auth.GroupMetadata{ - "field": "value", - }, - Level: auth.MaxLevel, - } - wrongMeta := auth.PageMetadata{ - Metadata: auth.GroupMetadata{ - "wrong": "wrong", - }, - Level: auth.MaxLevel, - } - - metaNum := uint64(3) - - n := uint64(auth.MaxLevel) - parentID := "" - for i := uint64(0); i < n; i++ { - creationTime := time.Now().UTC() - group := auth.Group{ - ID: generateGroupID(t), - Name: fmt.Sprintf("%s-%d", groupName, i), - OwnerID: uid, - ParentID: parentID, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - // Create Groups with metadata. 
- if i < metaNum { - group.Metadata = metadata.Metadata - } - - _, err = groupRepo.Save(context.Background(), group) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - parentID = group.ID - } - - cases := map[string]struct { - Size uint64 - Metadata auth.PageMetadata - }{ - "retrieve all groups": { - Metadata: auth.PageMetadata{ - Total: n, - Limit: n, - Level: auth.MaxLevel, - }, - Size: n, - }, - "retrieve groups with existing metadata": { - Metadata: auth.PageMetadata{ - Total: metaNum, - Limit: n, - Level: auth.MaxLevel, - Metadata: metadata.Metadata, - }, - Size: metaNum, - }, - "retrieve groups with non-existing metadata": { - Metadata: auth.PageMetadata{ - Total: uint64(0), - Limit: n, - Level: auth.MaxLevel, - Metadata: wrongMeta.Metadata, - }, - Size: uint64(0), - }, - "retrieve groups with hierarchy level depth": { - Metadata: auth.PageMetadata{ - Total: uint64(metaNum), - Limit: n, - Level: auth.MaxLevel, - Metadata: metadata.Metadata, - }, - Size: uint64(metaNum), - }, - "retrieve groups with hierarchy level depth and existing metadata": { - Metadata: auth.PageMetadata{ - Total: uint64(metaNum), - Limit: n, - Level: auth.MaxLevel, - Metadata: metadata.Metadata, - }, - Size: uint64(metaNum), - }, - } - - for desc, tc := range cases { - page, err := groupRepo.RetrieveAll(context.Background(), tc.Metadata) - size := len(page.Groups) - assert.Equal(t, tc.Size, uint64(size), fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.Size, size)) - assert.Equal(t, tc.Metadata.Total, page.Total, fmt.Sprintf("%s: expected total %d got %d\n", desc, tc.Metadata.Total, page.Total)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) - } -} - -func TestRetrieveAllParents(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - metadata := 
auth.GroupMetadata{ - "field": "value", - } - wrongMeta := auth.GroupMetadata{ - "wrong": "wrong", - } - - p, err := groupRepo.RetrieveAll(context.Background(), auth.PageMetadata{Level: auth.MaxLevel}) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - assert.Equal(t, uint64(0), p.Total, fmt.Sprintf("expected total %d got %d\n", 0, p.Total)) - - metaNum := uint64(3) - - n := uint64(10) - parentID := "" - parentMiddle := "" - for i := uint64(0); i < n; i++ { - creationTime := time.Now().UTC() - group := auth.Group{ - ID: generateGroupID(t), - Name: fmt.Sprintf("%s-%d", groupName, i), - OwnerID: uid, - ParentID: parentID, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - // Create Groups with metadata. - if n-i <= metaNum { - group.Metadata = metadata - } - if i == n/2 { - parentMiddle = group.ID - } - _, err = groupRepo.Save(context.Background(), group) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - parentID = group.ID - } - - cases := map[string]struct { - level uint64 - parentID string - Size uint64 - Total uint64 - Metadata auth.GroupMetadata - }{ - "retrieve all parents": { - Total: n, - Size: auth.MaxLevel + 1, - level: auth.MaxLevel, - parentID: parentID, - }, - "retrieve groups with existing metadata": { - Total: metaNum, - Size: metaNum, - Metadata: metadata, - parentID: parentID, - level: auth.MaxLevel, - }, - "retrieve groups with non-existing metadata": { - Total: uint64(0), - Metadata: wrongMeta, - Size: uint64(0), - level: auth.MaxLevel, - parentID: parentID, - }, - "retrieve groups with hierarchy level depth": { - Total: n, - Size: 2 + 1, - level: uint64(2), - parentID: parentID, - }, - "retrieve groups with hierarchy level depth and existing metadata": { - Total: metaNum, - Size: metaNum, - level: 3, - Metadata: metadata, - parentID: parentID, - }, - "retrieve parent groups from children in the middle": { - Total: n/2 + 1, - Size: n/2 + 1, - level: auth.MaxLevel, - parentID: parentMiddle, - }, - } - - 
for desc, tc := range cases { - page, err := groupRepo.RetrieveAllParents(context.Background(), tc.parentID, auth.PageMetadata{Level: tc.level, Metadata: tc.Metadata}) - size := len(page.Groups) - assert.Equal(t, tc.Size, uint64(size), fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.Size, size)) - assert.Equal(t, tc.Total, page.Total, fmt.Sprintf("%s: expected total %d got %d\n", desc, tc.Total, page.Total)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) - } -} - -func TestRetrieveAllChildren(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - metadata := auth.GroupMetadata{ - "field": "value", - } - wrongMeta := auth.GroupMetadata{ - "wrong": "wrong", - } - - metaNum := uint64(3) - - n := uint64(10) - groupID := generateGroupID(t) - firstParentID := groupID - parentID := "" - parentMiddle := "" - for i := uint64(0); i < n; i++ { - creationTime := time.Now().UTC() - group := auth.Group{ - ID: groupID, - Name: fmt.Sprintf("%s-%d", groupName, i), - OwnerID: uid, - ParentID: parentID, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - // Create Groups with metadata. 
- if i < metaNum { - group.Metadata = metadata - } - if i == n/2 { - parentMiddle = group.ID - } - _, err = groupRepo.Save(context.Background(), group) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - parentID = group.ID - groupID = generateGroupID(t) - } - - p, err := groupRepo.RetrieveAll(context.Background(), auth.PageMetadata{Level: auth.MaxLevel}) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - assert.Equal(t, n, p.Total, fmt.Sprintf("expected total %d got %d\n", n, p.Total)) - - cases := map[string]struct { - parentID string - size uint64 - total uint64 - metadata auth.PageMetadata - }{ - "retrieve all children": { - size: auth.MaxLevel, - total: n, - metadata: auth.PageMetadata{ - Level: auth.MaxLevel, - }, - parentID: firstParentID, - }, - "retrieve groups with existing metadata": { - size: metaNum, - total: metaNum, - metadata: auth.PageMetadata{ - Level: auth.MaxLevel, - Metadata: metadata, - }, - parentID: firstParentID, - }, - "retrieve groups with non-existing metadata": { - total: 0, - size: 0, - metadata: auth.PageMetadata{ - Level: auth.MaxLevel, - Metadata: wrongMeta, - }, - parentID: firstParentID, - }, - "retrieve groups with hierarchy level depth": { - total: n, - size: 2, - metadata: auth.PageMetadata{ - Level: 2, - }, - parentID: firstParentID, - }, - "retrieve groups with hierarchy level depth and existing metadata": { - total: metaNum, - size: metaNum, - metadata: auth.PageMetadata{ - Level: 3, - Metadata: metadata, - }, - parentID: firstParentID, - }, - "retrieve parent groups from children in the middle": { - total: n / 2, - size: n / 2, - metadata: auth.PageMetadata{ - Level: auth.MaxLevel, - }, - parentID: parentMiddle, - }, - } - - for desc, tc := range cases { - page, err := groupRepo.RetrieveAllChildren(context.Background(), tc.parentID, tc.metadata) - size := len(page.Groups) - assert.Equal(t, tc.size, uint64(size), fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) - 
assert.Equal(t, tc.total, page.Total, fmt.Sprintf("%s: expected total %d got %d\n", desc, tc.total, page.Total)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) - } -} - -func TestAssign(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - group := auth.Group{ - ID: generateGroupID(t), - Name: groupName + "Updated", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - pm := auth.PageMetadata{ - Offset: 0, - Limit: 10, - } - - group, err = groupRepo.Save(context.Background(), group) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - mid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - err = groupRepo.Assign(context.Background(), group.ID, "things", mid) - require.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - - mp, err := groupRepo.Members(context.Background(), group.ID, "things", pm) - require.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - assert.True(t, mp.Total == 1, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 1, mp.Total)) - - err = groupRepo.Assign(context.Background(), group.ID, "things", mid) - assert.True(t, errors.Contains(err, auth.ErrMemberAlreadyAssigned), fmt.Sprintf("assign member again: expected %v got %v\n", auth.ErrMemberAlreadyAssigned, err)) -} - -func TestUnassign(t *testing.T) { - t.Cleanup(func() { cleanUp(t) }) - dbMiddleware := postgres.NewDatabase(db) - groupRepo := postgres.NewGroupRepo(dbMiddleware) - - uid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - group := auth.Group{ - ID: generateGroupID(t), - Name: groupName + 
"Updated", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - pm := auth.PageMetadata{ - Offset: 0, - Limit: 10, - } - - group, err = groupRepo.Save(context.Background(), group) - require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - mid, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - err = groupRepo.Assign(context.Background(), group.ID, "things", mid) - require.Nil(t, err, fmt.Sprintf("member assign unexpected error: %s", err)) - - mid, err = idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - err = groupRepo.Assign(context.Background(), group.ID, "things", mid) - require.Nil(t, err, fmt.Sprintf("member assign unexpected error: %s", err)) - - mp, err := groupRepo.Members(context.Background(), group.ID, "things", pm) - require.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - assert.True(t, mp.Total == 2, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 2, mp.Total)) - - err = groupRepo.Unassign(context.Background(), group.ID, mid) - require.Nil(t, err, fmt.Sprintf("member unassign save unexpected error: %s", err)) - - mp, err = groupRepo.Members(context.Background(), group.ID, "things", pm) - require.Nil(t, err, fmt.Sprintf("members retrieve unexpected error: %s", err)) - assert.True(t, mp.Total == 1, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 1, mp.Total)) -} - -func cleanUp(t *testing.T) { - _, err := db.Exec("delete from group_relations") - require.Nil(t, err, fmt.Sprintf("clean relations unexpected error: %s", err)) - _, err = db.Exec("delete from groups") - require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) -} diff --git a/auth/postgres/init.go b/auth/postgres/init.go deleted file mode 100644 index fc6992f536..0000000000 --- a/auth/postgres/init.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: 
Apache-2.0 - -package postgres - -import migrate "github.com/rubenv/sql-migrate" - -// Migration of Auth service -func Migration() *migrate.MemoryMigrationSource { - return &migrate.MemoryMigrationSource{ - Migrations: []*migrate.Migration{ - { - Id: "auth_1", - Up: []string{ - `CREATE TABLE IF NOT EXISTS keys ( - id VARCHAR(254) NOT NULL, - type SMALLINT, - subject VARCHAR(254) NOT NULL, - issuer_id UUID NOT NULL, - issued_at TIMESTAMP NOT NULL, - expires_at TIMESTAMP, - PRIMARY KEY (id, issuer_id) - )`, - `CREATE EXTENSION IF NOT EXISTS LTREE`, - `CREATE TABLE IF NOT EXISTS groups ( - id VARCHAR(254) UNIQUE NOT NULL, - parent_id VARCHAR(254), - owner_id VARCHAR(254), - name VARCHAR(254) NOT NULL, - description VARCHAR(1024), - metadata JSONB, - path LTREE, - created_at TIMESTAMPTZ, - updated_at TIMESTAMPTZ, - UNIQUE (owner_id, name, parent_id), - FOREIGN KEY (parent_id) REFERENCES groups (id) ON DELETE CASCADE - )`, - `CREATE TABLE IF NOT EXISTS group_relations ( - member_id VARCHAR(254) NOT NULL, - group_id VARCHAR(254) NOT NULL, - type VARCHAR(254), - created_at TIMESTAMPTZ, - updated_at TIMESTAMPTZ, - FOREIGN KEY (group_id) REFERENCES groups (id), - PRIMARY KEY (member_id, group_id) - )`, - `CREATE INDEX path_gist_idx ON groups USING GIST (path);`, - `CREATE OR REPLACE FUNCTION inherit_group() - RETURNS trigger - LANGUAGE PLPGSQL - AS - $$ - BEGIN - IF NEW.parent_id IS NULL OR NEW.parent_id = '' THEN - RETURN NEW; - END IF; - IF NOT EXISTS (SELECT id FROM groups WHERE id = NEW.parent_id) THEN - RAISE EXCEPTION 'wrong parent id'; - END IF; - SELECT text2ltree(ltree2text(path) || '.' 
|| NEW.id) INTO NEW.path FROM groups WHERE id = NEW.parent_id; - RETURN NEW; - END; - $$`, - `CREATE TRIGGER inherit_group_tr - BEFORE INSERT - ON groups - FOR EACH ROW - EXECUTE PROCEDURE inherit_group();`, - }, - Down: []string{ - `DROP TABLE IF EXISTS keys`, - `DROP EXTENSION IF EXISTS LTREE`, - `DROP TABLE IF EXISTS groups`, - `DROP TABLE IF EXISTS group_relations`, - `DROP FUNCTION IF EXISTS inherit_group`, - `DROP TRIGGER IF EXISTS inherit_group_tr ON groups`, - }, - }, - }, - } -} diff --git a/auth/postgres/key.go b/auth/postgres/key.go deleted file mode 100644 index caa55e5278..0000000000 --- a/auth/postgres/key.go +++ /dev/null @@ -1,173 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - "fmt" - "strings" - "time" - - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" -) - -var ( - errSave = errors.New("failed to save key in database") - errRetrieve = errors.New("failed to retrieve key from database") - errDelete = errors.New("failed to delete key from database") -) -var _ auth.KeyRepository = (*repo)(nil) - -type repo struct { - db Database -} - -// New instantiates a PostgreSQL implementation of key repository. 
-func New(db Database) auth.KeyRepository { - return &repo{ - db: db, - } -} - -func (kr repo) Save(ctx context.Context, key auth.Key) (string, error) { - q := `INSERT INTO keys (id, type, issuer_id, subject, issued_at, expires_at) - VALUES (:id, :type, :issuer_id, :subject, :issued_at, :expires_at)` - - dbKey := toDBKey(key) - if _, err := kr.db.NamedExecContext(ctx, q, dbKey); err != nil { - - if pgErr, ok := err.(*pgconn.PgError); ok && pgErr.Code == pgerrcode.UniqueViolation { - return "", errors.Wrap(errors.ErrConflict, err) - } - - return "", errors.Wrap(errSave, err) - } - - return dbKey.ID, nil -} - -func (kr repo) RetrieveByID(ctx context.Context, issuerID, id string) (auth.Key, error) { - q := `SELECT id, type, issuer_id, subject, issued_at, expires_at FROM keys WHERE issuer_id = $1 AND id = $2` - key := dbKey{} - if err := kr.db.QueryRowxContext(ctx, q, issuerID, id).StructScan(&key); err != nil { - pgErr, ok := err.(*pgconn.PgError) - if err == sql.ErrNoRows || ok && pgerrcode.InvalidTextRepresentation == pgErr.Code { - return auth.Key{}, errors.Wrap(errors.ErrNotFound, err) - } - - return auth.Key{}, errors.Wrap(errRetrieve, err) - } - - return toKey(key), nil -} - -func (kr repo) RetrieveAll(ctx context.Context, issuerID string, pm auth.PageMetadata) (auth.KeyPage, error) { - var query []string - var emq string - query = append(query, fmt.Sprintf("issuer_id = '%s'", issuerID)) - if pm.Type != 0 { - query = append(query, fmt.Sprintf("type = '%d'", pm.Type)) - } - if pm.Subject != "" { - query = append(query, fmt.Sprintf("subject = '%s'", pm.Subject)) - } - if len(query) > 0 { - emq = fmt.Sprintf(" WHERE %s", strings.Join(query, " AND ")) - } - - q := fmt.Sprintf(`SELECT id, type, issuer_id, subject, issued_at, expires_at FROM keys %s ORDER BY issued_at LIMIT :limit OFFSET :offset;`, emq) - params := map[string]interface{}{ - "limit": pm.Limit, - "offset": pm.Offset, - } - - rows, err := kr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return 
auth.KeyPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - var items []auth.Key - for rows.Next() { - dbkey := dbKey{} - if err := rows.StructScan(&dbkey); err != nil { - return auth.KeyPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - key := toKey(dbkey) - items = append(items, key) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM keys %s;`, emq) - - total, err := total(ctx, kr.db, cq, params) - if err != nil { - return auth.KeyPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - page := auth.KeyPage{ - Keys: items, - PageMetadata: auth.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (kr repo) Remove(ctx context.Context, issuerID, id string) error { - q := `DELETE FROM keys WHERE issuer_id = :issuer_id AND id = :id` - key := dbKey{ - ID: id, - IssuerID: issuerID, - } - if _, err := kr.db.NamedExecContext(ctx, q, key); err != nil { - return errors.Wrap(errDelete, err) - } - - return nil -} - -type dbKey struct { - ID string `db:"id"` - Type uint32 `db:"type"` - IssuerID string `db:"issuer_id"` - Subject string `db:"subject"` - Revoked bool `db:"revoked"` - IssuedAt time.Time `db:"issued_at"` - ExpiresAt sql.NullTime `db:"expires_at"` -} - -func toDBKey(key auth.Key) dbKey { - ret := dbKey{ - ID: key.ID, - Type: key.Type, - IssuerID: key.IssuerID, - Subject: key.Subject, - IssuedAt: key.IssuedAt, - } - if !key.ExpiresAt.IsZero() { - ret.ExpiresAt = sql.NullTime{Time: key.ExpiresAt, Valid: true} - } - - return ret -} - -func toKey(key dbKey) auth.Key { - ret := auth.Key{ - ID: key.ID, - Type: key.Type, - IssuerID: key.IssuerID, - Subject: key.Subject, - IssuedAt: key.IssuedAt, - } - if key.ExpiresAt.Valid { - ret.ExpiresAt = key.ExpiresAt.Time - } - - return ret -} diff --git a/auth/postgres/key_test.go b/auth/postgres/key_test.go deleted file mode 100644 index e455def108..0000000000 --- a/auth/postgres/key_test.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (c) Mainflux 
-// SPDX-License-Identifier: Apache-2.0 - -package postgres_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/auth/postgres" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/ulid" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const email = "user-save@example.com" - -var ( - expTime = time.Now().Add(5 * time.Minute) - idProvider = uuid.New() - ulidProvider = ulid.New() -) - -func TestKeySave(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - repo := postgres.New(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - key auth.Key - err error - }{ - { - desc: "save a new key", - key: auth.Key{ - Subject: email, - IssuedAt: time.Now(), - ExpiresAt: expTime, - ID: id, - IssuerID: id, - }, - err: nil, - }, - { - desc: "save with duplicate id", - key: auth.Key{ - Subject: email, - IssuedAt: time.Now(), - ExpiresAt: expTime, - ID: id, - IssuerID: id, - }, - err: errors.ErrConflict, - }, - } - - for _, tc := range cases { - _, err := repo.Save(context.Background(), tc.key) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestKeyRetrieveByID(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - repo := postgres.New(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - key := auth.Key{ - Subject: email, - IssuedAt: time.Now(), - ExpiresAt: expTime, - ID: id, - IssuerID: id, - } - _, err = repo.Save(context.Background(), key) - assert.Nil(t, err, fmt.Sprintf("Storing Key expected to succeed: %s", err)) - cases := []struct { - desc string - id string - owner string - err error - }{ - { - desc: 
"retrieve an existing key", - id: key.ID, - owner: key.IssuerID, - err: nil, - }, - { - desc: "retrieve key with empty issuer id", - id: key.ID, - owner: "", - err: errors.ErrNotFound, - }, - { - desc: "retrieve non-existent key", - id: "", - owner: key.IssuerID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - _, err := repo.RetrieveByID(context.Background(), tc.owner, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestKeyRetrieveAll(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - repo := postgres.New(dbMiddleware) - - issuerID1, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - issuerID2, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - n := uint64(100) - for i := uint64(0); i < n; i++ { - id, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key := auth.Key{ - Subject: fmt.Sprintf("key-%d@email.com", i), - IssuedAt: time.Now(), - ExpiresAt: expTime, - ID: id, - IssuerID: issuerID1, - Type: auth.LoginKey, - } - if i%10 == 0 { - key.Type = auth.APIKey - } - if i == n-1 { - key.IssuerID = issuerID2 - } - _, err = repo.Save(context.Background(), key) - assert.Nil(t, err, fmt.Sprintf("Storing Key expected to succeed: %s", err)) - } - - cases := map[string]struct { - owner string - pageMetadata auth.PageMetadata - size uint64 - }{ - "retrieve all keys": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - }, - size: n - 1, - }, - "retrieve all keys with different issuer ID": { - owner: issuerID2, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - }, - size: 1, - }, - "retrieve subset of keys with existing owner": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: n/2 - 1, - Limit: n, - Total: n, - }, - size: n / 2, - }, - "retrieve keys 
with existing subject": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Subject: "key-10@email.com", - }, - size: 1, - }, - "retrieve keys with non-existing subject": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Subject: "wrong", - Total: 0, - }, - size: 0, - }, - "retrieve keys with existing type": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Type: auth.APIKey, - }, - size: 10, - }, - "retrieve keys with non-existing type": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Total: 0, - Type: uint32(9), - }, - size: 0, - }, - "retrieve all keys with existing subject and type": { - owner: issuerID1, - pageMetadata: auth.PageMetadata{ - Offset: 0, - Limit: n, - Subject: "key-10@email.com", - Type: auth.APIKey, - }, - size: 1, - }, - } - - for desc, tc := range cases { - page, err := repo.RetrieveAll(context.Background(), tc.owner, tc.pageMetadata) - size := uint64(len(page.Keys)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) - // assert.Equal(t, tc.pageMetadata.Total, page.Total, fmt.Sprintf("%s: expected total %d got %d\n", desc, tc.pageMetadata.Total, page.Total)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) - } -} - -func TestKeyRemove(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - repo := postgres.New(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - key := auth.Key{ - Subject: email, - IssuedAt: time.Now(), - ExpiresAt: expTime, - ID: id, - IssuerID: id, - } - _, err = repo.Save(opentracing.ContextWithSpan(context.Background(), opentracing.StartSpan("")), key) - assert.Nil(t, err, fmt.Sprintf("Storing Key expected to succeed: %s", err)) - cases := []struct { - desc string - id string - owner string - err error - }{ - { - desc: "remove an existing key", 
- id: key.ID, - owner: key.IssuerID, - err: nil, - }, - { - desc: "remove key that does not exist", - id: key.ID, - owner: key.IssuerID, - err: nil, - }, - } - - for _, tc := range cases { - err := repo.Remove(context.Background(), tc.owner, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} diff --git a/auth/postgres/setup_test.go b/auth/postgres/setup_test.go deleted file mode 100644 index d76bc174b7..0000000000 --- a/auth/postgres/setup_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package postgres_test contains tests for PostgreSQL repository -// implementations. -package postgres_test - -import ( - "database/sql" - "fmt" - "log" - "os" - "testing" - - "github.com/jmoiron/sqlx" - authRepo "github.com/mainflux/mainflux/auth/postgres" - pgClient "github.com/mainflux/mainflux/internal/clients/postgres" - dockertest "github.com/ory/dockertest/v3" -) - -var db *sqlx.DB - -func TestMain(m *testing.M) { - pool, err := dockertest.NewPool("") - if err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - cfg := []string{ - "POSTGRES_USER=test", - "POSTGRES_PASSWORD=test", - "POSTGRES_DB=test", - } - container, err := pool.Run("postgres", "13.3-alpine", cfg) - if err != nil { - log.Fatalf("Could not start container: %s", err) - } - - port := container.GetPort("5432/tcp") - - if err := pool.Retry(func() error { - url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) - db, err := sql.Open("pgx", url) - if err != nil { - return err - } - return db.Ping() - }); err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - dbConfig := pgClient.Config{ - Host: "localhost", - Port: port, - User: "test", - Pass: "test", - Name: "test", - SSLMode: "disable", - SSLCert: "", - SSLKey: "", - SSLRootCert: "", - } - - if db, err = pgClient.SetupDB(dbConfig, 
*authRepo.Migration()); err != nil { - log.Fatalf("Could not setup test DB connection: %s", err) - } - - code := m.Run() - - // Defers will not be run when using os.Exit - db.Close() - if err := pool.Purge(container); err != nil { - log.Fatalf("Could not purge container: %s", err) - } - - os.Exit(code) -} diff --git a/auth/postgres/tracing.go b/auth/postgres/tracing.go deleted file mode 100644 index 6fe90a89c5..0000000000 --- a/auth/postgres/tracing.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres - -import ( - "context" - "database/sql" - - "github.com/jmoiron/sqlx" - "github.com/opentracing/opentracing-go" -) - -var _ Database = (*database)(nil) - -type database struct { - db *sqlx.DB -} - -// Database provides a database interface -type Database interface { - NamedExecContext(context.Context, string, interface{}) (sql.Result, error) - QueryRowxContext(context.Context, string, ...interface{}) *sqlx.Row - QueryxContext(context.Context, string, ...interface{}) (*sqlx.Rows, error) - NamedQueryContext(context.Context, string, interface{}) (*sqlx.Rows, error) - BeginTxx(ctx context.Context, opts *sql.TxOptions) (*sqlx.Tx, error) -} - -// NewDatabase creates a ThingDatabase instance -func NewDatabase(db *sqlx.DB) Database { - return &database{ - db: db, - } -} - -func (d database) NamedQueryContext(ctx context.Context, query string, args interface{}) (*sqlx.Rows, error) { - addSpanTags(ctx, query) - return d.db.NamedQueryContext(ctx, query, args) -} - -func (d database) NamedExecContext(ctx context.Context, query string, args interface{}) (sql.Result, error) { - addSpanTags(ctx, query) - return d.db.NamedExecContext(ctx, query, args) -} - -func (d database) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *sqlx.Row { - addSpanTags(ctx, query) - return d.db.QueryRowxContext(ctx, query, args...) 
-} - -func (d database) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { - addSpanTags(ctx, query) - return d.db.QueryxContext(ctx, query, args...) -} - -func (d database) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*sqlx.Tx, error) { - span := opentracing.SpanFromContext(ctx) - if span != nil { - span.SetTag("span.kind", "client") - span.SetTag("peer.service", "postgres") - span.SetTag("db.type", "sql") - } - return d.db.BeginTxx(ctx, opts) -} - -func addSpanTags(ctx context.Context, query string) { - span := opentracing.SpanFromContext(ctx) - if span != nil { - span.SetTag("sql.statement", query) - span.SetTag("span.kind", "client") - span.SetTag("peer.service", "postgres") - span.SetTag("db.type", "sql") - } -} diff --git a/auth/service.go b/auth/service.go deleted file mode 100644 index ff3a1639fd..0000000000 --- a/auth/service.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package auth - -import ( - "context" - "fmt" - "time" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/ulid" -) - -const ( - recoveryDuration = 5 * time.Minute - thingsGroupType = "things" - - authoritiesObject = "authorities" - memberRelation = "member" -) - -var ( - // ErrFailedToRetrieveMembers failed to retrieve group members. - ErrFailedToRetrieveMembers = errors.New("failed to retrieve group members") - - // ErrFailedToRetrieveMembership failed to retrieve memberships - ErrFailedToRetrieveMembership = errors.New("failed to retrieve memberships") - - // ErrFailedToRetrieveAll failed to retrieve groups. - ErrFailedToRetrieveAll = errors.New("failed to retrieve all groups") - - // ErrFailedToRetrieveParents failed to retrieve groups. - ErrFailedToRetrieveParents = errors.New("failed to retrieve all groups") - - // ErrFailedToRetrieveChildren failed to retrieve groups. 
- ErrFailedToRetrieveChildren = errors.New("failed to retrieve all groups") - - errIssueUser = errors.New("failed to issue new login key") - errIssueTmp = errors.New("failed to issue new temporary key") - errRevoke = errors.New("failed to remove key") - errRetrieve = errors.New("failed to retrieve key data") - errIdentify = errors.New("failed to validate token") -) - -// Authn specifies an API that must be fullfiled by the domain service -// implementation, and all of its decorators (e.g. logging & metrics). -// Token is a string value of the actual Key and is used to authenticate -// an Auth service request. -type Authn interface { - // Issue issues a new Key, returning its token value alongside. - Issue(ctx context.Context, token string, key Key) (Key, string, error) - - // Revoke removes the Key with the provided id that is - // issued by the user identified by the provided key. - Revoke(ctx context.Context, token, id string) error - - // RetrieveKey retrieves data for the Key identified by the provided - // ID, that is issued by the user identified by the provided key. - RetrieveKey(ctx context.Context, token, id string) (Key, error) - - // RetrieveKeys retrieves data for the Keys that are - // issued by the user identified by the provided key. - RetrieveKeys(ctx context.Context, token string, pm PageMetadata) (KeyPage, error) - - // Identify validates token token. If token is valid, content - // is returned. If token is invalid, or invocation failed for some - // other reason, non-nil error value is returned in response. - Identify(ctx context.Context, token string) (Identity, error) -} - -// Service specifies an API that must be fulfilled by the domain service -// implementation, and all of its decorators (e.g. logging & metrics). -// Token is a string value of the actual Key and is used to authenticate -// an Auth service request. 
-type Service interface { - Authn - Authz - - // GroupService implements groups API, creating groups, assigning members - GroupService -} - -var _ Service = (*service)(nil) - -type service struct { - keys KeyRepository - groups GroupRepository - idProvider mainflux.IDProvider - ulidProvider mainflux.IDProvider - agent PolicyAgent - tokenizer Tokenizer - loginDuration time.Duration -} - -// New instantiates the auth service implementation. -func New(keys KeyRepository, groups GroupRepository, idp mainflux.IDProvider, tokenizer Tokenizer, policyAgent PolicyAgent, duration time.Duration) Service { - return &service{ - tokenizer: tokenizer, - keys: keys, - groups: groups, - idProvider: idp, - ulidProvider: ulid.New(), - agent: policyAgent, - loginDuration: duration, - } -} - -func (svc service) Issue(ctx context.Context, token string, key Key) (Key, string, error) { - if key.IssuedAt.IsZero() { - return Key{}, "", ErrInvalidKeyIssuedAt - } - switch key.Type { - case APIKey: - return svc.userKey(ctx, token, key) - case RecoveryKey: - return svc.tmpKey(recoveryDuration, key) - default: - return svc.tmpKey(svc.loginDuration, key) - } -} - -func (svc service) Revoke(ctx context.Context, token, id string) error { - issuerID, _, err := svc.login(token) - if err != nil { - return errors.Wrap(errRevoke, err) - } - if err := svc.keys.Remove(ctx, issuerID, id); err != nil { - return errors.Wrap(errRevoke, err) - } - return nil -} - -func (svc service) RetrieveKey(ctx context.Context, token, id string) (Key, error) { - issuerID, _, err := svc.login(token) - if err != nil { - return Key{}, errors.Wrap(errRetrieve, err) - } - - return svc.keys.RetrieveByID(ctx, issuerID, id) -} - -func (svc service) RetrieveKeys(ctx context.Context, token string, pm PageMetadata) (KeyPage, error) { - issuerID, _, err := svc.login(token) - if err != nil { - return KeyPage{}, errors.Wrap(errRetrieve, err) - } - - return svc.keys.RetrieveAll(ctx, issuerID, pm) -} - -func (svc service) Identify(ctx 
context.Context, token string) (Identity, error) { - key, err := svc.tokenizer.Parse(token) - if err == ErrAPIKeyExpired { - err = svc.keys.Remove(ctx, key.IssuerID, key.ID) - return Identity{}, errors.Wrap(ErrAPIKeyExpired, err) - } - if err != nil { - return Identity{}, errors.Wrap(errIdentify, err) - } - - switch key.Type { - case RecoveryKey, LoginKey: - return Identity{ID: key.IssuerID, Email: key.Subject}, nil - case APIKey: - _, err := svc.keys.RetrieveByID(context.TODO(), key.IssuerID, key.ID) - if err != nil { - return Identity{}, errors.ErrAuthentication - } - return Identity{ID: key.IssuerID, Email: key.Subject}, nil - default: - return Identity{}, errors.ErrAuthentication - } -} - -func (svc service) Authorize(ctx context.Context, pr PolicyReq) error { - return svc.agent.CheckPolicy(ctx, pr) -} - -func (svc service) AddPolicy(ctx context.Context, pr PolicyReq) error { - return svc.agent.AddPolicy(ctx, pr) -} - -func (svc service) AddPolicies(ctx context.Context, token, object string, subjectIDs, relations []string) error { - user, err := svc.Identify(ctx, token) - if err != nil { - return err - } - - if err := svc.Authorize(ctx, PolicyReq{Object: authoritiesObject, Relation: memberRelation, Subject: user.ID}); err != nil { - return err - } - - var errs error - for _, subjectID := range subjectIDs { - for _, relation := range relations { - if err := svc.AddPolicy(ctx, PolicyReq{Object: object, Relation: relation, Subject: subjectID}); err != nil { - errs = errors.Wrap(fmt.Errorf("cannot add '%s' policy on object '%s' for subject '%s': %w", relation, object, subjectID, err), errs) - } - } - } - return errs -} - -func (svc service) DeletePolicy(ctx context.Context, pr PolicyReq) error { - return svc.agent.DeletePolicy(ctx, pr) -} - -func (svc service) DeletePolicies(ctx context.Context, token, object string, subjectIDs, relations []string) error { - user, err := svc.Identify(ctx, token) - if err != nil { - return err - } - - // Check if the user identified 
by token is the admin. - if err := svc.Authorize(ctx, PolicyReq{Object: authoritiesObject, Relation: memberRelation, Subject: user.ID}); err != nil { - return err - } - - var errs error - for _, subjectID := range subjectIDs { - for _, relation := range relations { - if err := svc.DeletePolicy(ctx, PolicyReq{Object: object, Relation: relation, Subject: subjectID}); err != nil { - errs = errors.Wrap(fmt.Errorf("cannot delete '%s' policy on object '%s' for subject '%s': %w", relation, object, subjectID, err), errs) - } - } - } - return errs -} - -func (svc service) AssignGroupAccessRights(ctx context.Context, token, thingGroupID, userGroupID string) error { - if _, err := svc.Identify(ctx, token); err != nil { - return err - } - return svc.agent.AddPolicy(ctx, PolicyReq{Object: thingGroupID, Relation: memberRelation, Subject: fmt.Sprintf("%s:%s#%s", "members", userGroupID, memberRelation)}) -} - -func (svc service) ListPolicies(ctx context.Context, pr PolicyReq) (PolicyPage, error) { - res, err := svc.agent.RetrievePolicies(ctx, pr) - if err != nil { - return PolicyPage{}, err - } - var page PolicyPage - for _, tuple := range res { - page.Policies = append(page.Policies, tuple.GetObject()) - } - return page, err -} - -func (svc service) tmpKey(duration time.Duration, key Key) (Key, string, error) { - key.ExpiresAt = key.IssuedAt.Add(duration) - secret, err := svc.tokenizer.Issue(key) - if err != nil { - return Key{}, "", errors.Wrap(errIssueTmp, err) - } - - return key, secret, nil -} - -func (svc service) userKey(ctx context.Context, token string, key Key) (Key, string, error) { - id, sub, err := svc.login(token) - if err != nil { - return Key{}, "", errors.Wrap(errIssueUser, err) - } - - key.IssuerID = id - if key.Subject == "" { - key.Subject = sub - } - - keyID, err := svc.idProvider.ID() - if err != nil { - return Key{}, "", errors.Wrap(errIssueUser, err) - } - key.ID = keyID - - if _, err := svc.keys.Save(ctx, key); err != nil { - return Key{}, "", 
errors.Wrap(errIssueUser, err) - } - - secret, err := svc.tokenizer.Issue(key) - if err != nil { - return Key{}, "", errors.Wrap(errIssueUser, err) - } - - return key, secret, nil -} - -func (svc service) login(token string) (string, string, error) { - key, err := svc.tokenizer.Parse(token) - if err != nil { - return "", "", err - } - // Only login key token is valid for login. - if key.Type != LoginKey || key.IssuerID == "" { - return "", "", errors.ErrAuthentication - } - - return key.IssuerID, key.Subject, nil -} - -func (svc service) CreateGroup(ctx context.Context, token string, group Group) (Group, error) { - user, err := svc.Identify(ctx, token) - if err != nil { - return Group{}, err - } - - ulid, err := svc.ulidProvider.ID() - if err != nil { - return Group{}, err - } - - timestamp := getTimestmap() - group.UpdatedAt = timestamp - group.CreatedAt = timestamp - - group.ID = ulid - group.OwnerID = user.ID - - group, err = svc.groups.Save(ctx, group) - if err != nil { - return Group{}, err - } - - if err := svc.agent.AddPolicy(ctx, PolicyReq{Object: group.ID, Relation: memberRelation, Subject: user.ID}); err != nil { - return Group{}, err - } - - return group, nil -} - -func (svc service) ListGroups(ctx context.Context, token string, pm PageMetadata) (GroupPage, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return GroupPage{}, err - } - return svc.groups.RetrieveAll(ctx, pm) -} - -func (svc service) ListParents(ctx context.Context, token string, childID string, pm PageMetadata) (GroupPage, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return GroupPage{}, err - } - return svc.groups.RetrieveAllParents(ctx, childID, pm) -} - -func (svc service) ListChildren(ctx context.Context, token string, parentID string, pm PageMetadata) (GroupPage, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return GroupPage{}, err - } - return svc.groups.RetrieveAllChildren(ctx, parentID, pm) -} - -func (svc service) 
ListMembers(ctx context.Context, token string, groupID, groupType string, pm PageMetadata) (MemberPage, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return MemberPage{}, err - } - mp, err := svc.groups.Members(ctx, groupID, groupType, pm) - if err != nil { - return MemberPage{}, errors.Wrap(ErrFailedToRetrieveMembers, err) - } - return mp, nil -} - -func (svc service) RemoveGroup(ctx context.Context, token, id string) error { - if _, err := svc.Identify(ctx, token); err != nil { - return err - } - return svc.groups.Delete(ctx, id) -} - -func (svc service) UpdateGroup(ctx context.Context, token string, group Group) (Group, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return Group{}, err - } - - group.UpdatedAt = getTimestmap() - return svc.groups.Update(ctx, group) -} - -func (svc service) ViewGroup(ctx context.Context, token, id string) (Group, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return Group{}, err - } - return svc.groups.RetrieveByID(ctx, id) -} - -func (svc service) Assign(ctx context.Context, token string, groupID, groupType string, memberIDs ...string) error { - if _, err := svc.Identify(ctx, token); err != nil { - return err - } - - if err := svc.groups.Assign(ctx, groupID, groupType, memberIDs...); err != nil { - return err - } - - if groupType == thingsGroupType { - ss := fmt.Sprintf("%s:%s#%s", "members", groupID, memberRelation) - var errs error - for _, memberID := range memberIDs { - for _, action := range []string{"read", "write", "delete"} { - if err := svc.agent.AddPolicy(ctx, PolicyReq{Object: memberID, Relation: action, Subject: ss}); err != nil { - errs = errors.Wrap(fmt.Errorf("cannot add thing: '%s' to thing group: '%s'", memberID, groupID), errs) - } - } - } - return errs - } - - var errs error - for _, memberID := range memberIDs { - if err := svc.agent.AddPolicy(ctx, PolicyReq{Object: groupID, Relation: memberRelation, Subject: memberID}); err != nil { - errs = 
errors.Wrap(fmt.Errorf("cannot add user: '%s' to user group: '%s'", memberID, groupID), errs) - } - } - return errs -} - -func (svc service) Unassign(ctx context.Context, token string, groupID string, memberIDs ...string) error { - if _, err := svc.Identify(ctx, token); err != nil { - return err - } - - ss := fmt.Sprintf("%s:%s#%s", "members", groupID, memberRelation) - var errs error - for _, memberID := range memberIDs { - // If the member is a user, #member@memberID must be deleted. - if err := svc.agent.DeletePolicy(ctx, PolicyReq{Object: groupID, Relation: memberRelation, Subject: memberID}); err != nil { - errs = errors.Wrap(fmt.Errorf("cannot delete a membership of member '%s' from group '%s'", memberID, groupID), errs) - } - - // If the member is a Thing, memberID#read|write|delete@(members:groupID#member) must be deleted. - for _, action := range []string{"read", "write", "delete"} { - if err := svc.agent.DeletePolicy(ctx, PolicyReq{Object: memberID, Relation: action, Subject: ss}); err != nil { - errs = errors.Wrap(fmt.Errorf("cannot delete '%s' policy from member '%s'", action, memberID), errs) - } - } - } - - err := svc.groups.Unassign(ctx, groupID, memberIDs...) 
- return errors.Wrap(err, errs) -} - -func (svc service) ListMemberships(ctx context.Context, token string, memberID string, pm PageMetadata) (GroupPage, error) { - if _, err := svc.Identify(ctx, token); err != nil { - return GroupPage{}, err - } - return svc.groups.Memberships(ctx, memberID, pm) -} - -func getTimestmap() time.Time { - return time.Now().UTC().Round(time.Millisecond) -} diff --git a/auth/service_test.go b/auth/service_test.go deleted file mode 100644 index cddf3b9a87..0000000000 --- a/auth/service_test.go +++ /dev/null @@ -1,1330 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package auth_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/auth/jwt" - "github.com/mainflux/mainflux/auth/mocks" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var idProvider = uuid.New() - -const ( - secret = "secret" - email = "test@example.com" - id = "testID" - groupName = "mfx" - description = "Description" - read = "read" - - memberRelation = "member" - authoritiesObj = "authorities" - loginDuration = 30 * time.Minute -) - -func newService() auth.Service { - repo := mocks.NewKeyRepository() - groupRepo := mocks.NewGroupRepository() - idProvider := uuid.NewMock() - - mockAuthzDB := map[string][]mocks.MockSubjectSet{} - mockAuthzDB[id] = append(mockAuthzDB[id], mocks.MockSubjectSet{Object: authoritiesObj, Relation: memberRelation}) - ketoMock := mocks.NewKetoMock(mockAuthzDB) - - t := jwt.New(secret) - return auth.New(repo, groupRepo, idProvider, t, ketoMock, loginDuration) -} - -func TestIssue(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: 
%s", err)) - - cases := []struct { - desc string - key auth.Key - token string - err error - }{ - { - desc: "issue login key", - key: auth.Key{ - Type: auth.LoginKey, - IssuedAt: time.Now(), - }, - token: secret, - err: nil, - }, - { - desc: "issue login key with no time", - key: auth.Key{ - Type: auth.LoginKey, - }, - token: secret, - err: auth.ErrInvalidKeyIssuedAt, - }, - { - desc: "issue API key", - key: auth.Key{ - Type: auth.APIKey, - IssuedAt: time.Now(), - }, - token: secret, - err: nil, - }, - { - desc: "issue API key with an invalid token", - key: auth.Key{ - Type: auth.APIKey, - IssuedAt: time.Now(), - }, - token: "invalid", - err: errors.ErrAuthentication, - }, - { - desc: "issue API key with no time", - key: auth.Key{ - Type: auth.APIKey, - }, - token: secret, - err: auth.ErrInvalidKeyIssuedAt, - }, - { - desc: "issue recovery key", - key: auth.Key{ - Type: auth.RecoveryKey, - IssuedAt: time.Now(), - }, - token: "", - err: nil, - }, - { - desc: "issue recovery with no issue time", - key: auth.Key{ - Type: auth.RecoveryKey, - }, - token: secret, - err: auth.ErrInvalidKeyIssuedAt, - }, - } - - for _, tc := range cases { - _, _, err := svc.Issue(context.Background(), tc.token, tc.key) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestRevoke(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - require.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - key := auth.Key{ - Type: auth.APIKey, - IssuedAt: time.Now(), - IssuerID: id, - Subject: email, - } - newKey, _, err := svc.Issue(context.Background(), secret, key) - require.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - cases := []struct { - desc string - id string - token string - err error - }{ - { - desc: "revoke login key", - id: newKey.ID, - 
token: secret, - err: nil, - }, - { - desc: "revoke non-existing login key", - id: newKey.ID, - token: secret, - err: nil, - }, - { - desc: "revoke with empty login key", - id: newKey.ID, - token: "", - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - err := svc.Revoke(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestRetrieve(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), Subject: email, IssuerID: id}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, userToken, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - apiKey, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing login's key expected to succeed: %s", err)) - - _, resetToken, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.RecoveryKey, IssuedAt: time.Now()}) - assert.Nil(t, err, fmt.Sprintf("Issuing reset key expected to succeed: %s", err)) - - cases := []struct { - desc string - id string - token string - err error - }{ - { - desc: "retrieve login key", - id: apiKey.ID, - token: userToken, - err: nil, - }, - { - desc: "retrieve non-existing login key", - id: "invalid", - token: userToken, - err: errors.ErrNotFound, - }, - { - desc: "retrieve with wrong login key", - id: apiKey.ID, - token: "wrong", - err: errors.ErrAuthentication, - }, - { - desc: "retrieve with API token", - id: apiKey.ID, - token: apiToken, - err: errors.ErrAuthentication, - }, - { - desc: "retrieve with reset token", - id: 
apiKey.ID, - token: resetToken, - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - _, err := svc.RetrieveKey(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestRetrieveAll(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - n := uint64(100) - for i := uint64(0); i < n; i++ { - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: fmt.Sprintf("email-%d@mail.com", i), - IssuedAt: time.Now(), - } - _, _, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - } - - cases := map[string]struct { - token string - size uint64 - pm auth.PageMetadata - err error - }{ - "list all keys": { - token: secret, - pm: auth.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - }, - size: n, - err: nil, - }, - "list all keys with offset": { - token: secret, - pm: auth.PageMetadata{ - Offset: 50, - Limit: n, - Total: n, - }, - size: 50, - err: nil, - }, - "list all keys with wrong token": { - token: "wrongToken", - size: 0, - err: errors.ErrAuthentication, - }, - } - - for desc, tc := range cases { - page, err := svc.RetrieveKeys(context.Background(), tc.token, tc.pm) - size := uint64(len(page.Keys)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } - -} - -func TestIdentify(t *testing.T) { - svc := newService() - - _, loginSecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, 
fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - _, recoverySecret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.RecoveryKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing reset key expected to succeed: %s", err)) - - _, apiSecret, err := svc.Issue(context.Background(), loginSecret, auth.Key{Type: auth.APIKey, IssuerID: id, Subject: email, IssuedAt: time.Now(), ExpiresAt: time.Now().Add(time.Minute)}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - exp1 := time.Now().Add(-2 * time.Second) - _, expSecret, err := svc.Issue(context.Background(), loginSecret, auth.Key{Type: auth.APIKey, IssuedAt: time.Now(), ExpiresAt: exp1}) - assert.Nil(t, err, fmt.Sprintf("Issuing expired login key expected to succeed: %s", err)) - - _, invalidSecret, err := svc.Issue(context.Background(), loginSecret, auth.Key{Type: 22, IssuedAt: time.Now()}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - cases := []struct { - desc string - key string - idt auth.Identity - err error - }{ - { - desc: "identify login key", - key: loginSecret, - idt: auth.Identity{id, email}, - err: nil, - }, - { - desc: "identify recovery key", - key: recoverySecret, - idt: auth.Identity{id, email}, - err: nil, - }, - { - desc: "identify API key", - key: apiSecret, - idt: auth.Identity{id, email}, - err: nil, - }, - { - desc: "identify expired API key", - key: expSecret, - idt: auth.Identity{}, - err: auth.ErrAPIKeyExpired, - }, - { - desc: "identify expired key", - key: invalidSecret, - idt: auth.Identity{}, - err: errors.ErrAuthentication, - }, - { - desc: "identify invalid key", - key: "invalid", - idt: auth.Identity{}, - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - idt, err := svc.Identify(context.Background(), tc.key) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s expected %s got %s\n", tc.desc, tc.err, 
err)) - assert.Equal(t, tc.idt, idt, fmt.Sprintf("%s expected %s got %s\n", tc.desc, tc.idt, idt)) - } -} - -func TestCreateGroup(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Name: "Group", - Description: description, - } - - parentGroup := auth.Group{ - Name: "ParentGroup", - Description: description, - } - - parent, err := svc.CreateGroup(context.Background(), apiToken, parentGroup) - assert.Nil(t, err, fmt.Sprintf("Creating parent group expected to succeed: %s", err)) - - err = svc.Authorize(context.Background(), auth.PolicyReq{Object: parent.ID, Relation: memberRelation, Subject: id}) - assert.Nil(t, err, fmt.Sprintf("Checking parent group owner's policy expected to succeed: %s", err)) - - cases := []struct { - desc string - group auth.Group - err error - }{ - { - desc: "create new group", - group: group, - err: nil, - }, - { - desc: "create group with existing name", - group: group, - err: nil, - }, - { - desc: "create group with parent", - group: auth.Group{ - Name: groupName, - ParentID: parent.ID, - }, - err: nil, - }, - { - desc: "create group with invalid parent", - group: auth.Group{ - Name: groupName, - ParentID: "xxxxxxxxxx", - }, - err: errors.ErrCreateEntity, - }, - } - - for _, tc := range cases { - g, err := svc.CreateGroup(context.Background(), apiToken, tc.group) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - if err == nil { - authzErr := svc.Authorize(context.Background(), 
auth.PolicyReq{Object: g.ID, Relation: memberRelation, Subject: g.OwnerID}) - assert.Nil(t, authzErr, fmt.Sprintf("%s - Checking group owner's policy expected to succeed: %s", tc.desc, authzErr)) - } - } -} - -func TestUpdateGroup(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Name: "Group", - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - - group, err = svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("Creating parent group failed: %s", err)) - - cases := []struct { - desc string - group auth.Group - err error - }{ - { - desc: "update group", - group: auth.Group{ - ID: group.ID, - Name: "NewName", - Description: "NewDescription", - Metadata: auth.GroupMetadata{ - "field": "value2", - }, - }, - err: nil, - }, - } - - for _, tc := range cases { - g, err := svc.UpdateGroup(context.Background(), apiToken, tc.group) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, g.ID, tc.group.ID, fmt.Sprintf("ID: expected %s got %s\n", g.ID, tc.group.ID)) - assert.Equal(t, g.Name, tc.group.Name, fmt.Sprintf("Name: expected %s got %s\n", g.Name, tc.group.Name)) - assert.Equal(t, g.Description, tc.group.Description, fmt.Sprintf("Description: expected %s got %s\n", g.Description, tc.group.Description)) - assert.Equal(t, g.Metadata["field"], g.Metadata["field"], fmt.Sprintf("Metadata: expected %s got %s\n", g.Metadata, 
tc.group.Metadata)) - } - -} - -func TestViewGroup(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Name: "Group", - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - - group, err = svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("Creating parent group failed: %s", err)) - - cases := []struct { - desc string - token string - groupID string - err error - }{ - { - - desc: "view group", - token: apiToken, - groupID: group.ID, - err: nil, - }, - { - desc: "view group with invalid token", - token: "wrongtoken", - groupID: group.ID, - err: errors.ErrAuthentication, - }, - { - desc: "view group for wrong id", - token: apiToken, - groupID: "wrong", - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - _, err := svc.ViewGroup(context.Background(), tc.token, tc.groupID) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestListGroups(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, 
fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - n := uint64(10) - parentID := "" - for i := uint64(0); i < n; i++ { - group.Name = fmt.Sprintf("Group%d", i) - group.ParentID = parentID - g, err := svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - parentID = g.ID - } - - cases := map[string]struct { - token string - level uint64 - size uint64 - metadata auth.GroupMetadata - err error - }{ - "list all groups": { - token: apiToken, - level: 5, - size: n, - err: nil, - }, - "list groups for level 1": { - token: apiToken, - level: 1, - size: n, - err: nil, - }, - "list all groups with wrong token": { - token: "wrongToken", - level: 5, - size: 0, - err: errors.ErrAuthentication, - }, - } - - for desc, tc := range cases { - page, err := svc.ListGroups(context.Background(), tc.token, auth.PageMetadata{Level: tc.level, Metadata: tc.metadata}) - size := uint64(len(page.Groups)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } - -} - -func TestListChildren(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - n := uint64(10) - parentID := 
"" - groupIDs := make([]string, n) - for i := uint64(0); i < n; i++ { - group.Name = fmt.Sprintf("Group%d", i) - group.ParentID = parentID - g, err := svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - parentID = g.ID - groupIDs[i] = g.ID - } - - cases := map[string]struct { - token string - level uint64 - size uint64 - id string - metadata auth.GroupMetadata - err error - }{ - "list all children": { - token: apiToken, - level: 5, - id: groupIDs[0], - size: n, - err: nil, - }, - "list all groups with wrong token": { - token: "wrongToken", - level: 5, - size: 0, - err: errors.ErrAuthentication, - }, - } - - for desc, tc := range cases { - page, err := svc.ListChildren(context.Background(), tc.token, tc.id, auth.PageMetadata{Level: tc.level, Metadata: tc.metadata}) - size := uint64(len(page.Groups)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } -} - -func TestListParents(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - n := uint64(10) - parentID := "" - groupIDs := make([]string, n) - for i := uint64(0); i < n; i++ { - group.Name = fmt.Sprintf("Group%d", i) - group.ParentID = parentID - g, err := svc.CreateGroup(context.Background(), apiToken, group) - 
assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - parentID = g.ID - groupIDs[i] = g.ID - } - - cases := map[string]struct { - token string - level uint64 - size uint64 - id string - metadata auth.GroupMetadata - err error - }{ - "list all parents": { - token: apiToken, - level: 5, - id: groupIDs[n-1], - size: n, - err: nil, - }, - "list all parents with wrong token": { - token: "wrongToken", - level: 5, - size: 0, - err: errors.ErrAuthentication, - }, - } - - for desc, tc := range cases { - page, err := svc.ListParents(context.Background(), tc.token, tc.id, auth.PageMetadata{Level: tc.level, Metadata: tc.metadata}) - size := uint64(len(page.Groups)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } -} - -func TestListMembers(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - g, err := svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("Creating group expected to succeed: %s", err)) - group.ID = g.ID - - n := uint64(10) - for i := uint64(0); i < n; i++ { - uid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - err = svc.Assign(context.Background(), apiToken, group.ID, "things", uid) - assert.Nil(t, err, fmt.Sprintf("Assign member 
expected to succeed: %s\n", err)) - } - - cases := map[string]struct { - token string - size uint64 - offset uint64 - limit uint64 - group auth.Group - metadata auth.GroupMetadata - err error - }{ - "list all members": { - token: apiToken, - offset: 0, - limit: n, - group: group, - size: n, - err: nil, - }, - "list half members": { - token: apiToken, - offset: n / 2, - limit: n, - group: group, - size: n / 2, - err: nil, - }, - "list all members with wrong token": { - token: "wrongToken", - offset: 0, - limit: n, - size: 0, - err: errors.ErrAuthentication, - }, - } - - for desc, tc := range cases { - page, err := svc.ListMembers(context.Background(), tc.token, tc.group.ID, "things", auth.PageMetadata{Offset: tc.offset, Limit: tc.limit, Metadata: tc.metadata}) - size := uint64(len(page.Members)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } - -} - -func TestListMemberships(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - group := auth.Group{ - Description: description, - Metadata: auth.GroupMetadata{ - "field": "value", - }, - } - - memberID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - n := uint64(10) - for i := uint64(0); i < n; i++ { - group.Name = fmt.Sprintf("Group%d", i) - g, err := svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("unexpected 
error: %s\n", err)) - - _ = svc.AddPolicy(context.Background(), auth.PolicyReq{Subject: id, Object: memberID, Relation: "owner"}) - err = svc.Assign(context.Background(), apiToken, g.ID, "things", memberID) - assert.Nil(t, err, fmt.Sprintf("Assign member expected to succeed: %s\n", err)) - } - - cases := map[string]struct { - token string - size uint64 - offset uint64 - limit uint64 - group auth.Group - metadata auth.GroupMetadata - err error - }{ - "list all members": { - token: apiToken, - offset: 0, - limit: n, - group: group, - size: n, - err: nil, - }, - "list half members": { - token: apiToken, - offset: n / 2, - limit: n, - group: group, - size: n / 2, - err: nil, - }, - "list all members with wrong token": { - token: "wrongToken", - offset: 0, - limit: n, - size: 0, - err: errors.ErrAuthentication, - }, - } - - for desc, tc := range cases { - page, err := svc.ListMemberships(context.Background(), tc.token, memberID, auth.PageMetadata{Limit: tc.limit, Offset: tc.offset, Metadata: tc.metadata}) - size := uint64(len(page.Groups)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } -} - -func TestRemoveGroup(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - uid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - group := auth.Group{ - Name: groupName, - 
OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - group, err = svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - err = svc.RemoveGroup(context.Background(), "wrongToken", group.ID) - assert.True(t, errors.Contains(err, errors.ErrAuthentication), fmt.Sprintf("Unauthorized access: expected %v got %v", errors.ErrAuthentication, err)) - - err = svc.RemoveGroup(context.Background(), apiToken, "wrongID") - assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("Remove group with wrong id: expected %v got %v", errors.ErrNotFound, err)) - - gp, err := svc.ListGroups(context.Background(), apiToken, auth.PageMetadata{Level: auth.MaxLevel}) - assert.Nil(t, err, fmt.Sprintf("list groups unexpected error: %s", err)) - assert.True(t, gp.Total == 1, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 1, gp.Total)) - - err = svc.RemoveGroup(context.Background(), apiToken, group.ID) - assert.True(t, errors.Contains(err, nil), fmt.Sprintf("Unauthorized access: expected %v got %v", nil, err)) - - gp, err = svc.ListGroups(context.Background(), apiToken, auth.PageMetadata{Level: auth.MaxLevel}) - assert.Nil(t, err, fmt.Sprintf("list groups save unexpected error: %s", err)) - assert.True(t, gp.Total == 0, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 0, gp.Total)) - -} - -func TestAssign(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - uid, err := 
idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - group := auth.Group{ - Name: groupName + "Updated", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - group, err = svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - mid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - err = svc.Assign(context.Background(), apiToken, group.ID, "things", mid) - assert.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - - // check access control policies things members. - subjectSet := fmt.Sprintf("%s:%s#%s", "members", group.ID, memberRelation) - err = svc.Authorize(context.Background(), auth.PolicyReq{Object: mid, Relation: read, Subject: subjectSet}) - assert.Nil(t, err, fmt.Sprintf("entites having an access to group %s must have %s policy on %s: %s", group.ID, read, mid, err)) - err = svc.Authorize(context.Background(), auth.PolicyReq{Object: mid, Relation: "write", Subject: subjectSet}) - assert.Nil(t, err, fmt.Sprintf("entites having an access to group %s must have %s policy on %s: %s", group.ID, "write", mid, err)) - err = svc.Authorize(context.Background(), auth.PolicyReq{Object: mid, Relation: "delete", Subject: subjectSet}) - assert.Nil(t, err, fmt.Sprintf("entites having an access to group %s must have %s policy on %s: %s", group.ID, "delete", mid, err)) - - mp, err := svc.ListMembers(context.Background(), apiToken, group.ID, "things", auth.PageMetadata{Offset: 0, Limit: 10}) - assert.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - assert.True(t, mp.Total == 1, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 1, mp.Total)) - - err = svc.Assign(context.Background(), "wrongToken", group.ID, "things", mid) - assert.True(t, errors.Contains(err, errors.ErrAuthentication), 
fmt.Sprintf("Unauthorized access: expected %v got %v", errors.ErrAuthentication, err)) - -} - -func TestUnassign(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - uid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - creationTime := time.Now().UTC() - group := auth.Group{ - Name: groupName + "Updated", - OwnerID: uid, - CreatedAt: creationTime, - UpdatedAt: creationTime, - } - - group, err = svc.CreateGroup(context.Background(), apiToken, group) - assert.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) - - mid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - err = svc.Assign(context.Background(), apiToken, group.ID, "things", mid) - assert.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - - mp, err := svc.ListMembers(context.Background(), apiToken, group.ID, "things", auth.PageMetadata{Limit: 10, Offset: 0}) - assert.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - assert.True(t, mp.Total == 1, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 1, mp.Total)) - - err = svc.Unassign(context.Background(), apiToken, group.ID, mid) - assert.Nil(t, err, fmt.Sprintf("member unassign save unexpected error: %s", err)) - - mp, err = svc.ListMembers(context.Background(), apiToken, group.ID, "things", auth.PageMetadata{Limit: 10, Offset: 0}) - assert.Nil(t, err, fmt.Sprintf("member assign save unexpected error: %s", err)) - 
assert.True(t, mp.Total == 0, fmt.Sprintf("retrieve members of a group: expected %d got %d\n", 0, mp.Total)) - - err = svc.Unassign(context.Background(), "wrongToken", group.ID, mid) - assert.True(t, errors.Contains(err, errors.ErrAuthentication), fmt.Sprintf("Unauthorized access: expected %v got %v", errors.ErrAuthentication, err)) - - err = svc.Unassign(context.Background(), apiToken, group.ID, mid) - assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("Unauthorized access: expected %v got %v", nil, err)) -} - -func TestAuthorize(t *testing.T) { - svc := newService() - - pr := auth.PolicyReq{Object: authoritiesObj, Relation: memberRelation, Subject: id} - err := svc.Authorize(context.Background(), pr) - assert.Nil(t, err, fmt.Sprintf("authorizing initial %v policy expected to succeed: %s", pr, err)) -} - -func TestAddPolicy(t *testing.T) { - svc := newService() - - pr := auth.PolicyReq{Object: "obj", Relation: "rel", Subject: "sub"} - err := svc.AddPolicy(context.Background(), pr) - assert.Nil(t, err, fmt.Sprintf("adding %v policy expected to succeed: %v", pr, err)) - - err = svc.Authorize(context.Background(), pr) - assert.Nil(t, err, fmt.Sprintf("checking shared %v policy expected to be succeed: %#v", pr, err)) -} - -func TestDeletePolicy(t *testing.T) { - svc := newService() - - pr := auth.PolicyReq{Object: authoritiesObj, Relation: memberRelation, Subject: id} - err := svc.DeletePolicy(context.Background(), pr) - assert.Nil(t, err, fmt.Sprintf("deleting %v policy expected to succeed: %s", pr, err)) -} - -func TestAssignAccessRights(t *testing.T) { - svc := newService() - - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := 
svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - userGroupID := "user-group" - thingGroupID := "thing-group" - err = svc.AssignGroupAccessRights(context.Background(), apiToken, thingGroupID, userGroupID) - assert.Nil(t, err, fmt.Sprintf("sharing the user group with thing group expected to succeed: %v", err)) - - err = svc.Authorize(context.Background(), auth.PolicyReq{Object: thingGroupID, Relation: memberRelation, Subject: fmt.Sprintf("%s:%s#%s", "members", userGroupID, memberRelation)}) - assert.Nil(t, err, fmt.Sprintf("checking shared group access policy expected to be succeed: %#v", err)) -} - -func TestAddPolicies(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - thingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - tmpID := "tmpid" - readPolicy := read - writePolicy := "write" - deletePolicy := "delete" - - // Add read policy to users. - err = svc.AddPolicies(context.Background(), apiToken, thingID, []string{id, tmpID}, []string{readPolicy}) - assert.Nil(t, err, fmt.Sprintf("adding policies expected to succeed: %s", err)) - - // Add write and delete policies to users. 
- err = svc.AddPolicies(context.Background(), apiToken, thingID, []string{id, tmpID}, []string{writePolicy, deletePolicy}) - assert.Nil(t, err, fmt.Sprintf("adding multiple policies expected to succeed: %s", err)) - - cases := []struct { - desc string - policy auth.PolicyReq - err error - }{ - { - desc: "check valid 'read' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: readPolicy, Subject: id}, - err: nil, - }, - { - desc: "check valid 'write' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: writePolicy, Subject: id}, - err: nil, - }, - { - desc: "check valid 'delete' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: deletePolicy, Subject: id}, - err: nil, - }, - { - desc: "check valid 'read' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: readPolicy, Subject: tmpID}, - err: nil, - }, - { - desc: "check valid 'write' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: writePolicy, Subject: tmpID}, - err: nil, - }, - { - desc: "check valid 'delete' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: deletePolicy, Subject: tmpID}, - err: nil, - }, - { - desc: "check invalid 'access' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: "access", Subject: id}, - err: errors.ErrAuthorization, - }, - { - desc: "check invalid 'access' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: "access", Subject: tmpID}, - err: errors.ErrAuthorization, - }, - } - - for _, tc := range cases { - err := svc.Authorize(context.Background(), tc.policy) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %v, got %v", tc.desc, tc.err, err)) - } -} - -func TestDeletePolicies(t *testing.T) { - svc := newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) 
- assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - thingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - tmpID := "tmpid" - readPolicy := read - writePolicy := "write" - deletePolicy := "delete" - memberPolicy := "member" - - // Add read, write and delete policies to users. - err = svc.AddPolicies(context.Background(), apiToken, thingID, []string{id, tmpID}, []string{readPolicy, writePolicy, deletePolicy, memberPolicy}) - assert.Nil(t, err, fmt.Sprintf("adding policies expected to succeed: %s", err)) - - // Delete multiple policies from single user. - err = svc.DeletePolicies(context.Background(), apiToken, thingID, []string{id}, []string{readPolicy, writePolicy}) - assert.Nil(t, err, fmt.Sprintf("deleting policies from single user expected to succeed: %s", err)) - - // Delete multiple policies from multiple user. 
- err = svc.DeletePolicies(context.Background(), apiToken, thingID, []string{id, tmpID}, []string{deletePolicy, memberPolicy}) - assert.Nil(t, err, fmt.Sprintf("deleting policies from multiple user expected to succeed: %s", err)) - - cases := []struct { - desc string - policy auth.PolicyReq - err error - }{ - { - desc: "check non-existing 'read' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: readPolicy, Subject: id}, - err: errors.ErrAuthorization, - }, - { - desc: "check non-existing 'write' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: writePolicy, Subject: id}, - err: errors.ErrAuthorization, - }, - { - desc: "check non-existing 'delete' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: deletePolicy, Subject: id}, - err: errors.ErrAuthorization, - }, - { - desc: "check non-existing 'member' policy of user with id", - policy: auth.PolicyReq{Object: thingID, Relation: memberPolicy, Subject: id}, - err: errors.ErrAuthorization, - }, - { - desc: "check non-existing 'delete' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: deletePolicy, Subject: tmpID}, - err: errors.ErrAuthorization, - }, - { - desc: "check non-existing 'member' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: memberPolicy, Subject: tmpID}, - err: errors.ErrAuthorization, - }, - { - desc: "check valid 'read' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: readPolicy, Subject: tmpID}, - err: nil, - }, - { - desc: "check valid 'write' policy of user with tmpid", - policy: auth.PolicyReq{Object: thingID, Relation: writePolicy, Subject: tmpID}, - err: nil, - }, - } - - for _, tc := range cases { - err := svc.Authorize(context.Background(), tc.policy) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %v, got %v", tc.desc, tc.err, err)) - } -} - -func TestListPolicies(t *testing.T) { - svc := 
newService() - _, secret, err := svc.Issue(context.Background(), "", auth.Key{Type: auth.LoginKey, IssuedAt: time.Now(), IssuerID: id, Subject: email}) - assert.Nil(t, err, fmt.Sprintf("Issuing login key expected to succeed: %s", err)) - - key := auth.Key{ - ID: "id", - Type: auth.APIKey, - IssuerID: id, - Subject: email, - IssuedAt: time.Now(), - } - - _, apiToken, err := svc.Issue(context.Background(), secret, key) - assert.Nil(t, err, fmt.Sprintf("Issuing user's key expected to succeed: %s", err)) - - readPolicy := read - pageLen := 15 - - // Add arbitrary policies to the user. - for i := 0; i < pageLen; i++ { - err = svc.AddPolicies(context.Background(), apiToken, fmt.Sprintf("thing-%d", i), []string{id}, []string{readPolicy}) - assert.Nil(t, err, fmt.Sprintf("adding policies expected to succeed: %s", err)) - } - - page, err := svc.ListPolicies(context.Background(), auth.PolicyReq{Subject: id, Relation: readPolicy}) - assert.Nil(t, err, fmt.Sprintf("listing policies expected to succeed: %s", err)) - assert.Equal(t, pageLen, len(page.Policies), fmt.Sprintf("unexpected listing page size, expected %d, got %d: %v", pageLen, len(page.Policies), err)) - -} diff --git a/auth/tokenizer.go b/auth/tokenizer.go deleted file mode 100644 index d8bd4f731a..0000000000 --- a/auth/tokenizer.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package auth - -// Tokenizer specifies API for encoding and decoding between string and Key. -type Tokenizer interface { - // Issue converts API Key to its string representation. - Issue(Key) (string, error) - - // Parse extracts API Key data from string token. 
- Parse(string) (Key, error) -} diff --git a/auth/tracing/groups.go b/auth/tracing/groups.go deleted file mode 100644 index 80b53ca2ef..0000000000 --- a/auth/tracing/groups.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package tracing contains middlewares that will add spans to existing traces. -package tracing - -import ( - "context" - - "github.com/mainflux/mainflux/auth" - opentracing "github.com/opentracing/opentracing-go" -) - -const ( - assign = "assign" - saveGroup = "save_group" - deleteGroup = "delete_group" - updateGroup = "update_group" - retrieveByID = "retrieve_by_id" - retrieveAllParents = "retrieve_all_parents" - retrieveAllChildren = "retrieve_all_children" - retrieveAll = "retrieve_all_groups" - memberships = "memberships" - members = "members" - unassign = "unassign" -) - -var _ auth.GroupRepository = (*groupRepositoryMiddleware)(nil) - -type groupRepositoryMiddleware struct { - tracer opentracing.Tracer - repo auth.GroupRepository -} - -// GroupRepositoryMiddleware tracks request and their latency, and adds spans to context. 
-func GroupRepositoryMiddleware(tracer opentracing.Tracer, gr auth.GroupRepository) auth.GroupRepository { - return groupRepositoryMiddleware{ - tracer: tracer, - repo: gr, - } -} - -func (grm groupRepositoryMiddleware) Save(ctx context.Context, g auth.Group) (auth.Group, error) { - span := createSpan(ctx, grm.tracer, saveGroup) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Save(ctx, g) -} - -func (grm groupRepositoryMiddleware) Update(ctx context.Context, g auth.Group) (auth.Group, error) { - span := createSpan(ctx, grm.tracer, updateGroup) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Update(ctx, g) -} - -func (grm groupRepositoryMiddleware) Delete(ctx context.Context, groupID string) error { - span := createSpan(ctx, grm.tracer, deleteGroup) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Delete(ctx, groupID) -} - -func (grm groupRepositoryMiddleware) RetrieveByID(ctx context.Context, id string) (auth.Group, error) { - span := createSpan(ctx, grm.tracer, retrieveByID) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.RetrieveByID(ctx, id) -} - -func (grm groupRepositoryMiddleware) RetrieveAllParents(ctx context.Context, groupID string, pm auth.PageMetadata) (auth.GroupPage, error) { - span := createSpan(ctx, grm.tracer, retrieveAllParents) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.RetrieveAllParents(ctx, groupID, pm) -} - -func (grm groupRepositoryMiddleware) RetrieveAllChildren(ctx context.Context, groupID string, pm auth.PageMetadata) (auth.GroupPage, error) { - span := createSpan(ctx, grm.tracer, retrieveAllChildren) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.RetrieveAllChildren(ctx, groupID, pm) -} - -func (grm groupRepositoryMiddleware) RetrieveAll(ctx context.Context, pm auth.PageMetadata) 
(auth.GroupPage, error) { - span := createSpan(ctx, grm.tracer, retrieveAll) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.RetrieveAll(ctx, pm) -} - -func (grm groupRepositoryMiddleware) Memberships(ctx context.Context, memberID string, pm auth.PageMetadata) (auth.GroupPage, error) { - span := createSpan(ctx, grm.tracer, memberships) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Memberships(ctx, memberID, pm) -} - -func (grm groupRepositoryMiddleware) Members(ctx context.Context, groupID, groupType string, pm auth.PageMetadata) (auth.MemberPage, error) { - span := createSpan(ctx, grm.tracer, members) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Members(ctx, groupID, groupType, pm) -} - -func (grm groupRepositoryMiddleware) Assign(ctx context.Context, groupID, groupType string, memberIDs ...string) error { - span := createSpan(ctx, grm.tracer, assign) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Assign(ctx, groupID, groupType, memberIDs...) -} - -func (grm groupRepositoryMiddleware) Unassign(ctx context.Context, groupID string, memberIDs ...string) error { - span := createSpan(ctx, grm.tracer, unassign) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return grm.repo.Unassign(ctx, groupID, memberIDs...) -} diff --git a/auth/tracing/keys.go b/auth/tracing/keys.go deleted file mode 100644 index 4cd229a597..0000000000 --- a/auth/tracing/keys.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package tracing contains middlewares that will add spans -// to existing traces. 
-package tracing - -import ( - "context" - - "github.com/mainflux/mainflux/auth" - opentracing "github.com/opentracing/opentracing-go" -) - -const ( - saveOp = "save" - retrieveOp = "retrieve_by_id" - retrieveAllOp = "retrieve_all" - revokeOp = "remove" -) - -var _ auth.KeyRepository = (*keyRepositoryMiddleware)(nil) - -// keyRepositoryMiddleware tracks request and their latency, and adds spans -// to context. -type keyRepositoryMiddleware struct { - tracer opentracing.Tracer - repo auth.KeyRepository -} - -// New tracks request and their latency, and adds spans -// to context. -func New(tracer opentracing.Tracer, repo auth.KeyRepository) auth.KeyRepository { - return keyRepositoryMiddleware{ - tracer: tracer, - repo: repo, - } -} - -func (krm keyRepositoryMiddleware) Save(ctx context.Context, key auth.Key) (string, error) { - span := createSpan(ctx, krm.tracer, saveOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return krm.repo.Save(ctx, key) -} - -func (krm keyRepositoryMiddleware) RetrieveByID(ctx context.Context, owner, id string) (auth.Key, error) { - span := createSpan(ctx, krm.tracer, retrieveOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return krm.repo.RetrieveByID(ctx, owner, id) -} - -func (krm keyRepositoryMiddleware) RetrieveAll(ctx context.Context, owner string, pm auth.PageMetadata) (auth.KeyPage, error) { - span := createSpan(ctx, krm.tracer, retrieveAllOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return krm.repo.RetrieveAll(ctx, owner, pm) -} - -func (krm keyRepositoryMiddleware) Remove(ctx context.Context, owner, id string) error { - span := createSpan(ctx, krm.tracer, revokeOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return krm.repo.Remove(ctx, owner, id) -} - -func createSpan(ctx context.Context, tracer opentracing.Tracer, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != 
nil { - return tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - - return tracer.StartSpan(opName) -} diff --git a/auth_grpc.pb.go b/auth_grpc.pb.go deleted file mode 100644 index e25994b21f..0000000000 --- a/auth_grpc.pb.go +++ /dev/null @@ -1,552 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.12 -// source: auth.proto - -package mainflux - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// ThingsServiceClient is the client API for ThingsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ThingsServiceClient interface { - CanAccessByKey(ctx context.Context, in *AccessByKeyReq, opts ...grpc.CallOption) (*ThingID, error) - IsChannelOwner(ctx context.Context, in *ChannelOwnerReq, opts ...grpc.CallOption) (*emptypb.Empty, error) - CanAccessByID(ctx context.Context, in *AccessByIDReq, opts ...grpc.CallOption) (*emptypb.Empty, error) - Identify(ctx context.Context, in *Token, opts ...grpc.CallOption) (*ThingID, error) -} - -type thingsServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewThingsServiceClient(cc grpc.ClientConnInterface) ThingsServiceClient { - return &thingsServiceClient{cc} -} - -func (c *thingsServiceClient) CanAccessByKey(ctx context.Context, in *AccessByKeyReq, opts ...grpc.CallOption) (*ThingID, error) { - out := new(ThingID) - err := c.cc.Invoke(ctx, "/mainflux.ThingsService/CanAccessByKey", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *thingsServiceClient) IsChannelOwner(ctx context.Context, in *ChannelOwnerReq, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/mainflux.ThingsService/IsChannelOwner", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *thingsServiceClient) CanAccessByID(ctx context.Context, in *AccessByIDReq, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/mainflux.ThingsService/CanAccessByID", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *thingsServiceClient) Identify(ctx context.Context, in *Token, opts ...grpc.CallOption) (*ThingID, error) { - out := new(ThingID) - err := c.cc.Invoke(ctx, "/mainflux.ThingsService/Identify", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ThingsServiceServer is the server API for ThingsService service. -// All implementations must embed UnimplementedThingsServiceServer -// for forward compatibility -type ThingsServiceServer interface { - CanAccessByKey(context.Context, *AccessByKeyReq) (*ThingID, error) - IsChannelOwner(context.Context, *ChannelOwnerReq) (*emptypb.Empty, error) - CanAccessByID(context.Context, *AccessByIDReq) (*emptypb.Empty, error) - Identify(context.Context, *Token) (*ThingID, error) - mustEmbedUnimplementedThingsServiceServer() -} - -// UnimplementedThingsServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedThingsServiceServer struct { -} - -func (UnimplementedThingsServiceServer) CanAccessByKey(context.Context, *AccessByKeyReq) (*ThingID, error) { - return nil, status.Errorf(codes.Unimplemented, "method CanAccessByKey not implemented") -} -func (UnimplementedThingsServiceServer) IsChannelOwner(context.Context, *ChannelOwnerReq) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method IsChannelOwner not implemented") -} -func (UnimplementedThingsServiceServer) CanAccessByID(context.Context, *AccessByIDReq) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CanAccessByID not implemented") -} -func (UnimplementedThingsServiceServer) Identify(context.Context, *Token) (*ThingID, error) { - return nil, status.Errorf(codes.Unimplemented, "method Identify not implemented") -} -func (UnimplementedThingsServiceServer) mustEmbedUnimplementedThingsServiceServer() {} - -// UnsafeThingsServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ThingsServiceServer will -// result in compilation errors. 
-type UnsafeThingsServiceServer interface { - mustEmbedUnimplementedThingsServiceServer() -} - -func RegisterThingsServiceServer(s grpc.ServiceRegistrar, srv ThingsServiceServer) { - s.RegisterService(&ThingsService_ServiceDesc, srv) -} - -func _ThingsService_CanAccessByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AccessByKeyReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ThingsServiceServer).CanAccessByKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.ThingsService/CanAccessByKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ThingsServiceServer).CanAccessByKey(ctx, req.(*AccessByKeyReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _ThingsService_IsChannelOwner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ChannelOwnerReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ThingsServiceServer).IsChannelOwner(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.ThingsService/IsChannelOwner", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ThingsServiceServer).IsChannelOwner(ctx, req.(*ChannelOwnerReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _ThingsService_CanAccessByID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AccessByIDReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ThingsServiceServer).CanAccessByID(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/mainflux.ThingsService/CanAccessByID", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ThingsServiceServer).CanAccessByID(ctx, req.(*AccessByIDReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _ThingsService_Identify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Token) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ThingsServiceServer).Identify(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.ThingsService/Identify", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ThingsServiceServer).Identify(ctx, req.(*Token)) - } - return interceptor(ctx, in, info, handler) -} - -// ThingsService_ServiceDesc is the grpc.ServiceDesc for ThingsService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ThingsService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "mainflux.ThingsService", - HandlerType: (*ThingsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CanAccessByKey", - Handler: _ThingsService_CanAccessByKey_Handler, - }, - { - MethodName: "IsChannelOwner", - Handler: _ThingsService_IsChannelOwner_Handler, - }, - { - MethodName: "CanAccessByID", - Handler: _ThingsService_CanAccessByID_Handler, - }, - { - MethodName: "Identify", - Handler: _ThingsService_Identify_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "auth.proto", -} - -// AuthServiceClient is the client API for AuthService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type AuthServiceClient interface { - Issue(ctx context.Context, in *IssueReq, opts ...grpc.CallOption) (*Token, error) - Identify(ctx context.Context, in *Token, opts ...grpc.CallOption) (*UserIdentity, error) - Authorize(ctx context.Context, in *AuthorizeReq, opts ...grpc.CallOption) (*AuthorizeRes, error) - AddPolicy(ctx context.Context, in *AddPolicyReq, opts ...grpc.CallOption) (*AddPolicyRes, error) - DeletePolicy(ctx context.Context, in *DeletePolicyReq, opts ...grpc.CallOption) (*DeletePolicyRes, error) - ListPolicies(ctx context.Context, in *ListPoliciesReq, opts ...grpc.CallOption) (*ListPoliciesRes, error) - Assign(ctx context.Context, in *Assignment, opts ...grpc.CallOption) (*emptypb.Empty, error) - Members(ctx context.Context, in *MembersReq, opts ...grpc.CallOption) (*MembersRes, error) -} - -type authServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewAuthServiceClient(cc grpc.ClientConnInterface) AuthServiceClient { - return &authServiceClient{cc} -} - -func (c *authServiceClient) Issue(ctx context.Context, in *IssueReq, opts ...grpc.CallOption) (*Token, error) { - out := new(Token) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/Issue", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) Identify(ctx context.Context, in *Token, opts ...grpc.CallOption) (*UserIdentity, error) { - out := new(UserIdentity) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/Identify", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) Authorize(ctx context.Context, in *AuthorizeReq, opts ...grpc.CallOption) (*AuthorizeRes, error) { - out := new(AuthorizeRes) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/Authorize", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) AddPolicy(ctx context.Context, in *AddPolicyReq, opts ...grpc.CallOption) (*AddPolicyRes, error) { - out := new(AddPolicyRes) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/AddPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) DeletePolicy(ctx context.Context, in *DeletePolicyReq, opts ...grpc.CallOption) (*DeletePolicyRes, error) { - out := new(DeletePolicyRes) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/DeletePolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) ListPolicies(ctx context.Context, in *ListPoliciesReq, opts ...grpc.CallOption) (*ListPoliciesRes, error) { - out := new(ListPoliciesRes) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/ListPolicies", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) Assign(ctx context.Context, in *Assignment, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/Assign", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authServiceClient) Members(ctx context.Context, in *MembersReq, opts ...grpc.CallOption) (*MembersRes, error) { - out := new(MembersRes) - err := c.cc.Invoke(ctx, "/mainflux.AuthService/Members", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AuthServiceServer is the server API for AuthService service. 
-// All implementations must embed UnimplementedAuthServiceServer -// for forward compatibility -type AuthServiceServer interface { - Issue(context.Context, *IssueReq) (*Token, error) - Identify(context.Context, *Token) (*UserIdentity, error) - Authorize(context.Context, *AuthorizeReq) (*AuthorizeRes, error) - AddPolicy(context.Context, *AddPolicyReq) (*AddPolicyRes, error) - DeletePolicy(context.Context, *DeletePolicyReq) (*DeletePolicyRes, error) - ListPolicies(context.Context, *ListPoliciesReq) (*ListPoliciesRes, error) - Assign(context.Context, *Assignment) (*emptypb.Empty, error) - Members(context.Context, *MembersReq) (*MembersRes, error) - mustEmbedUnimplementedAuthServiceServer() -} - -// UnimplementedAuthServiceServer must be embedded to have forward compatible implementations. -type UnimplementedAuthServiceServer struct { -} - -func (UnimplementedAuthServiceServer) Issue(context.Context, *IssueReq) (*Token, error) { - return nil, status.Errorf(codes.Unimplemented, "method Issue not implemented") -} -func (UnimplementedAuthServiceServer) Identify(context.Context, *Token) (*UserIdentity, error) { - return nil, status.Errorf(codes.Unimplemented, "method Identify not implemented") -} -func (UnimplementedAuthServiceServer) Authorize(context.Context, *AuthorizeReq) (*AuthorizeRes, error) { - return nil, status.Errorf(codes.Unimplemented, "method Authorize not implemented") -} -func (UnimplementedAuthServiceServer) AddPolicy(context.Context, *AddPolicyReq) (*AddPolicyRes, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddPolicy not implemented") -} -func (UnimplementedAuthServiceServer) DeletePolicy(context.Context, *DeletePolicyReq) (*DeletePolicyRes, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeletePolicy not implemented") -} -func (UnimplementedAuthServiceServer) ListPolicies(context.Context, *ListPoliciesReq) (*ListPoliciesRes, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListPolicies not 
implemented") -} -func (UnimplementedAuthServiceServer) Assign(context.Context, *Assignment) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented") -} -func (UnimplementedAuthServiceServer) Members(context.Context, *MembersReq) (*MembersRes, error) { - return nil, status.Errorf(codes.Unimplemented, "method Members not implemented") -} -func (UnimplementedAuthServiceServer) mustEmbedUnimplementedAuthServiceServer() {} - -// UnsafeAuthServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to AuthServiceServer will -// result in compilation errors. -type UnsafeAuthServiceServer interface { - mustEmbedUnimplementedAuthServiceServer() -} - -func RegisterAuthServiceServer(s grpc.ServiceRegistrar, srv AuthServiceServer) { - s.RegisterService(&AuthService_ServiceDesc, srv) -} - -func _AuthService_Issue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IssueReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).Issue(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/Issue", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).Issue(ctx, req.(*IssueReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_Identify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Token) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).Identify(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/Identify", - } - handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).Identify(ctx, req.(*Token)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_Authorize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthorizeReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).Authorize(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/Authorize", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).Authorize(ctx, req.(*AuthorizeReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_AddPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddPolicyReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).AddPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/AddPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).AddPolicy(ctx, req.(*AddPolicyReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_DeletePolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeletePolicyReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).DeletePolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/DeletePolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).DeletePolicy(ctx, 
req.(*DeletePolicyReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_ListPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPoliciesReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).ListPolicies(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/ListPolicies", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).ListPolicies(ctx, req.(*ListPoliciesReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_Assign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Assignment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).Assign(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/Assign", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).Assign(ctx, req.(*Assignment)) - } - return interceptor(ctx, in, info, handler) -} - -func _AuthService_Members_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MembersReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServiceServer).Members(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mainflux.AuthService/Members", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServiceServer).Members(ctx, req.(*MembersReq)) - } - return interceptor(ctx, in, info, handler) -} - -// 
AuthService_ServiceDesc is the grpc.ServiceDesc for AuthService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var AuthService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "mainflux.AuthService", - HandlerType: (*AuthServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Issue", - Handler: _AuthService_Issue_Handler, - }, - { - MethodName: "Identify", - Handler: _AuthService_Identify_Handler, - }, - { - MethodName: "Authorize", - Handler: _AuthService_Authorize_Handler, - }, - { - MethodName: "AddPolicy", - Handler: _AuthService_AddPolicy_Handler, - }, - { - MethodName: "DeletePolicy", - Handler: _AuthService_DeletePolicy_Handler, - }, - { - MethodName: "ListPolicies", - Handler: _AuthService_ListPolicies_Handler, - }, - { - MethodName: "Assign", - Handler: _AuthService_Assign_Handler, - }, - { - MethodName: "Members", - Handler: _AuthService_Members_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "auth.proto", -} diff --git a/bootstrap/README.md b/bootstrap/README.md index b64bd1889b..5a1b161a22 100644 --- a/bootstrap/README.md +++ b/bootstrap/README.md @@ -62,8 +62,8 @@ The service is configured using the environment variables presented in the follo | MF_BOOTSTRAP_ES_DB | Bootstrap service event source database | 0 | | MF_BOOTSTRAP_EVENT_CONSUMER | Bootstrap service event source consumer name | bootstrap | | MF_JAEGER_URL | Jaeger server URL | localhost:6831 | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | ## Deployment @@ -104,8 +104,8 @@ MF_BOOTSTRAP_SERVER_KEY=[Path to server key] \ MF_SDK_BASE_URL=[Base SDK URL for the Mainflux services] \ MF_SDK_THINGS_PREFIX=[SDK prefix for Things service] \ 
MF_JAEGER_URL=[Jaeger server URL] \ -MF_AUTH_GRPC_URL=[Auth service gRPC URL] \ -MF_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \ +MF_AUTH_GRPC_URL=[Users service gRPC URL] \ +MF_AUTH_GRPC_TIMEOUT=[Users service gRPC request timeout in seconds] \ $GOBIN/mainflux-bootstrap ``` diff --git a/bootstrap/api/endpoint_test.go b/bootstrap/api/endpoint_test.go index 05965135f9..2e00fa6558 100644 --- a/bootstrap/api/endpoint_test.go +++ b/bootstrap/api/endpoint_test.go @@ -12,24 +12,29 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "strconv" "strings" "testing" - "github.com/mainflux/mainflux" + "github.com/go-zoo/bone" "github.com/mainflux/mainflux/bootstrap" bsapi "github.com/mainflux/mainflux/bootstrap/api" "github.com/mainflux/mainflux/bootstrap/mocks" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/things" - thingsapi "github.com/mainflux/mainflux/things/api/things/http" - "github.com/opentracing/opentracing-go/mocktracer" + "github.com/mainflux/mainflux/things/clients" + capi "github.com/mainflux/mainflux/things/clients/api" + "github.com/mainflux/mainflux/things/groups" + gapi "github.com/mainflux/mainflux/things/groups/api" + tpolicies "github.com/mainflux/mainflux/things/policies" + papi "github.com/mainflux/mainflux/things/policies/api/http" + upolicies "github.com/mainflux/mainflux/users/policies" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -163,7 +168,7 @@ func dec(in []byte) ([]byte, error) { return in, nil } -func newService(auth mainflux.AuthServiceClient, url string) bootstrap.Service { +func newService(auth upolicies.AuthServiceClient, url string) 
bootstrap.Service { things := mocks.NewConfigsRepository() config := mfsdk.Config{ ThingsURL: url, @@ -173,11 +178,11 @@ func newService(auth mainflux.AuthServiceClient, url string) bootstrap.Service { return bootstrap.New(auth, things, sdk, encKey) } -func generateChannels() map[string]things.Channel { - channels := make(map[string]things.Channel, channelsNum) +func generateChannels() map[string]mfgroups.Group { + channels := make(map[string]mfgroups.Group, channelsNum) for i := 0; i < channelsNum; i++ { id := strconv.Itoa(i + 1) - channels[id] = things.Channel{ + channels[id] = mfgroups.Group{ ID: id, Owner: email, Metadata: metadata, @@ -186,18 +191,24 @@ func generateChannels() map[string]things.Channel { return channels } -func newThingsService(auth mainflux.AuthServiceClient) things.Service { - return mocks.NewThingsService(map[string]things.Thing{}, generateChannels(), auth) +func newThingsService(auth upolicies.AuthServiceClient) (clients.Service, groups.Service, tpolicies.Service) { + csvc := mocks.NewThingsService(map[string]mfclients.Client{}, auth) + gsvc := mocks.NewChannelsService(generateChannels(), auth) + psvc := mocks.NewPoliciesService(auth) + return csvc, gsvc, psvc } -func newThingsServer(svc things.Service) *httptest.Server { - logger := logger.NewMock() - mux := thingsapi.MakeHandler(mocktracer.New(), svc, logger) +func newThingsServer(csvc clients.Service, gsvc groups.Service, psvc tpolicies.Service) *httptest.Server { + logger := mflog.NewMock() + mux := bone.New() + capi.MakeHandler(csvc, mux, logger) + gapi.MakeHandler(gsvc, mux, logger) + papi.MakePolicyHandler(csvc, psvc, mux, logger) return httptest.NewServer(mux) } func newBootstrapServer(svc bootstrap.Service) *httptest.Server { - logger := logger.NewMock() + logger := mflog.NewMock() mux := bsapi.MakeHandler(svc, bootstrap.NewConfigReader(encKey), logger) return httptest.NewServer(mux) } @@ -1155,7 +1166,7 @@ func TestBootstrap(t *testing.T) { assert.Nil(t, err, fmt.Sprintf("%s: 
unexpected error %s", tc.desc, err)) assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) if tc.secure && tc.status == http.StatusOK { body, err = dec(body) diff --git a/bootstrap/api/logging.go b/bootstrap/api/logging.go index 49c903b736..130c265a4e 100644 --- a/bootstrap/api/logging.go +++ b/bootstrap/api/logging.go @@ -11,18 +11,18 @@ import ( "time" "github.com/mainflux/mainflux/bootstrap" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" ) var _ bootstrap.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc bootstrap.Service } // NewLoggingMiddleware adds logging facilities to the core service. -func NewLoggingMiddleware(svc bootstrap.Service, logger log.Logger) bootstrap.Service { +func NewLoggingMiddleware(svc bootstrap.Service, logger mflog.Logger) bootstrap.Service { return &loggingMiddleware{logger, svc} } diff --git a/bootstrap/api/transport.go b/bootstrap/api/transport.go index f8d019d3b6..bbc3dd8e13 100644 --- a/bootstrap/api/transport.go +++ b/bootstrap/api/transport.go @@ -15,7 +15,7 @@ import ( "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/bootstrap" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" ) @@ -34,7 +34,7 @@ var ( ) // MakeHandler returns a HTTP handler for API endpoints. 
-func MakeHandler(svc bootstrap.Service, reader bootstrap.ConfigReader, logger logger.Logger) http.Handler { +func MakeHandler(svc bootstrap.Service, reader bootstrap.ConfigReader, logger mflog.Logger) http.Handler { opts := []kithttp.ServerOption{ kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), } diff --git a/bootstrap/configs.go b/bootstrap/configs.go index d43f8cfcfa..961bac77d9 100644 --- a/bootstrap/configs.go +++ b/bootstrap/configs.go @@ -3,31 +3,44 @@ package bootstrap +import ( + "time" + + "github.com/mainflux/mainflux/pkg/clients" +) + // Config represents Configuration entity. It wraps information about external entity // as well as info about corresponding Mainflux entities. // MFThing represents corresponding Mainflux Thing ID. // MFKey is key of corresponding Mainflux Thing. // MFChannels is a list of Mainflux Channels corresponding Mainflux Thing connects to. type Config struct { - MFThing string - Owner string - Name string - ClientCert string - ClientKey string - CACert string - MFKey string - MFChannels []Channel - ExternalID string - ExternalKey string - Content string - State State + MFThing string `json:"mainflux_thing"` + Owner string `json:"owner,omitempty"` + Name string `json:"name,omitempty"` + ClientCert string `json:"client_cert,omitempty"` + ClientKey string `json:"client_key,omitempty"` + CACert string `json:"ca_cert,omitempty"` + MFKey string `json:"mainflux_key"` + MFChannels []Channel `json:"mainflux_channels,omitempty"` + ExternalID string `json:"external_id"` + ExternalKey string `json:"external_key"` + Content string `json:"content,omitempty"` + State State `json:"state"` } // Channel represents Mainflux channel corresponding Mainflux Thing is connected to. 
type Channel struct { - ID string - Name string - Metadata map[string]interface{} + ID string `json:"id"` + Name string `json:"name,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + Owner string `json:"owner_id"` + Parent string `json:"parent_id,omitempty"` + Description string `json:"description,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Status clients.Status `json:"status"` } // Filter is used for the search filters. @@ -39,10 +52,10 @@ type Filter struct { // ConfigsPage contains page related metadata as well as list of Configs that // belong to this page. type ConfigsPage struct { - Total uint64 - Offset uint64 - Limit uint64 - Configs []Config + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` + Configs []Config `json:"configs"` } // ConfigRepository specifies a Config persistence API. diff --git a/bootstrap/mocks/channels.go b/bootstrap/mocks/channels.go new file mode 100644 index 0000000000..fb2439da6e --- /dev/null +++ b/bootstrap/mocks/channels.go @@ -0,0 +1,109 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "context" + "strconv" + "sync" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" + upolicies "github.com/mainflux/mainflux/users/policies" +) + +var _ groups.Service = (*mainfluxChannels)(nil) + +type mainfluxChannels struct { + mu sync.Mutex + counter uint64 + channels map[string]mfgroups.Group + auth upolicies.AuthServiceClient +} + +// NewChannelsService returns Mainflux Channels service mock. +// Only methods used by SDK are mocked. 
+func NewChannelsService(channels map[string]mfgroups.Group, auth upolicies.AuthServiceClient) groups.Service { + return &mainfluxChannels{ + channels: channels, + auth: auth, + } +} + +func (svc *mainfluxChannels) CreateGroups(ctx context.Context, token string, chs ...mfgroups.Group) ([]mfgroups.Group, error) { + svc.mu.Lock() + defer svc.mu.Unlock() + + userID, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}) + if err != nil { + return []mfgroups.Group{}, errors.ErrAuthentication + } + for i := range chs { + svc.counter++ + chs[i].Owner = userID.GetId() + chs[i].ID = strconv.FormatUint(svc.counter, 10) + svc.channels[chs[i].ID] = chs[i] + } + + return chs, nil +} + +func (svc *mainfluxChannels) ViewGroup(_ context.Context, owner, id string) (mfgroups.Group, error) { + if c, ok := svc.channels[id]; ok { + return c, nil + } + return mfgroups.Group{}, errors.ErrNotFound +} + +func (svc *mainfluxChannels) ListGroups(context.Context, string, mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + panic("not implemented") +} + +func (svc *mainfluxChannels) ListMemberships(context.Context, string, string, mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + panic("not implemented") +} + +func (svc *mainfluxChannels) UpdateGroup(context.Context, string, mfgroups.Group) (mfgroups.Group, error) { + panic("not implemented") +} + +func (svc *mainfluxChannels) EnableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + svc.mu.Lock() + defer svc.mu.Unlock() + + userID, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}) + if err != nil { + return mfgroups.Group{}, errors.ErrAuthentication + } + + if t, ok := svc.channels[id]; !ok || t.Owner != userID.GetId() { + return mfgroups.Group{}, errors.ErrNotFound + } + if t, ok := svc.channels[id]; ok && t.Owner == userID.GetId() { + t.Status = mfclients.EnabledStatus + return t, nil + } + return mfgroups.Group{}, nil +} + +func (svc *mainfluxChannels) DisableGroup(ctx context.Context, 
token, id string) (mfgroups.Group, error) { + svc.mu.Lock() + defer svc.mu.Unlock() + + userID, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}) + if err != nil { + return mfgroups.Group{}, errors.ErrAuthentication + } + + if t, ok := svc.channels[id]; !ok || t.Owner != userID.GetId() { + return mfgroups.Group{}, errors.ErrNotFound + } + if t, ok := svc.channels[id]; ok && t.Owner == userID.GetId() { + t.Status = mfclients.DisabledStatus + return t, nil + } + return mfgroups.Group{}, nil +} diff --git a/bootstrap/mocks/policies.go b/bootstrap/mocks/policies.go new file mode 100644 index 0000000000..7b3c772701 --- /dev/null +++ b/bootstrap/mocks/policies.go @@ -0,0 +1,71 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "context" + "fmt" + "sync" + + "github.com/mainflux/mainflux/pkg/errors" + tpolicies "github.com/mainflux/mainflux/things/policies" + upolicies "github.com/mainflux/mainflux/users/policies" +) + +var _ tpolicies.Service = (*mainfluxPolicies)(nil) + +type mainfluxPolicies struct { + mu sync.Mutex + auth upolicies.AuthServiceClient + connections map[string]tpolicies.Policy +} + +// NewPoliciesService returns Mainflux Things Policies service mock. +// Only methods used by SDK are mocked. 
+func NewPoliciesService(auth upolicies.AuthServiceClient) tpolicies.Service { + return &mainfluxPolicies{ + auth: auth, + connections: make(map[string]tpolicies.Policy), + } +} + +func (svc *mainfluxPolicies) AddPolicy(_ context.Context, token string, p tpolicies.Policy) (tpolicies.Policy, error) { + svc.mu.Lock() + defer svc.mu.Unlock() + + if _, err := svc.auth.Identify(context.Background(), &upolicies.Token{Value: token}); err != nil { + return tpolicies.Policy{}, errors.ErrAuthentication + } + svc.connections[fmt.Sprintf("%s:%s", p.Subject, p.Object)] = p + + return p, nil +} + +func (svc *mainfluxPolicies) DeletePolicy(_ context.Context, token string, p tpolicies.Policy) error { + svc.mu.Lock() + defer svc.mu.Unlock() + + if _, err := svc.auth.Identify(context.Background(), &upolicies.Token{Value: token}); err != nil { + return errors.ErrAuthentication + } + + for _, pol := range svc.connections { + if pol.Subject == p.Subject && pol.Object == p.Object { + delete(svc.connections, fmt.Sprintf("%s:%s", p.Subject, p.Object)) + } + } + return nil +} + +func (svc *mainfluxPolicies) UpdatePolicy(context.Context, string, tpolicies.Policy) (tpolicies.Policy, error) { + panic("not implemented") +} + +func (svc *mainfluxPolicies) Authorize(context.Context, tpolicies.AccessRequest, string) (string, error) { + panic("not implemented") +} + +func (svc *mainfluxPolicies) ListPolicies(context.Context, string, tpolicies.Page) (tpolicies.PolicyPage, error) { + panic("not implemented") +} diff --git a/bootstrap/mocks/things.go b/bootstrap/mocks/things.go index 96aeef8437..47e9c1752e 100644 --- a/bootstrap/mocks/things.go +++ b/bootstrap/mocks/things.go @@ -8,218 +8,125 @@ import ( "strconv" "sync" - "github.com/mainflux/mainflux" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" + "github.com/mainflux/mainflux/things/clients" + upolicies "github.com/mainflux/mainflux/users/policies" ) -var _ 
things.Service = (*mainfluxThings)(nil) +var _ clients.Service = (*mainfluxThings)(nil) type mainfluxThings struct { - mu sync.Mutex - counter uint64 - things map[string]things.Thing - channels map[string]things.Channel - auth mainflux.AuthServiceClient - connections map[string][]string + mu sync.Mutex + counter uint64 + things map[string]mfclients.Client + auth upolicies.AuthServiceClient } // NewThingsService returns Mainflux Things service mock. // Only methods used by SDK are mocked. -func NewThingsService(things map[string]things.Thing, channels map[string]things.Channel, auth mainflux.AuthServiceClient) things.Service { +func NewThingsService(things map[string]mfclients.Client, auth upolicies.AuthServiceClient) clients.Service { return &mainfluxThings{ - things: things, - channels: channels, - auth: auth, - connections: make(map[string][]string), + things: things, + auth: auth, } } -func (svc *mainfluxThings) CreateThings(_ context.Context, owner string, ths ...things.Thing) ([]things.Thing, error) { +func (svc *mainfluxThings) CreateThings(_ context.Context, owner string, ths ...mfclients.Client) ([]mfclients.Client, error) { svc.mu.Lock() defer svc.mu.Unlock() - userID, err := svc.auth.Identify(context.Background(), &mainflux.Token{Value: owner}) + userID, err := svc.auth.Identify(context.Background(), &upolicies.Token{Value: owner}) if err != nil { - return []things.Thing{}, errors.ErrAuthentication + return []mfclients.Client{}, errors.ErrAuthentication } for i := range ths { svc.counter++ - ths[i].Owner = userID.Email + ths[i].Owner = userID.GetId() ths[i].ID = strconv.FormatUint(svc.counter, 10) - ths[i].Key = ths[i].ID + ths[i].Credentials.Secret = ths[i].ID svc.things[ths[i].ID] = ths[i] } return ths, nil } -func (svc *mainfluxThings) ViewThing(_ context.Context, owner, id string) (things.Thing, error) { +func (svc *mainfluxThings) ViewClient(_ context.Context, owner, id string) (mfclients.Client, error) { svc.mu.Lock() defer svc.mu.Unlock() - userID, 
err := svc.auth.Identify(context.Background(), &mainflux.Token{Value: owner}) + userID, err := svc.auth.Identify(context.Background(), &upolicies.Token{Value: owner}) if err != nil { - return things.Thing{}, errors.ErrAuthentication + return mfclients.Client{}, errors.ErrAuthentication } - if t, ok := svc.things[id]; ok && t.Owner == userID.Email { + if t, ok := svc.things[id]; ok && t.Owner == userID.GetId() { return t, nil } - return things.Thing{}, errors.ErrNotFound + return mfclients.Client{}, errors.ErrNotFound } -func (svc *mainfluxThings) Connect(_ context.Context, owner string, chIDs, thIDs []string) error { +func (svc *mainfluxThings) EnableClient(ctx context.Context, token, id string) (mfclients.Client, error) { svc.mu.Lock() defer svc.mu.Unlock() - userID, err := svc.auth.Identify(context.Background(), &mainflux.Token{Value: owner}) + userID, err := svc.auth.Identify(context.Background(), &upolicies.Token{Value: token}) if err != nil { - return errors.ErrAuthentication + return mfclients.Client{}, errors.ErrAuthentication } - for _, chID := range chIDs { - if svc.channels[chID].Owner != userID.Email { - return errors.ErrAuthentication - } - svc.connections[chID] = append(svc.connections[chID], thIDs...) 
- } - - return nil -} - -func (svc *mainfluxThings) Disconnect(_ context.Context, owner string, chIDs, thIDs []string) error { - svc.mu.Lock() - defer svc.mu.Unlock() - userID, err := svc.auth.Identify(context.Background(), &mainflux.Token{Value: owner}) - if err != nil { - return errors.ErrAuthentication + if t, ok := svc.things[id]; !ok || t.Owner != userID.GetId() { + return mfclients.Client{}, errors.ErrNotFound } - - for _, chID := range chIDs { - if svc.channels[chID].Owner != userID.Email { - return errors.ErrAuthentication - } - - ids := svc.connections[chID] - var count int - var newConns []string - for _, thID := range thIDs { - for _, id := range ids { - if id == thID { - count++ - continue - } - newConns = append(newConns, id) - } - - if len(newConns)-len(ids) != count { - return errors.ErrNotFound - } - svc.connections[chID] = newConns - } + if t, ok := svc.things[id]; ok && t.Owner == userID.GetId() { + t.Status = mfclients.EnabledStatus + return t, nil } - return nil + return mfclients.Client{}, nil } -func (svc *mainfluxThings) RemoveThing(_ context.Context, owner, id string) error { +func (svc *mainfluxThings) DisableClient(ctx context.Context, token, id string) (mfclients.Client, error) { svc.mu.Lock() defer svc.mu.Unlock() - userID, err := svc.auth.Identify(context.Background(), &mainflux.Token{Value: owner}) + userID, err := svc.auth.Identify(context.Background(), &upolicies.Token{Value: token}) if err != nil { - return errors.ErrAuthentication - } - - if t, ok := svc.things[id]; !ok || t.Owner != userID.Email { - return errors.ErrNotFound - } - - delete(svc.things, id) - conns := make(map[string][]string) - for k, v := range svc.connections { - i := findIndex(v, id) - if i != -1 { - var tmp []string - if i != len(v)-2 { - tmp = v[i+1:] - } - conns[k] = append(v[:i], tmp...) 
- } + return mfclients.Client{}, errors.ErrAuthentication } - svc.connections = conns - return nil -} - -func (svc *mainfluxThings) ViewChannel(_ context.Context, owner, id string) (things.Channel, error) { - if c, ok := svc.channels[id]; ok { - return c, nil + if t, ok := svc.things[id]; !ok || t.Owner != userID.GetId() { + return mfclients.Client{}, errors.ErrNotFound } - return things.Channel{}, errors.ErrNotFound -} - -func (svc *mainfluxThings) UpdateThing(context.Context, string, things.Thing) error { - panic("not implemented") -} - -func (svc *mainfluxThings) UpdateKey(context.Context, string, string, string) error { - panic("not implemented") -} - -func (svc *mainfluxThings) ListThings(context.Context, string, things.PageMetadata) (things.Page, error) { - panic("not implemented") -} - -func (svc *mainfluxThings) ListChannelsByThing(context.Context, string, string, things.PageMetadata) (things.ChannelsPage, error) { - panic("not implemented") -} - -func (svc *mainfluxThings) ListThingsByChannel(context.Context, string, string, things.PageMetadata) (things.Page, error) { - panic("not implemented") -} - -func (svc *mainfluxThings) CreateChannels(_ context.Context, owner string, chs ...things.Channel) ([]things.Channel, error) { - svc.mu.Lock() - defer svc.mu.Unlock() - - userID, err := svc.auth.Identify(context.Background(), &mainflux.Token{Value: owner}) - if err != nil { - return []things.Channel{}, errors.ErrAuthentication - } - for i := range chs { - svc.counter++ - chs[i].Owner = userID.Email - chs[i].ID = strconv.FormatUint(svc.counter, 10) - svc.channels[chs[i].ID] = chs[i] + if t, ok := svc.things[id]; ok && t.Owner == userID.GetId() { + t.Status = mfclients.DisabledStatus + return t, nil } - - return chs, nil + return mfclients.Client{}, nil } -func (svc *mainfluxThings) UpdateChannel(context.Context, string, things.Channel) error { +func (svc *mainfluxThings) UpdateClient(context.Context, string, mfclients.Client) (mfclients.Client, error) { 
panic("not implemented") } -func (svc *mainfluxThings) ListChannels(context.Context, string, things.PageMetadata) (things.ChannelsPage, error) { +func (svc *mainfluxThings) UpdateClientSecret(context.Context, string, string, string) (mfclients.Client, error) { panic("not implemented") } -func (svc *mainfluxThings) RemoveChannel(context.Context, string, string) error { +func (svc *mainfluxThings) UpdateClientOwner(context.Context, string, mfclients.Client) (mfclients.Client, error) { panic("not implemented") } -func (svc *mainfluxThings) CanAccessByKey(context.Context, string, string) (string, error) { +func (svc *mainfluxThings) UpdateClientTags(context.Context, string, mfclients.Client) (mfclients.Client, error) { panic("not implemented") } -func (svc *mainfluxThings) CanAccessByID(context.Context, string, string) error { +func (svc *mainfluxThings) ListClients(context.Context, string, mfclients.Page) (mfclients.ClientsPage, error) { panic("not implemented") } -func (svc *mainfluxThings) IsChannelOwner(context.Context, string, string) error { +func (svc *mainfluxThings) ListClientsByGroup(context.Context, string, string, mfclients.Page) (mfclients.MembersPage, error) { panic("not implemented") } @@ -227,20 +134,6 @@ func (svc *mainfluxThings) Identify(context.Context, string) (string, error) { panic("not implemented") } -func (svc *mainfluxThings) ShareThing(ctx context.Context, token, thingID string, actions, userIDs []string) error { - panic("not implemented") -} - -func findIndex(list []string, val string) int { - for i, v := range list { - if v == val { - return i - } - } - - return -1 -} - -func (svc *mainfluxThings) ListMembers(ctx context.Context, token, groupID string, pm things.PageMetadata) (things.Page, error) { +func (svc *mainfluxThings) ShareClient(ctx context.Context, token, thingID string, actions, userIDs []string) error { panic("not implemented") } diff --git a/bootstrap/mocks/users.go b/bootstrap/mocks/users.go index 54942d69e7..3b45ecaec6 
100644 --- a/bootstrap/mocks/users.go +++ b/bootstrap/mocks/users.go @@ -6,60 +6,46 @@ package mocks import ( "context" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" "google.golang.org/grpc" ) -var _ mainflux.AuthServiceClient = (*serviceMock)(nil) +var _ policies.AuthServiceClient = (*serviceMock)(nil) type serviceMock struct { users map[string]string } // NewAuthClient creates mock of users service. -func NewAuthClient(users map[string]string) mainflux.AuthServiceClient { +func NewAuthClient(users map[string]string) policies.AuthServiceClient { return &serviceMock{users} } -func (svc serviceMock) Identify(ctx context.Context, in *mainflux.Token, opts ...grpc.CallOption) (*mainflux.UserIdentity, error) { +func (svc serviceMock) Identify(ctx context.Context, in *policies.Token, opts ...grpc.CallOption) (*policies.UserIdentity, error) { if id, ok := svc.users[in.Value]; ok { - return &mainflux.UserIdentity{Email: id, Id: id}, nil + return &policies.UserIdentity{Id: id}, nil } return nil, errors.ErrAuthentication } -func (svc serviceMock) Issue(ctx context.Context, in *mainflux.IssueReq, opts ...grpc.CallOption) (*mainflux.Token, error) { +func (svc serviceMock) Issue(ctx context.Context, in *policies.IssueReq, opts ...grpc.CallOption) (*policies.Token, error) { if id, ok := svc.users[in.GetEmail()]; ok { - switch in.Type { - default: - return &mainflux.Token{Value: id}, nil - } + return &policies.Token{Value: id}, nil } return nil, errors.ErrAuthentication } -func (svc serviceMock) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { +func (svc serviceMock) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { panic("not implemented") } -func (svc serviceMock) AddPolicy(ctx context.Context, in 
*mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { +func (svc serviceMock) AddPolicy(ctx context.Context, req *policies.AddPolicyReq, _ ...grpc.CallOption) (r *policies.AddPolicyRes, err error) { panic("not implemented") } - -func (svc serviceMock) DeletePolicy(ctx context.Context, in *mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { - panic("not implemented") -} - -func (svc serviceMock) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { +func (svc serviceMock) DeletePolicy(ctx context.Context, req *policies.DeletePolicyReq, _ ...grpc.CallOption) (r *policies.DeletePolicyRes, err error) { panic("not implemented") } - -func (svc serviceMock) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - panic("not implemented") -} - -func (svc serviceMock) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, err error) { +func (svc serviceMock) ListPolicies(ctx context.Context, req *policies.ListPoliciesReq, _ ...grpc.CallOption) (r *policies.ListPoliciesRes, err error) { panic("not implemented") } diff --git a/bootstrap/postgres/configs.go b/bootstrap/postgres/configs.go index 8d96d59d1c..fa3418e526 100644 --- a/bootstrap/postgres/configs.go +++ b/bootstrap/postgres/configs.go @@ -8,13 +8,15 @@ import ( "encoding/json" "fmt" "strings" + "time" "github.com/jackc/pgerrcode" "github.com/jackc/pgtype" "github.com/jackc/pgx/v5/pgconn" "github.com/jmoiron/sqlx" "github.com/mainflux/mainflux/bootstrap" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" ) @@ -33,12 +35,12 @@ var _ bootstrap.ConfigRepository = (*configRepository)(nil) type configRepository struct { db *sqlx.DB - log logger.Logger + log mflog.Logger } 
// NewConfigRepository instantiates a PostgreSQL implementation of config // repository. -func NewConfigRepository(db *sqlx.DB, log logger.Logger) bootstrap.ConfigRepository { +func NewConfigRepository(db *sqlx.DB, log mflog.Logger) bootstrap.ConfigRepository { return &configRepository{db: db, log: log} } @@ -391,7 +393,8 @@ func (cr configRepository) UpdateChannel(c bootstrap.Channel) error { return errors.Wrap(errors.ErrUpdateEntity, err) } - q := `UPDATE channels SET name = :name, metadata = :metadata WHERE mainflux_channel = :mainflux_channel` + q := `UPDATE channels SET name = :name, metadata = :metadata, updated_at = :updated_at, updated_by = :updated_by + WHERE mainflux_channel = :mainflux_channel` if _, err = cr.db.NamedExec(q, dbch); err != nil { return errors.Wrap(errUpdateChannels, err) } @@ -457,9 +460,8 @@ func insertChannels(owner string, channels []bootstrap.Channel, tx *sqlx.Tx) err } chans = append(chans, dbch) } - - q := `INSERT INTO channels (mainflux_channel, owner, name, metadata) - VALUES (:mainflux_channel, :owner, :name, :metadata)` + q := `INSERT INTO channels (mainflux_channel, owner, name, metadata, parent_id, description, created_at, updated_at, updated_by, status) + VALUES (:mainflux_channel, :owner, :name, :metadata, :parent_id, :description, :created_at, :updated_at, :updated_by, :status)` if _, err := tx.NamedExec(q, chans); err != nil { e := err if pqErr, ok := err.(*pgconn.PgError); ok && pqErr.Code == pgerrcode.UniqueViolation { @@ -555,6 +557,17 @@ func nullString(s string) sql.NullString { } } +func nullTime(t time.Time) sql.NullTime { + if t.IsZero() { + return sql.NullTime{} + } + + return sql.NullTime{ + Time: t, + Valid: true, + } +} + type dbConfig struct { MFThing string `db:"mainflux_thing"` Owner string `db:"owner"` @@ -618,17 +631,29 @@ func toConfig(dbcfg dbConfig) bootstrap.Config { } type dbChannel struct { - ID string `db:"mainflux_channel"` - Name sql.NullString `db:"name"` - Owner sql.NullString `db:"owner"` - 
Metadata string `db:"metadata"` + ID string `db:"mainflux_channel"` + Name sql.NullString `db:"name"` + Owner sql.NullString `db:"owner"` + Metadata string `db:"metadata"` + Parent sql.NullString `db:"parent_id,omitempty"` + Description string `db:"description,omitempty"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy sql.NullString `db:"updated_by,omitempty"` + Status clients.Status `db:"status"` } func toDBChannel(owner string, ch bootstrap.Channel) (dbChannel, error) { dbch := dbChannel{ - ID: ch.ID, - Name: nullString(ch.Name), - Owner: nullString(owner), + ID: ch.ID, + Name: nullString(ch.Name), + Owner: nullString(owner), + Parent: nullString(ch.Parent), + Description: ch.Description, + CreatedAt: ch.CreatedAt, + UpdatedAt: nullTime(ch.UpdatedAt), + UpdatedBy: nullString(ch.UpdatedBy), + Status: ch.Status, } metadata, err := json.Marshal(ch.Metadata) @@ -642,12 +667,27 @@ func toDBChannel(owner string, ch bootstrap.Channel) (dbChannel, error) { func toChannel(dbch dbChannel) (bootstrap.Channel, error) { ch := bootstrap.Channel{ - ID: dbch.ID, + ID: dbch.ID, + Description: dbch.Description, + CreatedAt: dbch.CreatedAt, + Status: dbch.Status, } if dbch.Name.Valid { ch.Name = dbch.Name.String } + if dbch.Owner.Valid { + ch.Owner = dbch.Owner.String + } + if dbch.Parent.Valid { + ch.Parent = dbch.Parent.String + } + if dbch.UpdatedBy.Valid { + ch.UpdatedBy = dbch.UpdatedBy.String + } + if dbch.UpdatedAt.Valid { + ch.UpdatedAt = dbch.UpdatedAt.Time + } if err := json.Unmarshal([]byte(dbch.Metadata), &ch.Metadata); err != nil { return bootstrap.Channel{}, errors.Wrap(errors.ErrMalformedEntity, err) diff --git a/bootstrap/postgres/configs_test.go b/bootstrap/postgres/configs_test.go index 23ff366ccb..0e802a814f 100644 --- a/bootstrap/postgres/configs_test.go +++ b/bootstrap/postgres/configs_test.go @@ -636,7 +636,7 @@ func TestUpdateChannel(t *testing.T) { break } } - + update.Owner = retreved.Owner 
assert.Equal(t, update, retreved, fmt.Sprintf("expected %s, go %s", update, retreved)) } diff --git a/bootstrap/postgres/init.go b/bootstrap/postgres/init.go index aa02b619e2..13f37345b1 100644 --- a/bootstrap/postgres/init.go +++ b/bootstrap/postgres/init.go @@ -64,6 +64,17 @@ func Migration() *migrate.MemoryMigrationSource { "CREATE TABLE IF NOT EXISTS unknown_configs", }, }, + { + Id: "configs_3", + Up: []string{ + `ALTER TABLE IF EXISTS channels ADD COLUMN IF NOT EXISTS parent_id VARCHAR(36)`, + `ALTER TABLE IF EXISTS channels ADD COLUMN IF NOT EXISTS description VARCHAR(1024)`, + `ALTER TABLE IF EXISTS channels ADD COLUMN IF NOT EXISTS created_at TIMESTAMP`, + `ALTER TABLE IF EXISTS channels ADD COLUMN IF NOT EXISTS updated_at TIMESTAMP`, + `ALTER TABLE IF EXISTS channels ADD COLUMN IF NOT EXISTS updated_by VARCHAR(254)`, + `ALTER TABLE IF EXISTS channels ADD COLUMN IF NOT EXISTS status SMALLINT NOT NULL DEFAULT 0 CHECK (status >= 0)`, + }, + }, }, } } diff --git a/bootstrap/postgres/setup_test.go b/bootstrap/postgres/setup_test.go index 44be5c4d16..f411179cc9 100644 --- a/bootstrap/postgres/setup_test.go +++ b/bootstrap/postgres/setup_test.go @@ -11,8 +11,8 @@ import ( "github.com/jmoiron/sqlx" bootstrapRepo "github.com/mainflux/mainflux/bootstrap/postgres" pgClient "github.com/mainflux/mainflux/internal/clients/postgres" - "github.com/mainflux/mainflux/logger" + dockertest "github.com/ory/dockertest/v3" ) diff --git a/bootstrap/redis/consumer/events.go b/bootstrap/redis/consumer/events.go index f09bf4e353..42f992aa10 100644 --- a/bootstrap/redis/consumer/events.go +++ b/bootstrap/redis/consumer/events.go @@ -3,14 +3,18 @@ package consumer +import "time" + type removeEvent struct { id string } type updateChannelEvent struct { - id string - name string - metadata map[string]interface{} + id string + name string + metadata map[string]interface{} + updatedAt time.Time + updatedBy string } // Connection event is either connect or disconnect event. 
diff --git a/bootstrap/redis/consumer/streams.go b/bootstrap/redis/consumer/streams.go index a6a6ba6830..deb859fbd8 100644 --- a/bootstrap/redis/consumer/streams.go +++ b/bootstrap/redis/consumer/streams.go @@ -7,19 +7,20 @@ import ( "context" "encoding/json" "fmt" + "time" "github.com/go-redis/redis/v8" "github.com/mainflux/mainflux/bootstrap" "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/pkg/clients" ) const ( stream = "mainflux.things" group = "mainflux.bootstrap" - thingPrefix = "thing." - thingRemove = thingPrefix + "remove" - thingDisconnect = thingPrefix + "disconnect" + thingRemove = "thing.remove" + thingDisconnect = "policy.delete" channelPrefix = "channel." channelUpdate = channelPrefix + "update" @@ -31,7 +32,7 @@ const ( // Subscriber represents event source for things and channels provisioning. type Subscriber interface { // Subscribes to given subject and receives events. - Subscribe(context.Context, string) error + Subscribe(ctx context.Context, subject string) error } type eventStore struct { @@ -96,8 +97,20 @@ func (es eventStore) Subscribe(ctx context.Context, subject string) error { } func decodeRemoveThing(event map[string]interface{}) removeEvent { - return removeEvent{ - id: read(event, "id", ""), + status := read(event, "status", "") + st, err := clients.ToStatus(status) + if err != nil { + return removeEvent{} + } + switch st { + case clients.EnabledStatus: + return removeEvent{} + case clients.DisabledStatus: + return removeEvent{ + id: read(event, "id", ""), + } + default: + return removeEvent{} } } @@ -109,15 +122,29 @@ func decodeUpdateChannel(event map[string]interface{}) updateChannelEvent { } return updateChannelEvent{ - id: read(event, "id", ""), - name: read(event, "name", ""), - metadata: metadata, + id: read(event, "id", ""), + name: read(event, "name", ""), + metadata: metadata, + updatedAt: readTime(event, "updated_at", time.Now()), + updatedBy: read(event, "updated_by", ""), } } func 
decodeRemoveChannel(event map[string]interface{}) removeEvent { - return removeEvent{ - id: read(event, "id", ""), + status := read(event, "status", "") + st, err := clients.ToStatus(status) + if err != nil { + return removeEvent{} + } + switch st { + case clients.EnabledStatus: + return removeEvent{} + case clients.DisabledStatus: + return removeEvent{ + id: read(event, "id", ""), + } + default: + return removeEvent{} } } @@ -130,9 +157,11 @@ func decodeDisconnectThing(event map[string]interface{}) disconnectEvent { func (es eventStore) handleUpdateChannel(ctx context.Context, uce updateChannelEvent) error { channel := bootstrap.Channel{ - ID: uce.id, - Name: uce.name, - Metadata: uce.metadata, + ID: uce.id, + Name: uce.name, + Metadata: uce.metadata, + UpdatedAt: uce.updatedAt, + UpdatedBy: uce.updatedBy, } return es.svc.UpdateChannelHandler(ctx, channel) } @@ -145,3 +174,12 @@ func read(event map[string]interface{}, key, def string) string { return val } + +func readTime(event map[string]interface{}, key string, def time.Time) time.Time { + val, ok := event[key].(time.Time) + if !ok { + return def + } + + return val +} diff --git a/bootstrap/redis/producer/events.go b/bootstrap/redis/producer/events.go index e65d0d719e..e1966da5d0 100644 --- a/bootstrap/redis/producer/events.go +++ b/bootstrap/redis/producer/events.go @@ -4,131 +4,278 @@ package producer import ( + "encoding/json" + "fmt" "strings" - "time" "github.com/mainflux/mainflux/bootstrap" ) const ( - configPrefix = "config." - configCreate = configPrefix + "create" - configUpdate = configPrefix + "update" - configRemove = configPrefix + "remove" + configPrefix = "config." + configCreate = configPrefix + "create" + configUpdate = configPrefix + "update" + configRemove = configPrefix + "remove" + configList = configPrefix + "list" + configHandlerRemove = configPrefix + "remove_handler" thingPrefix = "thing." 
thingBootstrap = thingPrefix + "bootstrap" - thingStateChange = thingPrefix + "state_change" + thingStateChange = thingPrefix + "change_state" thingUpdateConnections = thingPrefix + "update_connections" + thingDisconnect = thingPrefix + "disconnect" + + channelPrefix = "channel." + channelHandlerRemove = channelPrefix + "remove_handler" + channelUpdateHandler = channelPrefix + "update_handler" + + certUpdate = "cert.update" ) type event interface { - encode() map[string]interface{} + encode() (map[string]interface{}, error) } var ( - _ event = (*createConfigEvent)(nil) - _ event = (*updateConfigEvent)(nil) + _ event = (*configEvent)(nil) _ event = (*removeConfigEvent)(nil) _ event = (*bootstrapEvent)(nil) _ event = (*changeStateEvent)(nil) _ event = (*updateConnectionsEvent)(nil) + _ event = (*updateCertEvent)(nil) + _ event = (*listConfigsEvent)(nil) + _ event = (*removeHandlerEvent)(nil) ) -type createConfigEvent struct { - mfThing string - owner string - name string - mfChannels []string - externalID string - content string - timestamp time.Time +type configEvent struct { + bootstrap.Config + operation string } -func (cce createConfigEvent) encode() map[string]interface{} { - return map[string]interface{}{ - "thing_id": cce.mfThing, - "owner": cce.owner, - "name": cce.name, - "channels": strings.Join(cce.mfChannels, ", "), - "external_id": cce.externalID, - "content": cce.content, - "timestamp": cce.timestamp.Unix(), - "operation": configCreate, +func (ce configEvent) encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "state": ce.State.String(), + "operation": ce.operation, } -} - -type updateConfigEvent struct { - mfThing string - name string - content string - timestamp time.Time -} - -func (uce updateConfigEvent) encode() map[string]interface{} { - return map[string]interface{}{ - "thing_id": uce.mfThing, - "name": uce.name, - "content": uce.content, - "timestamp": uce.timestamp.Unix(), - "operation": configUpdate, + if ce.MFThing 
!= "" { + val["mainflux_thing"] = ce.MFThing + } + if ce.Content != "" { + val["content"] = ce.Content + } + if ce.Owner != "" { + val["owner"] = ce.Owner + } + if ce.Name != "" { + val["name"] = ce.Name + } + if ce.ExternalID != "" { + val["external_id"] = ce.ExternalID + } + if len(ce.MFChannels) > 0 { + channels := make([]string, len(ce.MFChannels)) + for i, ch := range ce.MFChannels { + channels[i] = ch.ID + } + val["channels"] = fmt.Sprintf("[%s]", strings.Join(channels, ", ")) } + if ce.ClientCert != "" { + val["client_cert"] = ce.ClientCert + } + if ce.ClientKey != "" { + val["client_key"] = ce.ClientKey + } + if ce.CACert != "" { + val["ca_cert"] = ce.CACert + } + if ce.Content != "" { + val["content"] = ce.Content + } + + return val, nil } type removeConfigEvent struct { - mfThing string - timestamp time.Time + mfThing string } -func (rce removeConfigEvent) encode() map[string]interface{} { +func (rce removeConfigEvent) encode() (map[string]interface{}, error) { return map[string]interface{}{ "thing_id": rce.mfThing, - "timestamp": rce.timestamp.Unix(), "operation": configRemove, + }, nil +} + +type listConfigsEvent struct { + offset uint64 + limit uint64 + fullMatch map[string]string + partialMatch map[string]string +} + +func (rce listConfigsEvent) encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "offset": rce.offset, + "limit": rce.limit, + "operation": configList, } + if len(rce.fullMatch) > 0 { + data, err := json.Marshal(rce.fullMatch) + if err != nil { + return map[string]interface{}{}, err + } + + val["full_match"] = data + } + + if len(rce.partialMatch) > 0 { + data, err := json.Marshal(rce.partialMatch) + if err != nil { + return map[string]interface{}{}, err + } + + val["partial_match"] = data + } + return val, nil } type bootstrapEvent struct { + bootstrap.Config externalID string success bool - timestamp time.Time } -func (be bootstrapEvent) encode() map[string]interface{} { - return map[string]interface{}{ +func (be 
bootstrapEvent) encode() (map[string]interface{}, error) { + val := map[string]interface{}{ "external_id": be.externalID, "success": be.success, - "timestamp": be.timestamp.Unix(), "operation": thingBootstrap, } + + if be.MFThing != "" { + val["mainflux_thing"] = be.MFThing + } + if be.Content != "" { + val["content"] = be.Content + } + if be.Owner != "" { + val["owner"] = be.Owner + } + if be.Name != "" { + val["name"] = be.Name + } + if be.ExternalID != "" { + val["external_id"] = be.ExternalID + } + if len(be.MFChannels) > 0 { + channels := make([]string, len(be.MFChannels)) + for i, ch := range be.MFChannels { + channels[i] = ch.ID + } + val["channels"] = fmt.Sprintf("[%s]", strings.Join(channels, ", ")) + } + if be.ClientCert != "" { + val["client_cert"] = be.ClientCert + } + if be.ClientKey != "" { + val["client_key"] = be.ClientKey + } + if be.CACert != "" { + val["ca_cert"] = be.CACert + } + if be.Content != "" { + val["content"] = be.Content + } + return val, nil } type changeStateEvent struct { - mfThing string - state bootstrap.State - timestamp time.Time + mfThing string + state bootstrap.State } -func (cse changeStateEvent) encode() map[string]interface{} { +func (cse changeStateEvent) encode() (map[string]interface{}, error) { return map[string]interface{}{ "thing_id": cse.mfThing, "state": cse.state.String(), - "timestamp": cse.timestamp.Unix(), "operation": thingStateChange, - } + }, nil } type updateConnectionsEvent struct { mfThing string mfChannels []string - timestamp time.Time } -func (uce updateConnectionsEvent) encode() map[string]interface{} { +func (uce updateConnectionsEvent) encode() (map[string]interface{}, error) { return map[string]interface{}{ "thing_id": uce.mfThing, - "channels": strings.Join(uce.mfChannels, ", "), - "timestamp": uce.timestamp.Unix(), + "channels": fmt.Sprintf("[%s]", strings.Join(uce.mfChannels, ", ")), "operation": thingUpdateConnections, + }, nil +} + +type updateCertEvent struct { + thingKey, clientCert, 
clientKey, caCert string +} + +func (uce updateCertEvent) encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "thing_key": uce.thingKey, + "client_cert": uce.clientCert, + "client_key": uce.clientKey, + "ca_cert": uce.caCert, + "operation": certUpdate, + }, nil +} + +type removeHandlerEvent struct { + id string + operation string +} + +func (rhe removeHandlerEvent) encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "config_id": rhe.id, + "operation": rhe.operation, + }, nil +} + +type updateChannelHandlerEvent struct { + bootstrap.Channel +} + +func (uche updateChannelHandlerEvent) encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": channelUpdateHandler, + } + + if uche.ID != "" { + val["channel_id"] = uche.ID } + if uche.Name != "" { + val["name"] = uche.Name + } + if uche.Metadata != nil { + metadata, err := json.Marshal(uche.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + return val, nil +} + +type disconnectThingEvent struct { + thingID string + channelID string +} + +func (dte disconnectThingEvent) encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "thing_id": dte.thingID, + "channel_id": dte.channelID, + "operation": thingDisconnect, + }, nil } diff --git a/bootstrap/redis/producer/streams.go b/bootstrap/redis/producer/streams.go index e44dee43eb..55706510bc 100644 --- a/bootstrap/redis/producer/streams.go +++ b/bootstrap/redis/producer/streams.go @@ -5,10 +5,10 @@ package producer import ( "context" - "time" "github.com/go-redis/redis/v8" "github.com/mainflux/mainflux/bootstrap" + "github.com/mainflux/mainflux/pkg/errors" ) const ( @@ -38,28 +38,31 @@ func (es eventStore) Add(ctx context.Context, token string, cfg bootstrap.Config return saved, err } - var channels []string - for _, ch := range saved.MFChannels { - channels = append(channels, ch.ID) + ev := configEvent{ + saved, 
configCreate, } - ev := createConfigEvent{ - mfThing: saved.MFThing, - owner: saved.Owner, - name: saved.Name, - mfChannels: channels, - externalID: saved.ExternalID, - content: saved.Content, - timestamp: time.Now(), + if err1 := es.add(ctx, ev); err1 != nil { + return saved, errors.Wrap(err, err1) } - err = es.add(ctx, ev) - return saved, err } func (es eventStore) View(ctx context.Context, token, id string) (bootstrap.Config, error) { - return es.svc.View(ctx, token, id) + cfg, err := es.svc.View(ctx, token, id) + if err != nil { + return cfg, err + } + ev := configEvent{ + cfg, configList, + } + + if err1 := es.add(ctx, ev); err1 != nil { + return cfg, errors.Wrap(err, err1) + } + + return cfg, err } func (es eventStore) Update(ctx context.Context, token string, cfg bootstrap.Config) error { @@ -67,18 +70,26 @@ func (es eventStore) Update(ctx context.Context, token string, cfg bootstrap.Con return err } - ev := updateConfigEvent{ - mfThing: cfg.MFThing, - name: cfg.Name, - content: cfg.Content, - timestamp: time.Now(), + ev := configEvent{ + cfg, configUpdate, } return es.add(ctx, ev) } func (es eventStore) UpdateCert(ctx context.Context, token, thingKey, clientCert, clientKey, caCert string) error { - return es.svc.UpdateCert(ctx, token, thingKey, clientCert, clientKey, caCert) + if err := es.svc.UpdateCert(ctx, token, thingKey, clientCert, clientKey, caCert); err != nil { + return err + } + + ev := updateCertEvent{ + thingKey: thingKey, + clientCert: clientCert, + clientKey: clientKey, + caCert: caCert, + } + + return es.add(ctx, ev) } func (es eventStore) UpdateConnections(ctx context.Context, token, id string, connections []string) error { @@ -89,14 +100,29 @@ func (es eventStore) UpdateConnections(ctx context.Context, token, id string, co ev := updateConnectionsEvent{ mfThing: id, mfChannels: connections, - timestamp: time.Now(), } return es.add(ctx, ev) } func (es eventStore) List(ctx context.Context, token string, filter bootstrap.Filter, offset, limit 
uint64) (bootstrap.ConfigsPage, error) { - return es.svc.List(ctx, token, filter, offset, limit) + bp, err := es.svc.List(ctx, token, filter, offset, limit) + if err != nil { + return bp, err + } + + ev := listConfigsEvent{ + offset: offset, + limit: limit, + fullMatch: filter.FullMatch, + partialMatch: filter.PartialMatch, + } + + if err1 := es.add(ctx, ev); err1 != nil { + return bp, errors.Wrap(err, err1) + } + + return bp, nil } func (es eventStore) Remove(ctx context.Context, token, id string) error { @@ -105,8 +131,7 @@ func (es eventStore) Remove(ctx context.Context, token, id string) error { } ev := removeConfigEvent{ - mfThing: id, - timestamp: time.Now(), + mfThing: id, } return es.add(ctx, ev) @@ -116,15 +141,18 @@ func (es eventStore) Bootstrap(ctx context.Context, externalKey, externalID stri cfg, err := es.svc.Bootstrap(ctx, externalKey, externalID, secure) ev := bootstrapEvent{ - externalID: externalID, - timestamp: time.Now(), - success: true, + cfg, + externalID, + true, } if err != nil { ev.success = false } - _ = es.add(ctx, ev) + + if err1 := es.add(ctx, ev); err1 != nil { + return cfg, err1 + } return cfg, err } @@ -135,35 +163,74 @@ func (es eventStore) ChangeState(ctx context.Context, token, id string, state bo } ev := changeStateEvent{ - mfThing: id, - state: state, - timestamp: time.Now(), + mfThing: id, + state: state, } return es.add(ctx, ev) } func (es eventStore) RemoveConfigHandler(ctx context.Context, id string) error { - return es.svc.RemoveConfigHandler(ctx, id) + if err := es.svc.RemoveConfigHandler(ctx, id); err != nil { + return err + } + + ev := removeHandlerEvent{ + id: id, + operation: configHandlerRemove, + } + + return es.add(ctx, ev) } func (es eventStore) RemoveChannelHandler(ctx context.Context, id string) error { - return es.svc.RemoveChannelHandler(ctx, id) + if err := es.svc.RemoveChannelHandler(ctx, id); err != nil { + return err + } + + ev := removeHandlerEvent{ + id: id, + operation: channelHandlerRemove, + } + + 
return es.add(ctx, ev) } func (es eventStore) UpdateChannelHandler(ctx context.Context, channel bootstrap.Channel) error { - return es.svc.UpdateChannelHandler(ctx, channel) + if err := es.svc.UpdateChannelHandler(ctx, channel); err != nil { + return err + } + + ev := updateChannelHandlerEvent{ + channel, + } + + return es.add(ctx, ev) } func (es eventStore) DisconnectThingHandler(ctx context.Context, channelID, thingID string) error { - return es.svc.DisconnectThingHandler(ctx, channelID, thingID) + if err := es.svc.DisconnectThingHandler(ctx, channelID, thingID); err != nil { + return err + } + + ev := disconnectThingEvent{ + channelID, + thingID, + } + + return es.add(ctx, ev) } func (es eventStore) add(ctx context.Context, ev event) error { + values, err := ev.encode() + if err != nil { + return err + } + record := &redis.XAddArgs{ Stream: streamID, MaxLenApprox: streamLen, - Values: ev.encode(), + Values: values, } return es.client.XAdd(ctx, record).Err() diff --git a/bootstrap/redis/producer/streams_test.go b/bootstrap/redis/producer/streams_test.go index 04cb2d150d..f7aa0ba4c2 100644 --- a/bootstrap/redis/producer/streams_test.go +++ b/bootstrap/redis/producer/streams_test.go @@ -13,17 +13,23 @@ import ( "time" "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux" + "github.com/go-zoo/bone" "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" - "github.com/opentracing/opentracing-go/mocktracer" "github.com/mainflux/mainflux/bootstrap" "github.com/mainflux/mainflux/bootstrap/mocks" "github.com/mainflux/mainflux/bootstrap/redis/producer" + mfclients "github.com/mainflux/mainflux/pkg/clients" + mfgroups "github.com/mainflux/mainflux/pkg/groups" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/things" - httpapi "github.com/mainflux/mainflux/things/api/things/http" + "github.com/mainflux/mainflux/things/clients" + capi "github.com/mainflux/mainflux/things/clients/api" + 
"github.com/mainflux/mainflux/things/groups" + gapi "github.com/mainflux/mainflux/things/groups/api" + tpolicies "github.com/mainflux/mainflux/things/policies" + papi "github.com/mainflux/mainflux/things/policies/api/http" + upolicies "github.com/mainflux/mainflux/users/policies" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -63,7 +69,7 @@ var ( } ) -func newService(auth mainflux.AuthServiceClient, url string) bootstrap.Service { +func newService(auth upolicies.AuthServiceClient, url string) bootstrap.Service { configs := mocks.NewConfigsRepository() config := mfsdk.Config{ ThingsURL: url, @@ -73,25 +79,33 @@ func newService(auth mainflux.AuthServiceClient, url string) bootstrap.Service { return bootstrap.New(auth, configs, sdk, encKey) } -func newThingsService(auth mainflux.AuthServiceClient) things.Service { - channels := make(map[string]things.Channel, channelsNum) +func newThingsService(auth upolicies.AuthServiceClient) (clients.Service, groups.Service, tpolicies.Service) { + channels := make(map[string]mfgroups.Group, channelsNum) for i := 0; i < channelsNum; i++ { id := strconv.Itoa(i + 1) - channels[id] = things.Channel{ + channels[id] = mfgroups.Group{ ID: id, Owner: email, Metadata: map[string]interface{}{"meta": "data"}, + Status: mfclients.EnabledStatus, } } - return mocks.NewThingsService(map[string]things.Thing{}, channels, auth) + csvc := mocks.NewThingsService(map[string]mfclients.Client{}, auth) + gsvc := mocks.NewChannelsService(channels, auth) + psvc := mocks.NewPoliciesService(auth) + return csvc, gsvc, psvc } -func newThingsServer(svc things.Service) *httptest.Server { +func newThingsServer(csvc clients.Service, gsvc groups.Service, psvc tpolicies.Service) *httptest.Server { logger := logger.NewMock() - mux := httpapi.MakeHandler(mocktracer.New(), svc, logger) + mux := bone.New() + capi.MakeHandler(csvc, mux, logger) + gapi.MakeHandler(gsvc, mux, logger) + papi.MakePolicyHandler(csvc, psvc, mux, logger) return 
httptest.NewServer(mux) } + func TestAdd(t *testing.T) { err := redisClient.FlushAll(context.Background()).Err() assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) @@ -156,9 +170,8 @@ func TestAdd(t *testing.T) { var event map[string]interface{} if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID + event := streams[0].Messages + lastID = event[0].ID } test(t, tc.event, event, tc.desc) @@ -222,11 +235,15 @@ func TestUpdate(t *testing.T) { token: validToken, err: nil, event: map[string]interface{}{ - "thing_id": modified.MFThing, - "name": modified.Name, - "content": modified.Content, - "timestamp": time.Now().Unix(), - "operation": configUpdate, + "name": modified.Name, + "content": modified.Content, + "timestamp": time.Now().Unix(), + "operation": configUpdate, + "channels": "[1, 2]", + "external_id": "external_id", + "mainflux_thing": "1", + "owner": email, + "state": "0", }, }, { @@ -253,6 +270,7 @@ func TestUpdate(t *testing.T) { if len(streams) > 0 && len(streams[0].Messages) > 0 { msg := streams[0].Messages[0] event = msg.Values + event["timestamp"] = msg.ID lastID = msg.ID } @@ -318,9 +336,8 @@ func TestUpdateConnections(t *testing.T) { var event map[string]interface{} if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID + event := streams[0].Messages + lastID = event[0].ID } test(t, tc.event, event, tc.desc) @@ -400,9 +417,8 @@ func TestRemove(t *testing.T) { var event map[string]interface{} if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID + event := streams[0].Messages + lastID = event[0].ID } test(t, tc.event, event, tc.desc) @@ -447,7 +463,7 @@ func TestBootstrap(t *testing.T) { { desc: "bootstrap with an error", externalID: saved.ExternalID, - externalKey: "external_id", + externalKey: "external_id1", err: 
bootstrap.ErrExternalKey, event: map[string]interface{}{ "external_id": "external_id", @@ -471,9 +487,8 @@ func TestBootstrap(t *testing.T) { var event map[string]interface{} if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID + event := streams[0].Messages + lastID = event[0].ID } test(t, tc.event, event, tc.desc) } @@ -539,9 +554,8 @@ func TestChangeState(t *testing.T) { var event map[string]interface{} if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID + event := streams[0].Messages + lastID = event[0].ID } test(t, tc.event, event, tc.desc) @@ -551,9 +565,11 @@ func TestChangeState(t *testing.T) { func test(t *testing.T, expected, actual map[string]interface{}, description string) { if expected != nil && actual != nil { ts1 := expected["timestamp"].(int64) + ats := actual["timestamp"].(string) - ts2, err := strconv.ParseInt(actual["timestamp"].(string), 10, 64) + ts2, err := strconv.ParseInt(strings.Split(ats, "-")[0], 10, 64) require.Nil(t, err, fmt.Sprintf("%s: expected to get a valid timestamp, got %s", description, err)) + ts2 = time.UnixMilli(ts2).Unix() val := ts1 == ts2 || ts2 <= ts1+defaultTimout assert.True(t, val, fmt.Sprintf("%s: timestamp is not in valid range", description)) diff --git a/bootstrap/service.go b/bootstrap/service.go index 740a5325e8..4f6eb0be4d 100644 --- a/bootstrap/service.go +++ b/bootstrap/service.go @@ -10,9 +10,9 @@ import ( "encoding/hex" "time" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/mainflux/mainflux/users/policies" ) var ( @@ -103,14 +103,14 @@ type ConfigReader interface { } type bootstrapService struct { - auth mainflux.AuthServiceClient + auth policies.AuthServiceClient configs ConfigRepository sdk mfsdk.SDK encKey []byte } // New returns new Bootstrap service. 
-func New(auth mainflux.AuthServiceClient, configs ConfigRepository, sdk mfsdk.SDK, encKey []byte) Service { +func New(auth policies.AuthServiceClient, configs ConfigRepository, sdk mfsdk.SDK, encKey []byte) Service { return &bootstrapService{ configs: configs, sdk: sdk, @@ -140,7 +140,7 @@ func (bs bootstrapService) Add(ctx context.Context, token string, cfg Config) (C } id := cfg.MFThing - mfThing, err := bs.thing(token, id) + mfThing, err := bs.thing(id, token) if err != nil { return Config{}, errors.Wrap(errThingNotFound, err) } @@ -148,12 +148,12 @@ func (bs bootstrapService) Add(ctx context.Context, token string, cfg Config) (C cfg.MFThing = mfThing.ID cfg.Owner = owner cfg.State = Inactive - cfg.MFKey = mfThing.Key + cfg.MFKey = mfThing.Credentials.Secret saved, err := bs.configs.Save(cfg, toConnect) if err != nil { if id == "" { - if errT := bs.sdk.DeleteThing(cfg.MFThing, token); errT != nil { + if _, errT := bs.sdk.DisableThing(cfg.MFThing, token); errT != nil { err = errors.Wrap(err, errT) } } @@ -364,30 +364,31 @@ func (bs bootstrapService) identify(token string) (string, error) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - res, err := bs.auth.Identify(ctx, &mainflux.Token{Value: token}) + res, err := bs.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return "", errors.ErrAuthentication } - return res.GetEmail(), nil + return res.GetId(), nil } // Method thing retrieves Mainflux Thing creating one if an empty ID is passed. 
-func (bs bootstrapService) thing(token, id string) (mfsdk.Thing, error) { - thingID := id +func (bs bootstrapService) thing(id, token string) (mfsdk.Thing, error) { + var thing mfsdk.Thing var err error + thing.ID = id if id == "" { - thingID, err = bs.sdk.CreateThing(mfsdk.Thing{}, token) + thing, err = bs.sdk.CreateThing(mfsdk.Thing{}, token) if err != nil { return mfsdk.Thing{}, errors.Wrap(errCreateThing, err) } } - thing, err := bs.sdk.Thing(thingID, token) + thing, err = bs.sdk.Thing(thing.ID, token) if err != nil { if id != "" { - if errT := bs.sdk.DeleteThing(thingID, token); errT != nil { + if _, errT := bs.sdk.DisableThing(thing.ID, token); errT != nil { err = errors.Wrap(err, errT) } } diff --git a/bootstrap/service_test.go b/bootstrap/service_test.go index 052bd30ae9..cf48c5cf98 100644 --- a/bootstrap/service_test.go +++ b/bootstrap/service_test.go @@ -15,17 +15,23 @@ import ( "strconv" "testing" - "github.com/opentracing/opentracing-go/mocktracer" + "github.com/go-zoo/bone" "github.com/gofrs/uuid" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/bootstrap" "github.com/mainflux/mainflux/bootstrap/mocks" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/things" - httpapi "github.com/mainflux/mainflux/things/api/things/http" + "github.com/mainflux/mainflux/things/clients" + capi "github.com/mainflux/mainflux/things/clients/api" + "github.com/mainflux/mainflux/things/groups" + gapi "github.com/mainflux/mainflux/things/groups/api" + tpolicies "github.com/mainflux/mainflux/things/policies" + papi "github.com/mainflux/mainflux/things/policies/api/http" + upolicies "github.com/mainflux/mainflux/users/policies" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
) @@ -55,7 +61,7 @@ var ( } ) -func newService(auth mainflux.AuthServiceClient, url string) bootstrap.Service { +func newService(auth upolicies.AuthServiceClient, url string) bootstrap.Service { things := mocks.NewConfigsRepository() config := mfsdk.Config{ ThingsURL: url, @@ -65,23 +71,30 @@ func newService(auth mainflux.AuthServiceClient, url string) bootstrap.Service { return bootstrap.New(auth, things, sdk, encKey) } -func newThingsService(auth mainflux.AuthServiceClient) things.Service { - channels := make(map[string]things.Channel, channelsNum) +func newThingsService(auth upolicies.AuthServiceClient) (clients.Service, groups.Service, tpolicies.Service) { + channels := make(map[string]mfgroups.Group, channelsNum) for i := 0; i < channelsNum; i++ { id := strconv.Itoa(i + 1) - channels[id] = things.Channel{ + channels[id] = mfgroups.Group{ ID: id, Owner: email, Metadata: map[string]interface{}{"meta": "data"}, + Status: mfclients.EnabledStatus, } } - return mocks.NewThingsService(map[string]things.Thing{}, channels, auth) + csvc := mocks.NewThingsService(map[string]mfclients.Client{}, auth) + gsvc := mocks.NewChannelsService(channels, auth) + psvc := mocks.NewPoliciesService(auth) + return csvc, gsvc, psvc } -func newThingsServer(svc things.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(mocktracer.New(), svc, logger) +func newThingsServer(csvc clients.Service, gsvc groups.Service, psvc tpolicies.Service) *httptest.Server { + logger := mflog.NewMock() + mux := bone.New() + capi.MakeHandler(csvc, mux, logger) + gapi.MakeHandler(gsvc, mux, logger) + papi.MakePolicyHandler(csvc, psvc, mux, logger) return httptest.NewServer(mux) } @@ -648,7 +661,7 @@ func TestChangeState(t *testing.T) { for _, tc := range cases { err := svc.ChangeState(context.Background(), tc.token, tc.id, tc.state) - assert.True(t, errors.Contains(err, tc.err), err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.True(t, 
errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) } } diff --git a/certs/api/logging.go b/certs/api/logging.go index ae7dde256b..27448aef45 100644 --- a/certs/api/logging.go +++ b/certs/api/logging.go @@ -11,18 +11,18 @@ import ( "time" "github.com/mainflux/mainflux/certs" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" ) var _ certs.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc certs.Service } // NewLoggingMiddleware adds logging facilities to the core service. -func NewLoggingMiddleware(svc certs.Service, logger log.Logger) certs.Service { +func NewLoggingMiddleware(svc certs.Service, logger mflog.Logger) certs.Service { return &loggingMiddleware{logger, svc} } diff --git a/certs/postgres/certs.go b/certs/postgres/certs.go index d91bfd6532..be7b1588b9 100644 --- a/certs/postgres/certs.go +++ b/certs/postgres/certs.go @@ -11,7 +11,6 @@ import ( "github.com/jackc/pgerrcode" "github.com/jackc/pgx/v5/pgconn" - "github.com/jmoiron/sqlx" "github.com/mainflux/mainflux/certs" "github.com/mainflux/mainflux/logger" diff --git a/certs/service.go b/certs/service.go index b88ce4e522..c39a410fbc 100644 --- a/certs/service.go +++ b/certs/service.go @@ -7,10 +7,10 @@ import ( "context" "time" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/certs/pki" "github.com/mainflux/mainflux/pkg/errors" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/mainflux/mainflux/users/policies" ) var ( @@ -45,14 +45,14 @@ type Service interface { } type certsService struct { - auth mainflux.AuthServiceClient + auth policies.AuthServiceClient certsRepo Repository sdk mfsdk.SDK pki pki.Agent } // New returns new Certs service -func New(auth mainflux.AuthServiceClient, certs Repository, sdk mfsdk.SDK, pki pki.Agent) Service { +func New(auth policies.AuthServiceClient, certs Repository, sdk mfsdk.SDK, pki pki.Agent) Service { 
return &certsService{ certsRepo: certs, sdk: sdk, @@ -80,7 +80,7 @@ type Cert struct { } func (cs *certsService) IssueCert(ctx context.Context, token, thingID string, ttl string) (Cert, error) { - owner, err := cs.auth.Identify(ctx, &mainflux.Token{Value: token}) + owner, err := cs.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return Cert{}, err } @@ -90,7 +90,7 @@ func (cs *certsService) IssueCert(ctx context.Context, token, thingID string, tt return Cert{}, errors.Wrap(ErrFailedCertCreation, err) } - cert, err := cs.pki.IssueCert(thing.Key, ttl) + cert, err := cs.pki.IssueCert(thing.Credentials.Secret, ttl) if err != nil { return Cert{}, errors.Wrap(ErrFailedCertCreation, err) } @@ -113,7 +113,7 @@ func (cs *certsService) IssueCert(ctx context.Context, token, thingID string, tt func (cs *certsService) RevokeCert(ctx context.Context, token, thingID string) (Revoke, error) { var revoke Revoke - u, err := cs.auth.Identify(ctx, &mainflux.Token{Value: token}) + u, err := cs.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return revoke, err } @@ -144,7 +144,7 @@ func (cs *certsService) RevokeCert(ctx context.Context, token, thingID string) ( } func (cs *certsService) ListCerts(ctx context.Context, token, thingID string, offset, limit uint64) (Page, error) { - u, err := cs.auth.Identify(ctx, &mainflux.Token{Value: token}) + u, err := cs.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return Page{}, err } @@ -167,7 +167,7 @@ func (cs *certsService) ListCerts(ctx context.Context, token, thingID string, of } func (cs *certsService) ListSerials(ctx context.Context, token, thingID string, offset, limit uint64) (Page, error) { - u, err := cs.auth.Identify(ctx, &mainflux.Token{Value: token}) + u, err := cs.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return Page{}, err } @@ -176,7 +176,7 @@ func (cs *certsService) ListSerials(ctx context.Context, token, thingID string, } func (cs *certsService) 
ViewCert(ctx context.Context, token, serialID string) (Cert, error) { - u, err := cs.auth.Identify(ctx, &mainflux.Token{Value: token}) + u, err := cs.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return Cert{}, err } diff --git a/certs/service_test.go b/certs/service_test.go index 037a5213e6..31583994d4 100644 --- a/certs/service_test.go +++ b/certs/service_test.go @@ -12,17 +12,18 @@ import ( "testing" "time" - "github.com/mainflux/mainflux" + "github.com/go-zoo/bone" bsmocks "github.com/mainflux/mainflux/bootstrap/mocks" "github.com/mainflux/mainflux/certs" "github.com/mainflux/mainflux/certs/mocks" "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/things" - httpapi "github.com/mainflux/mainflux/things/api/things/http" - thmocks "github.com/mainflux/mainflux/things/mocks" - "github.com/opentracing/opentracing-go/mocktracer" + "github.com/mainflux/mainflux/things/clients" + httpapi "github.com/mainflux/mainflux/things/clients/api" + thmocks "github.com/mainflux/mainflux/things/clients/mocks" + "github.com/mainflux/mainflux/users/policies" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -48,8 +49,9 @@ func newService(tokens map[string]string) (certs.Service, error) { ac := bsmocks.NewAuthClient(map[string]string{token: email}) server := newThingsServer(newThingsService(ac)) - policies := []thmocks.MockSubjectSet{{Object: "users", Relation: "member"}} - auth := thmocks.NewAuthService(tokens, map[string][]thmocks.MockSubjectSet{email: policies}) + policies := []thmocks.MockSubjectSet{{Object: "token", Relation: clients.AdminRelationKey}} + auth := thmocks.NewAuthService(tokens, map[string][]thmocks.MockSubjectSet{token: policies}) + config := mfsdk.Config{ ThingsURL: server.URL, } @@ -72,18 +74,20 @@ func newService(tokens map[string]string) 
(certs.Service, error) { return certs.New(auth, repo, sdk, pki), nil } -func newThingsService(auth mainflux.AuthServiceClient) things.Service { - ths := make(map[string]things.Thing, thingsNum) +func newThingsService(auth policies.AuthServiceClient) clients.Service { + ths := make(map[string]mfclients.Client, thingsNum) for i := 0; i < thingsNum; i++ { id := strconv.Itoa(i + 1) - ths[id] = things.Thing{ - ID: id, - Key: thingKey, + ths[id] = mfclients.Client{ + ID: id, + Credentials: mfclients.Credentials{ + Secret: thingKey, + }, Owner: email, } } - return bsmocks.NewThingsService(ths, map[string]things.Channel{}, auth) + return bsmocks.NewThingsService(ths, auth) } func TestIssueCert(t *testing.T) { @@ -359,8 +363,9 @@ func TestViewCert(t *testing.T) { } } -func newThingsServer(svc things.Service) *httptest.Server { +func newThingsServer(svc clients.Service) *httptest.Server { logger := logger.NewMock() - mux := httpapi.MakeHandler(mocktracer.New(), svc, logger) + mux := bone.New() + httpapi.MakeHandler(svc, mux, logger) return httptest.NewServer(mux) } diff --git a/cli/README.md b/cli/README.md index ee111f2b10..015e66cac9 100644 --- a/cli/README.md +++ b/cli/README.md @@ -1,134 +1,280 @@ # Mainflux CLI + ## Build + From the project root: + ```bash make cli ``` ## Usage + ### Service + #### Get Mainflux Things services Health Check + ```bash mainflux-cli health ``` ### Users management + #### Create User + ```bash -mainflux-cli users create +mainflux-cli users create + +mainflux-cli users create ``` #### Login User + ```bash mainflux-cli users token ``` -#### Retrieve User +#### Get User + +```bash +mainflux-cli users get +``` + +#### Get Users + ```bash -mainflux-cli users get +mainflux-cli users get all ``` #### Update User Metadata + ```bash -mainflux-cli users update '{"key1":"value1", "key2":"value2"}' +mainflux-cli users update '{"name":"value1", "metadata":{"value2": "value3"}}' ``` #### Update User Password + +```bash +mainflux-cli users password +``` + 
+#### Enable User + +```bash +mainflux-cli users enable +``` + +#### Disable User + ```bash -mainflux-cli users password +mainflux-cli users disable ``` ### System Provisioning + #### Create Thing + ```bash -mainflux-cli things create '{"name":"myThing"}' +mainflux-cli things create '{"name":"myThing"}' ``` #### Create Thing with metadata + ```bash -mainflux-cli things create '{"name":"myThing", "metadata": {\"key1\":\"value1\"}}' +mainflux-cli things create '{"name":"myThing", "metadata": {"key1":"value1"}}' ``` #### Bulk Provision Things + ```bash -mainflux-cli provision things +mainflux-cli provision things ``` -* `file` - A CSV or JSON file containing things -* `user_auth_token` - A valid user auth token for the current system +* `file` - A CSV or JSON file containing thing names (must have extension `.csv` or `.json`) +* `user_token` - A valid user auth token for the current system + +An example CSV file might be: + +```csv +thing1, +thing2, +thing3, +``` + +in which the first column is the thing's name. 
+ +A comparable JSON file would be + +```json +[{ + "name": "", + "status": "enabled" + }, + { + "name": "", + "status": "disabled" + }, { + + "name": "", + "status": "enabled", + "credentials": { + "identity": "", + "secret": "" + } + } +] +``` + +With JSON you can be able to specify more fields of the channels you want to create #### Update Thing + ```bash -mainflux-cli things update '{"id":"", "name":"myNewName"}' +mainflux-cli things update '{"name":"value1", "metadata":{"key1": "value2"}}' ``` -#### Remove Thing +#### Identify Thing + ```bash -mainflux-cli things delete +mainflux-cli things identify ``` -#### Retrieve a subset list of provisioned Things +#### Enable Thing + ```bash -mainflux-cli things get all --offset=1 --limit=5 +mainflux-cli things enable ``` -#### Retrieve Thing By ID +#### Disable Thing + ```bash -mainflux-cli things get +mainflux-cli things disable +``` + +#### Get Thing + +```bash +mainflux-cli things get +``` + +#### Get Things + +```bash +mainflux-cli things get all +``` + +#### Get a subset list of provisioned Things + +```bash +mainflux-cli things get all --offset=1 --limit=5 ``` #### Create Channel + ```bash -mainflux-cli channels create '{"name":"myChannel"}' +mainflux-cli channels create '{"name":"myChannel"}' ``` #### Bulk Provision Channels + ```bash -mainflux-cli provision channels +mainflux-cli provision channels +``` + +* `file` - A CSV or JSON file containing channel names (must have extension `.csv` or `.json`) +* `user_token` - A valid user auth token for the current system + +An example CSV file might be: + +```csv +, +, +, ``` -* `file` - A CSV or JSON file containing channels -* `user_auth_token` - A valid user auth token for the current system +in which the first column is channel names. 
+ +A comparable JSON file would be + +```json +[{ + "name": "", + "description": "", + "status": "enabled" + }, + { + "name": "", + "description": "", + "status": "disabled" + }, { + + "name": "", + "description": "", + "status": "enabled" + } +] +``` + +With JSON you can be able to specify more fields of the channels you want to create #### Update Channel + ```bash -mainflux-cli channels update '{"id":"","name":"myNewName"}' +mainflux-cli channels update '{"id":"","name":"myNewName"}' ``` -#### Remove Channel +#### Enable Channel + ```bash -mainflux-cli channels delete +mainflux-cli channels enable ``` -#### Retrieve a subset list of provisioned Channels +#### Disable Channel + ```bash -mainflux-cli channels get all --offset=1 --limit=5 +mainflux-cli channels disable ``` -#### Retrieve Channel By ID +#### Get Channel + +```bash +mainflux-cli channels get +``` + +#### Get Channels + ```bash -mainflux-cli channels get +mainflux-cli channels get all +``` + +#### Get a subset list of provisioned Channels + +```bash +mainflux-cli channels get all --offset=1 --limit=5 ``` ### Access control + #### Connect Thing to Channel + ```bash -mainflux-cli things connect +mainflux-cli things connect ``` #### Bulk Connect Things to Channels + ```bash -mainflux-cli provision connect +mainflux-cli provision connect ``` -* `file` - A CSV or JSON file containing thing and channel ids -* `user_auth_token` - A valid user auth token for the current system +* `file` - A CSV or JSON file containing thing and channel ids (must have extension `.csv` or `.json`) +* `user_token` - A valid user auth token for the current system An example CSV file might be ```csv -, -, +, +, ``` in which the first column is thing IDs and the second column is channel IDs. A connection will be created for each thing to each channel. This example would result in 4 connections being created. 
@@ -137,120 +283,133 @@ A comparable JSON file would be ```json { - "thing_ids": [ - "", - "" + "client_ids": [ + "", + "" ], - "channel_ids": [ - "", - "" + "group_ids": [ + "", + "" ] } ``` #### Disconnect Thing from Channel -```bash -mainflux-cli things disconnect +```bash +mainflux-cli things disconnect ``` -#### Retrieve a subset list of Channels connected to Thing +#### Get a subset list of Channels connected to Thing + ```bash -mainflux-cli things connections +mainflux-cli things connections ``` -#### Retrieve a subset list of Things connected to Channel +#### Get a subset list of Things connected to Channel + ```bash -mainflux-cli channels connections +mainflux-cli channels connections ``` - ### Messaging + #### Send a message over HTTP + ```bash -mainflux-cli messages send '[{"bn":"Dev1","n":"temp","v":20}, {"n":"hum","v":40}, {"bn":"Dev2", "n":"temp","v":20}, {"n":"hum","v":40}]' +mainflux-cli messages send '[{"bn":"Dev1","n":"temp","v":20}, {"n":"hum","v":40}, {"bn":"Dev2", "n":"temp","v":20}, {"n":"hum","v":40}]' ``` #### Read messages over HTTP + ```bash -mainflux-cli messages read +mainflux-cli messages read -R ``` ### Bootstrap #### Add configuration + ```bash -mainflux-cli bootstrap create '{"external_id": "myExtID", "external_key": "myExtKey", "name": "myName", "content": "myContent"}' +mainflux-cli bootstrap create '{"external_id": "myExtID", "external_key": "myExtKey", "name": "myName", "content": "myContent"}' -b ``` #### View configuration + ```bash -mainflux-cli bootstrap view +mainflux-cli bootstrap get -b ``` #### Update configuration + ```bash -mainflux-cli bootstrap update '{"MFThing":"", "name": "newName", "content": "newContent"}' +mainflux-cli bootstrap update '{"mainflux_id":"", "name": "newName", "content": "newContent"}' -b ``` #### Remove configuration + ```bash -mainflux-cli bootstrap remove +mainflux-cli bootstrap remove -b ``` #### Bootstrap configuration + ```bash -mainflux-cli bootstrap bootstrap +mainflux-cli bootstrap 
bootstrap -b ``` ### Groups -#### Create new group -```bash -mainflux-cli groups create '{"name":"","parent_id":"","description":"","metadata":{"key":"value",...}}' -``` -#### Delete group -```bash -mainflux-cli groups delete -``` -#### Get group with id -```bash -mainflux-cli groups get -``` -#### List all groups + +#### Create Group + ```bash -mainflux-cli groups get all +mainflux-cli groups create '{"name":"","description":"","parentID":"","metadata":""}' ``` -#### List children groups for some group + +#### Get Group + ```bash -mainflux-cli groups get children +mainflux-cli groups get ``` -#### Assign user to a group + +#### Get Groups + ```bash -mainflux-cli groups assign +mainflux-cli groups get all ``` -#### Unassign user from group + +#### Get Group Members + ```bash -mainflux-cli groups unassign +mainflux-cli groups members ``` -#### List users for a group + +#### Get Memberships + ```bash -mainflux-cli groups members +mainflux-cli groups membership ``` -#### List groups that user belongs to + +#### Assign Members to Group + ```bash -mainflux-cli groups membership +mainflux-cli groups assign ``` -### Keys management -#### Issue a new Key +#### Unassign Members to Group + ```bash -mainflux-cli keys issue +mainflux-cli groups unassign ``` -#### Remove API key from database + +#### Enable Group + ```bash -mainflux-cli keys revoke +mainflux-cli groups enable ``` -#### Retrieve API key with given id + +#### Disable Group + ```bash -mainflux-cli keys retrieve +mainflux-cli groups disable ``` diff --git a/cli/channels.go b/cli/channels.go index 369c314921..498c7999ce 100644 --- a/cli/channels.go +++ b/cli/channels.go @@ -29,13 +29,13 @@ var cmdChannels = []cobra.Command{ return } - id, err := sdk.CreateChannel(channel, args[1]) + channel, err := sdk.CreateChannel(channel, args[1]) if err != nil { logError(err) return } - logCreated(id) + logJSON(channel) }, }, { @@ -82,91 +82,89 @@ var cmdChannels = []cobra.Command{ }, }, { - Use: "update ", + Use: "update ", 
Short: "Update channel", Long: `Updates channel record`, Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { + if len(args) != 3 { logUsage(cmd.Use) return } var channel mfxsdk.Channel - if err := json.Unmarshal([]byte(args[0]), &channel); err != nil { + if err := json.Unmarshal([]byte(args[1]), &channel); err != nil { logError(err) return } - - if err := sdk.UpdateChannel(channel, args[1]); err != nil { + channel.ID = args[0] + channel, err := sdk.UpdateChannel(channel, args[2]) + if err != nil { logError(err) return } - logOK() + logJSON(channel) }, }, { - Use: "delete ", - Short: "Delete channel", - Long: `Delete channel by ID`, + Use: "connections ", + Short: "Connections list", + Long: `List of Things connected to a Channel`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 2 { logUsage(cmd.Use) return } - - if err := sdk.DeleteChannel(args[0], args[1]); err != nil { + pm := mfxsdk.PageMetadata{ + Offset: uint64(Offset), + Limit: uint64(Limit), + Disconnected: false, + } + cl, err := sdk.ThingsByChannel(args[0], pm, args[1]) + if err != nil { logError(err) return } - logOK() + logJSON(cl) }, }, { - Use: "connections ", - Short: "Connections list", - Long: `List of Things connected to a Channel`, + Use: "enable ", + Short: "Change channel status to enabled", + Long: `Change channel status to enabled`, Run: func(cmd *cobra.Command, args []string) { if len(args) != 2 { logUsage(cmd.Use) return } - pm := mfxsdk.PageMetadata{ - Offset: uint64(Offset), - Limit: uint64(Limit), - Disconnected: false, - } - cl, err := sdk.ThingsByChannel(args[0], pm, args[1]) + + channel, err := sdk.EnableChannel(args[0], args[1]) if err != nil { logError(err) return } - logJSON(cl) + logJSON(channel) }, }, { - Use: "not-connected ", - Short: "Not-connected list", - Long: `List of Things not connected to a Channel`, + Use: "disable ", + Short: "Change channel status to disabled", + Long: `Change channel status to disabled`, Run: func(cmd *cobra.Command, 
args []string) { if len(args) != 2 { logUsage(cmd.Use) return } - pm := mfxsdk.PageMetadata{ - Offset: uint64(Offset), - Limit: uint64(Limit), - Disconnected: true, - } - cl, err := sdk.ThingsByChannel(args[0], pm, args[1]) + + channel, err := sdk.DisableChannel(args[0], args[1]) if err != nil { logError(err) return } - logJSON(cl) + logJSON(channel) }, }, } diff --git a/cli/groups.go b/cli/groups.go index 6b1988ccd0..347bfb80b9 100644 --- a/cli/groups.go +++ b/cli/groups.go @@ -6,6 +6,7 @@ package cli import ( "encoding/json" + mfclients "github.com/mainflux/mainflux/pkg/clients" mfxsdk "github.com/mainflux/mainflux/pkg/sdk/go" "github.com/spf13/cobra" ) @@ -34,12 +35,13 @@ var cmdGroups = []cobra.Command{ logError(err) return } - id, err := sdk.CreateGroup(group, args[1]) + group.Status = mfclients.EnabledStatus.String() + group, err := sdk.CreateGroup(group, args[1]) if err != nil { logError(err) return } - logCreated(id) + logJSON(group) }, }, { @@ -58,7 +60,7 @@ var cmdGroups = []cobra.Command{ return } - if err := sdk.UpdateGroup(group, args[1]); err != nil { + if _, err := sdk.UpdateGroup(group, args[1]); err != nil { logError(err) return } @@ -143,7 +145,7 @@ var cmdGroups = []cobra.Command{ }, }, { - Use: "assign ", + Use: "assign ", Short: "Assign member", Long: `Assign members to a group. 
member_ids - '["member_id",...]`, @@ -152,12 +154,12 @@ var cmdGroups = []cobra.Command{ logUsage(cmd.Use) return } - var ids []string - if err := json.Unmarshal([]byte(args[0]), &ids); err != nil { + var types []string + if err := json.Unmarshal([]byte(args[0]), &types); err != nil { logError(err) return } - if err := sdk.Assign(ids, args[1], args[2], args[3]); err != nil { + if err := sdk.Assign(types, args[1], args[2], args[3]); err != nil { logError(err) return } @@ -165,37 +167,21 @@ var cmdGroups = []cobra.Command{ }, }, { - Use: "unassign ", + Use: "unassign ", Short: "Unassign member", Long: `Unassign members from a group member_ids - '["member_id",...]`, Run: func(cmd *cobra.Command, args []string) { - if len(args) != 3 { + if len(args) != 4 { logUsage(cmd.Use) return } - var ids []string - if err := json.Unmarshal([]byte(args[0]), &ids); err != nil { + var types []string + if err := json.Unmarshal([]byte(args[0]), &types); err != nil { logError(err) return } - if err := sdk.Unassign(args[1], ids, args[2]); err != nil { - logError(err) - return - } - logOK() - }, - }, - { - Use: "delete ", - Short: "Delete group", - Long: `Delete group.`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { - logUsage(cmd.Use) - return - } - if err := sdk.DeleteGroup(args[0], args[1]); err != nil { + if err := sdk.Unassign(types, args[1], args[2], args[3]); err != nil { logError(err) return } @@ -214,6 +200,7 @@ var cmdGroups = []cobra.Command{ pm := mfxsdk.PageMetadata{ Offset: uint64(Offset), Limit: uint64(Limit), + Status: Status, } up, err := sdk.Members(args[0], pm, args[1]) if err != nil { @@ -244,6 +231,44 @@ var cmdGroups = []cobra.Command{ logJSON(up) }, }, + { + Use: "enable ", + Short: "Change group status to enabled", + Long: `Change group status to enabled`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 2 { + logUsage(cmd.Use) + return + } + + group, err := sdk.EnableGroup(args[0], args[1]) + if err != nil { + 
logError(err) + return + } + + logJSON(group) + }, + }, + { + Use: "disable ", + Short: "Change group status to disabled", + Long: `Change group status to disabled`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 2 { + logUsage(cmd.Use) + return + } + + group, err := sdk.DisableGroup(args[0], args[1]) + if err != nil { + logError(err) + return + } + + logJSON(group) + }, + }, } // NewGroupsCmd returns users command. diff --git a/cli/keys.go b/cli/keys.go deleted file mode 100644 index c08052a85b..0000000000 --- a/cli/keys.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package cli - -import ( - "time" - - "github.com/spf13/cobra" -) - -var cmdAPIKeys = []cobra.Command{ - { - Use: "issue ", - Short: "Issue key", - Long: `Issues a new Key`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { - logUsage(cmd.Use) - return - } - - d, err := time.ParseDuration(args[0]) - if err != nil { - logError(err) - return - } - - resp, err := sdk.Issue(d, args[1]) - if err != nil { - logError(err) - return - } - - logJSON(resp) - }, - }, - { - Use: "revoke ", - Short: "Revoke key", - Long: `Removes API key from database`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { - logUsage(cmd.Use) - return - } - - if err := sdk.Revoke(args[0], args[1]); err != nil { - logError(err) - return - } - - logOK() - }, - }, - { - Use: "retrieve ", - Short: "Retrieve key", - Long: `Retrieves API key with given id`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { - logUsage(cmd.Use) - return - } - - rk, err := sdk.RetrieveKey(args[0], args[1]) - if err != nil { - logError(err) - return - } - - logJSON(rk) - }, - }, -} - -// NewKeysCmd returns keys command. 
-func NewKeysCmd() *cobra.Command { - cmd := cobra.Command{ - Use: "keys [issue | revoke | retrieve]", - Short: "Keys management", - Long: `Keys management: issue, revoke, or retrieve API key.`, - } - - for i := range cmdAPIKeys { - cmd.AddCommand(&cmdAPIKeys[i]) - } - - return &cmd -} diff --git a/cli/provision.go b/cli/provision.go index ce8f453694..249fc991a8 100644 --- a/cli/provision.go +++ b/cli/provision.go @@ -99,6 +99,7 @@ var cmdProvision = []cobra.Command{ logError(err) return } + logOK() }, }, { @@ -119,17 +120,23 @@ var cmdProvision = []cobra.Command{ } rand.Seed(time.Now().UnixNano()) - un := fmt.Sprintf("%s@email.com", namesgenerator.GetRandomName(0)) + name := namesgenerator.GetRandomName(0) // Create test user user := mfxsdk.User{ - Email: un, - Password: "12345678", - } - if _, err := sdk.CreateUser(user, ""); err != nil { + Name: name, + Credentials: mfxsdk.Credentials{ + Identity: fmt.Sprintf("%s@email.com", name), + Secret: "12345678", + }, + Status: mfxsdk.EnabledStatus, + } + user, err := sdk.CreateUser(user, "") + if err != nil { logError(err) return } + user.Credentials.Secret = "12345678" ut, err := sdk.CreateToken(user) if err != nil { logError(err) @@ -139,14 +146,14 @@ var cmdProvision = []cobra.Command{ // Create things for i := 0; i < numThings; i++ { n := fmt.Sprintf("d%d", i) - t := mfxsdk.Thing{ - Name: n, + Name: n, + Status: mfxsdk.EnabledStatus, } things = append(things, t) } - things, err = sdk.CreateThings(things, ut) + things, err = sdk.CreateThings(things, ut.AccessToken) if err != nil { logError(err) return @@ -157,12 +164,13 @@ var cmdProvision = []cobra.Command{ n := fmt.Sprintf("c%d", i) c := mfxsdk.Channel{ - Name: n, + Name: n, + Status: mfxsdk.EnabledStatus, } channels = append(channels, c) } - channels, err = sdk.CreateChannels(channels, ut) + channels, err = sdk.CreateChannels(channels, ut.AccessToken) if err != nil { logError(err) return @@ -173,7 +181,7 @@ var cmdProvision = []cobra.Command{ ChannelIDs: 
[]string{channels[0].ID, channels[1].ID}, ThingIDs: []string{things[0].ID}, } - if err := sdk.Connect(conIDs, ut); err != nil { + if err := sdk.Connect(conIDs, ut.AccessToken); err != nil { logError(err) return } @@ -182,7 +190,7 @@ var cmdProvision = []cobra.Command{ ChannelIDs: []string{channels[0].ID}, ThingIDs: []string{things[1].ID}, } - if err := sdk.Connect(conIDs, ut); err != nil { + if err := sdk.Connect(conIDs, ut.AccessToken); err != nil { logError(err) return } diff --git a/cli/things.go b/cli/things.go index f7bd58707f..155e3fa41e 100644 --- a/cli/things.go +++ b/cli/things.go @@ -6,6 +6,7 @@ package cli import ( "encoding/json" + mfclients "github.com/mainflux/mainflux/pkg/clients" mfxsdk "github.com/mainflux/mainflux/pkg/sdk/go" "github.com/spf13/cobra" ) @@ -26,14 +27,14 @@ var cmdThings = []cobra.Command{ logError(err) return } - - id, err := sdk.CreateThing(thing, args[1]) + thing.Status = mfclients.EnabledStatus.String() + thing, err := sdk.CreateThing(thing, args[1]) if err != nil { logError(err) return } - logCreated(id) + logJSON(thing) }, }, { @@ -77,97 +78,154 @@ var cmdThings = []cobra.Command{ }, }, { - Use: "delete ", - Short: "Delete thing", - Long: `Removes thing from database`, + Use: "identify ", + Short: "Identify thing", + Long: "Validates thing's key and returns its ID", Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { + if len(args) != 1 { logUsage(cmd.Use) return } - if err := sdk.DeleteThing(args[0], args[1]); err != nil { + i, err := sdk.IdentifyThing(args[0]) + if err != nil { logError(err) return } - logOK() + logJSON(i) }, }, { - Use: "identify ", - Short: "Identify thing", - Long: "Validates thing's key and returns its ID", + Use: "update [ | key ] ", + Short: "Update thing", + Long: `Update thing record`, Run: func(cmd *cobra.Command, args []string) { - if len(args) != 1 { + if len(args) != 3 { logUsage(cmd.Use) return } - i, err := sdk.IdentifyThing(args[0]) + var thing mfxsdk.Thing + if err := 
json.Unmarshal([]byte(args[1]), &thing); err != nil { + logError(err) + return + } + thing.ID = args[0] + thing, err := sdk.UpdateThing(thing, args[2]) if err != nil { logError(err) return } - logJSON(i) + logJSON(thing) }, }, { - Use: "update [ | key ] ", - Short: "Update thing", + Use: "update tags ", + Short: "Update thing tags", Long: `Update thing record`, Run: func(cmd *cobra.Command, args []string) { - if len(args) < 2 { + if len(args) != 3 { logUsage(cmd.Use) return } - if args[0] == "key" { - if err := sdk.UpdateThingKey(args[1], args[2], args[3]); err != nil { - logError(err) - return - } - logOK() + var thing mfxsdk.Thing + if err := json.Unmarshal([]byte(args[1]), &thing.Tags); err != nil { + logError(err) return } - var thing mfxsdk.Thing - if err := json.Unmarshal([]byte(args[0]), &thing); err != nil { + thing.ID = args[0] + thing, err := sdk.UpdateThingTags(thing, args[2]) + if err != nil { logError(err) return } - if err := sdk.UpdateThing(thing, args[1]); err != nil { + logJSON(thing) + }, + }, + { + Use: "update secret ", + Short: "Update thing tags", + Long: `Update thing record`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 3 { + logUsage(cmd.Use) + return + } + + thing, err := sdk.UpdateThingSecret(args[0], args[1], args[2]) + if err != nil { logError(err) return } - logOK() + logJSON(thing) + }, + }, + { + Use: "update owner ", + Short: "Update thing owner", + Long: `Update thing record`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 3 { + logUsage(cmd.Use) + return + } + + var thing mfxsdk.Thing + if err := json.Unmarshal([]byte(args[1]), &thing.Owner); err != nil { + logError(err) + return + } + thing.ID = args[0] + thing, err := sdk.UpdateThingOwner(thing, args[2]) + if err != nil { + logError(err) + return + } + + logJSON(thing) }, }, { - Use: "share ", - Short: "share thing", - Long: `Shares a thing with user identified. 
- policies - '["policy1", ...]'`, + Use: "enable ", + Short: "Change thing status to enabled", + Long: `Change thing status to enabled`, Run: func(cmd *cobra.Command, args []string) { - if len(args) != 4 { + if len(args) != 2 { logUsage(cmd.Use) return } - var policies []string - if err := json.Unmarshal([]byte(args[2]), &policies); err != nil { + thing, err := sdk.EnableThing(args[0], args[1]) + if err != nil { logError(err) return } - if err := sdk.ShareThing(args[0], args[1], policies, args[2]); err != nil { + logJSON(thing) + }, + }, + { + Use: "disable ", + Short: "Change thing status to disabled", + Long: `Change thing status to disabled`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 2 { + logUsage(cmd.Use) + return + } + + thing, err := sdk.DisableThing(args[0], args[1]) + if err != nil { logError(err) return } - logOK() + logJSON(thing) }, }, { @@ -202,7 +260,11 @@ var cmdThings = []cobra.Command{ return } - if err := sdk.DisconnectThing(args[0], args[1], args[2]); err != nil { + connIDs := mfxsdk.ConnectionIDs{ + ThingIDs: []string{args[0]}, + ChannelIDs: []string{args[1]}, + } + if err := sdk.Disconnect(connIDs, args[2]); err != nil { logError(err) return } @@ -230,29 +292,6 @@ var cmdThings = []cobra.Command{ return } - logJSON(cl) - }, - }, - { - Use: "not-connected ", - Short: "Not-connected list", - Long: `List of Channels not connected to a Thing`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { - logUsage(cmd.Use) - return - } - pm := mfxsdk.PageMetadata{ - Offset: uint64(Offset), - Limit: uint64(Limit), - Disconnected: true, - } - cl, err := sdk.ChannelsByThing(args[0], pm, args[1]) - if err != nil { - logError(err) - return - } - logJSON(cl) }, }, diff --git a/cli/users.go b/cli/users.go index e4b12c7274..c73bc265d2 100644 --- a/cli/users.go +++ b/cli/users.go @@ -6,35 +6,40 @@ package cli import ( "encoding/json" + mfclients "github.com/mainflux/mainflux/pkg/clients" mfxsdk 
"github.com/mainflux/mainflux/pkg/sdk/go" "github.com/spf13/cobra" ) var cmdUsers = []cobra.Command{ { - Use: "create ", + Use: "create ", Short: "Create user", Long: `Creates new user`, Run: func(cmd *cobra.Command, args []string) { - if len(args) < 2 || len(args) > 3 { + if len(args) < 3 || len(args) > 4 { logUsage(cmd.Use) return } - if len(args) == 2 { + if len(args) == 3 { args = append(args, "") } user := mfxsdk.User{ - Email: args[0], - Password: args[1], + Name: args[0], + Credentials: mfxsdk.Credentials{ + Identity: args[1], + Secret: args[2], + }, + Status: mfclients.EnabledStatus.String(), } - id, err := sdk.CreateUser(user, args[2]) + user, err := sdk.CreateUser(user, args[3]) if err != nil { logError(err) return } - logCreated(id) + logJSON(user) }, }, { @@ -89,8 +94,10 @@ var cmdUsers = []cobra.Command{ } user := mfxsdk.User{ - Email: args[0], - Password: args[1], + Credentials: mfxsdk.Credentials{ + Identity: args[0], + Secret: args[1], + }, } token, err := sdk.CreateToken(user) if err != nil { @@ -98,32 +105,148 @@ var cmdUsers = []cobra.Command{ return } - logCreated(token) + logJSON(token) }, }, { - Use: "update ", + Use: "refreshtoken ", + Short: "Get token", + Long: `Generate new token from refresh token`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 1 { + logUsage(cmd.Use) + return + } + + token, err := sdk.RefreshToken(args[0]) + if err != nil { + logError(err) + return + } + + logJSON(token) + + }, + }, + { + Use: "update ", Short: "Update user", Long: `Update user metadata`, Run: func(cmd *cobra.Command, args []string) { - if len(args) != 2 { + if len(args) != 3 { + logUsage(cmd.Use) + return + } + + var user mfxsdk.User + if err := json.Unmarshal([]byte(args[1]), &user); err != nil { + logError(err) + return + } + user.ID = args[0] + user, err := sdk.UpdateUser(user, args[2]) + if err != nil { + logError(err) + return + } + + logJSON(user) + }, + }, + { + Use: "updatetags ", + Short: "Update user tags", + Long: `Update 
user tags`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 3 { + logUsage(cmd.Use) + return + } + + var user mfxsdk.User + if err := json.Unmarshal([]byte(args[1]), &user.Tags); err != nil { + logError(err) + return + } + user.ID = args[0] + user, err := sdk.UpdateUserTags(user, args[2]) + if err != nil { + logError(err) + return + } + + logJSON(user) + }, + }, + { + Use: "updateidentity ", + Short: "Update user identity", + Long: `Update user identity`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 3 { + logUsage(cmd.Use) + return + } + + var user mfxsdk.User + if err := json.Unmarshal([]byte(args[1]), &user.Credentials.Identity); err != nil { + logError(err) + return + } + user.ID = args[0] + user, err := sdk.UpdateUserTags(user, args[2]) + if err != nil { + logError(err) + return + } + + logJSON(user) + }, + }, + { + Use: "updateowner ", + Short: "Update user owner", + Long: `Update user owner`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 3 { logUsage(cmd.Use) return } var user mfxsdk.User - if err := json.Unmarshal([]byte(args[0]), &user.Metadata); err != nil { + if err := json.Unmarshal([]byte(args[1]), &user.Owner); err != nil { logError(err) return } + user.ID = args[0] + user, err := sdk.UpdateUserTags(user, args[2]) + if err != nil { + logError(err) + return + } + + logJSON(user) + }, + }, + + { + Use: "profile ", + Short: "Get user profile", + Long: `Get user profile`, + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 1 { + logUsage(cmd.Use) + return + } - if err := sdk.UpdateUser(user, args[1]); err != nil { + user, err := sdk.UserProfile(args[0]) + if err != nil { logError(err) return } - logOK() + logJSON(user) }, }, { @@ -136,12 +259,13 @@ var cmdUsers = []cobra.Command{ return } - if err := sdk.UpdatePassword(args[0], args[1], args[2]); err != nil { + user, err := sdk.UpdatePassword(args[0], args[1], args[2]) + if err != nil { logError(err) return } - logOK() + 
logJSON(user) }, }, { @@ -154,12 +278,13 @@ var cmdUsers = []cobra.Command{ return } - if err := sdk.EnableUser(args[0], args[1]); err != nil { + user, err := sdk.EnableUser(args[0], args[1]) + if err != nil { logError(err) return } - logOK() + logJSON(user) }, }, { @@ -172,12 +297,13 @@ var cmdUsers = []cobra.Command{ return } - if err := sdk.DisableUser(args[0], args[1]); err != nil { + user, err := sdk.DisableUser(args[0], args[1]) + if err != nil { logError(err) return } - logOK() + logJSON(user) }, }, } diff --git a/cmd/auth/main.go b/cmd/auth/main.go deleted file mode 100644 index 0f38fde159..0000000000 --- a/cmd/auth/main.go +++ /dev/null @@ -1,163 +0,0 @@ -package main - -import ( - "context" - "fmt" - "log" - "os" - "time" - - "github.com/jmoiron/sqlx" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - api "github.com/mainflux/mainflux/auth/api" - grpcapi "github.com/mainflux/mainflux/auth/api/grpc" - httpapi "github.com/mainflux/mainflux/auth/api/http" - "github.com/mainflux/mainflux/auth/jwt" - "github.com/mainflux/mainflux/auth/keto" - authPg "github.com/mainflux/mainflux/auth/postgres" - "github.com/mainflux/mainflux/auth/tracing" - "github.com/mainflux/mainflux/internal" - grpcClient "github.com/mainflux/mainflux/internal/clients/grpc" - jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" - pgClient "github.com/mainflux/mainflux/internal/clients/postgres" - "github.com/mainflux/mainflux/internal/env" - "github.com/mainflux/mainflux/internal/server" - grpcserver "github.com/mainflux/mainflux/internal/server/grpc" - httpserver "github.com/mainflux/mainflux/internal/server/http" - mflog "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go" - acl "github.com/ory/keto/proto/ory/keto/acl/v1alpha1" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" -) - -const ( - svcName = "auth" - envPrefix = "MF_AUTH_" - envPrefixHttp = "MF_AUTH_HTTP_" - 
envPrefixGrpc = "MF_AUTH_GRPC_" - defDB = "auth" - defSvcHttpPort = "9020" - defSvcGrpcPort = "7001" -) - -type config struct { - LogLevel string `env:"MF_AUTH_LOG_LEVEL" envDefault:"info"` - Secret string `env:"MF_AUTH_SECRET" envDefault:"auth"` - KetoReadHost string `env:"MF_KETO_READ_REMOTE_HOST" envDefault:"mainflux-keto"` - KetoReadPort string `env:"MF_KETO_READ_REMOTE_PORT" envDefault:"4466"` - KetoWriteHost string `env:"MF_KETO_WRITE_REMOTE_HOST" envDefault:"mainflux-keto"` - KetoWritePort string `env:"MF_KETO_WRITE_REMOTE_PORT" envDefault:"4467"` - LoginDuration time.Duration `env:"MF_AUTH_LOGIN_TOKEN_DURATION" envDefault:"10h"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` -} - -func main() { - ctx, cancel := context.WithCancel(context.Background()) - g, ctx := errgroup.WithContext(ctx) - - // Create auth service configurations - cfg := config{} - if err := env.Parse(&cfg); err != nil { - log.Fatalf("failed to load %s configuration : %s", svcName, err) - } - - logger, err := mflog.New(os.Stdout, cfg.LogLevel) - if err != nil { - log.Fatalf("failed to init logger: %s", err) - } - - // Create new postgres client - dbConfig := pgClient.Config{Name: defDB} - db, err := pgClient.SetupWithConfig(envPrefix, *authPg.Migration(), dbConfig) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to setup postgres database : %s", err)) - } - defer db.Close() - - // Create new tracer for database - dbTracer, dbCloser, err := jaegerClient.NewTracer("auth_db", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer dbCloser.Close() - - // Create new keto reader grpc client - readerConn, _, err := grpcClient.Connect(grpcClient.Config{ClientTLS: false, URL: fmt.Sprintf("%s:%s", cfg.KetoReadHost, cfg.KetoReadPort)}) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to connect to keto gRPC: %s", err)) - } - - // Create new keto writer grpc client - writerConn, _, err := 
grpcClient.Connect(grpcClient.Config{ClientTLS: false, URL: fmt.Sprintf("%s:%s", cfg.KetoWriteHost, cfg.KetoWritePort)}) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to connect to keto gRPC: %s", err)) - } - - svc := newService(db, dbTracer, cfg.Secret, logger, readerConn, writerConn, cfg.LoginDuration) - - // Create new HTTP Server - tracer, closer, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer closer.Close() - - httpServerConfig := server.Config{Port: defSvcHttpPort} - - if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { - logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) - } - - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, httpapi.MakeHandler(svc, tracer, logger), logger) - - // Create new grpc server - grpcServerConfig := server.Config{Port: defSvcGrpcPort} - - if err := env.Parse(&grpcServerConfig, env.Options{Prefix: envPrefixGrpc, AltPrefix: envPrefix}); err != nil { - logger.Fatal(fmt.Sprintf("failed to load %s gRPC server configuration : %s", svcName, err)) - } - registerAuthServiceServer := func(srv *grpc.Server) { - mainflux.RegisterAuthServiceServer(srv, grpcapi.NewServer(tracer, svc)) - } - - gs := grpcserver.New(ctx, cancel, svcName, grpcServerConfig, registerAuthServiceServer, logger) - - // Start servers - g.Go(func() error { - return hs.Start() - }) - g.Go(func() error { - return gs.Start() - }) - - g.Go(func() error { - return server.StopSignalHandler(ctx, cancel, logger, svcName, hs, gs) - }) - if err := g.Wait(); err != nil { - logger.Error(fmt.Sprintf("Authentication service terminated: %s", err)) - } -} - -func newService(db *sqlx.DB, tracer opentracing.Tracer, secret string, logger mflog.Logger, readerConn, writerConn *grpc.ClientConn, duration time.Duration) auth.Service { - database := authPg.NewDatabase(db) - keysRepo 
:= tracing.New(tracer, authPg.New(database)) - - groupsRepo := authPg.NewGroupRepo(database) - groupsRepo = tracing.GroupRepositoryMiddleware(tracer, groupsRepo) - - pa := keto.NewPolicyAgent(acl.NewCheckServiceClient(readerConn), acl.NewWriteServiceClient(writerConn), acl.NewReadServiceClient(readerConn)) - - idProvider := uuid.New() - t := jwt.New(secret) - - svc := auth.New(keysRepo, groupsRepo, idProvider, t, pa, duration) - svc = api.LoggingMiddleware(svc, logger) - - counter, latency := internal.MakeMetrics(svcName, "api") - svc = api.MetricsMiddleware(svc, counter, latency) - - return svc -} diff --git a/cmd/bootstrap/main.go b/cmd/bootstrap/main.go index 51fdf2967d..888039b6e5 100644 --- a/cmd/bootstrap/main.go +++ b/cmd/bootstrap/main.go @@ -8,11 +8,6 @@ import ( "log" "os" - r "github.com/go-redis/redis/v8" - "github.com/jmoiron/sqlx" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/bootstrap" - api "github.com/mainflux/mainflux/bootstrap/api" bootstrapPg "github.com/mainflux/mainflux/bootstrap/postgres" rediscons "github.com/mainflux/mainflux/bootstrap/redis/consumer" redisprod "github.com/mainflux/mainflux/bootstrap/redis/producer" @@ -25,7 +20,13 @@ import ( httpserver "github.com/mainflux/mainflux/internal/server/http" mflog "github.com/mainflux/mainflux/logger" mfsdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/mainflux/mainflux/users/policies" "golang.org/x/sync/errgroup" + + "github.com/go-redis/redis/v8" + "github.com/jmoiron/sqlx" + "github.com/mainflux/mainflux/bootstrap" + "github.com/mainflux/mainflux/bootstrap/api" ) const ( @@ -42,7 +43,7 @@ type config struct { EncKey string `env:"MF_BOOTSTRAP_ENCRYPT_KEY" envDefault:"12345678910111213141516171819202"` ESConsumerName string `env:"MF_BOOTSTRAP_EVENT_CONSUMER" envDefault:"bootstrap"` ThingsURL string `env:"MF_THINGS_URL" envDefault:"http://localhost:9000"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" 
envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -76,7 +77,7 @@ func main() { defer esClient.Close() // Create new auth grpc client api - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -115,7 +116,7 @@ func main() { } } -func newService(auth mainflux.AuthServiceClient, db *sqlx.DB, logger mflog.Logger, esClient *r.Client, cfg config) bootstrap.Service { +func newService(auth policies.AuthServiceClient, db *sqlx.DB, logger mflog.Logger, esClient *redis.Client, cfg config) bootstrap.Service { repoConfig := bootstrapPg.NewConfigRepository(db, logger) config := mfsdk.Config{ @@ -133,7 +134,7 @@ func newService(auth mainflux.AuthServiceClient, db *sqlx.DB, logger mflog.Logge return svc } -func subscribeToThingsES(svc bootstrap.Service, client *r.Client, consumer string, logger mflog.Logger) { +func subscribeToThingsES(svc bootstrap.Service, client *redis.Client, consumer string, logger mflog.Logger) { eventStore := rediscons.NewEventStore(svc, client, consumer, logger) logger.Info("Subscribed to Redis Event Store") if err := eventStore.Subscribe(context.Background(), "mainflux.things"); err != nil { diff --git a/cmd/cassandra-reader/main.go b/cmd/cassandra-reader/main.go index 9fe722e8ac..7f9d90a3b0 100644 --- a/cmd/cassandra-reader/main.go +++ b/cmd/cassandra-reader/main.go @@ -33,7 +33,7 @@ const ( type config struct { LogLevel string `env:"MF_CASSANDRA_READER_LOG_LEVEL" envDefault:"info"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -61,7 +61,7 @@ func main() { logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) // Create new auth grpc client - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := 
authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -85,7 +85,7 @@ func main() { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName), logger) // Start servers g.Go(func() error { diff --git a/cmd/cassandra-writer/main.go b/cmd/cassandra-writer/main.go index df5a32809d..7910240e4f 100644 --- a/cmd/cassandra-writer/main.go +++ b/cmd/cassandra-writer/main.go @@ -61,11 +61,16 @@ func main() { } defer csdSession.Close() - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) // Create new cassandra-writer repo repo := newService(csdSession, logger) diff --git a/cmd/certs/main.go b/cmd/certs/main.go index dc73e4f395..e3d59d7c15 100644 --- a/cmd/certs/main.go +++ b/cmd/certs/main.go @@ -10,8 +10,6 @@ import ( "log" "os" - "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/certs" "github.com/mainflux/mainflux/certs/api" vault "github.com/mainflux/mainflux/certs/pki" @@ -21,6 +19,7 @@ import ( "github.com/mainflux/mainflux/internal/server" httpserver "github.com/mainflux/mainflux/internal/server/http" mflog "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/users/policies" "golang.org/x/sync/errgroup" "github.com/jmoiron/sqlx" @@ -49,7 +48,7 @@ type config struct { 
LogLevel string `env:"MF_CERTS_LOG_LEVEL" envDefault:"info"` CertsURL string `env:"MF_SDK_CERTS_URL" envDefault:"http://localhost"` ThingsURL string `env:"MF_THINGS_URL" envDefault:"http://things:9000"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` // Sign and issue certificates without 3rd party PKI SignCAPath string `env:"MF_CERTS_SIGN_CA_PATH" envDefault:"ca.crt"` @@ -100,14 +99,14 @@ func main() { } defer db.Close() - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } defer authHandler.Close() logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) - svc := newService(auth, db, logger, nil, tlsCert, caCert, cfg, pkiClient) + svc := newService(auth, db, logger, tlsCert, caCert, cfg, pkiClient) httpServerConfig := server.Config{Port: defSvcHttpPort} if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { @@ -128,7 +127,7 @@ func main() { } } -func newService(auth mainflux.AuthServiceClient, db *sqlx.DB, logger mflog.Logger, esClient *redis.Client, tlsCert tls.Certificate, x509Cert *x509.Certificate, cfg config, pkiAgent vault.Agent) certs.Service { +func newService(auth policies.AuthServiceClient, db *sqlx.DB, logger mflog.Logger, tlsCert tls.Certificate, x509Cert *x509.Certificate, cfg config, pkiAgent vault.Agent) certs.Service { certsRepo := certsPg.NewRepository(db, logger) config := mfsdk.Config{ CertsURL: cfg.CertsURL, diff --git a/cmd/cli/main.go b/cmd/cli/main.go index 366dce59ac..2ab92d434a 100644 --- a/cmd/cli/main.go +++ b/cmd/cli/main.go @@ -17,7 +17,6 @@ const defURL string = "http://localhost" func main() { msgContentType := string(sdk.CTJSONSenML) sdkConf := sdk.Config{ - AuthURL: defURL, ThingsURL: defURL, UsersURL: 
defURL, ReaderURL: defURL, @@ -50,7 +49,6 @@ func main() { provisionCmd := cli.NewProvisionCmd() bootstrapCmd := cli.NewBootstrapCmd() certsCmd := cli.NewCertsCmd() - keysCmd := cli.NewKeysCmd() subscriptionsCmd := cli.NewSubscriptionCmd() policiesCmd := cli.NewPolicyCmd() @@ -64,19 +62,10 @@ func main() { rootCmd.AddCommand(provisionCmd) rootCmd.AddCommand(bootstrapCmd) rootCmd.AddCommand(certsCmd) - rootCmd.AddCommand(keysCmd) rootCmd.AddCommand(subscriptionsCmd) rootCmd.AddCommand(policiesCmd) // Root Flags - rootCmd.PersistentFlags().StringVarP( - &sdkConf.AuthURL, - "auth-url", - "a", - sdkConf.AuthURL, - "Auth service URL", - ) - rootCmd.PersistentFlags().StringVarP( &sdkConf.BootstrapURL, "bootstrap-url", diff --git a/cmd/coap/main.go b/cmd/coap/main.go index 13c6a88059..b1a6c6cc60 100644 --- a/cmd/coap/main.go +++ b/cmd/coap/main.go @@ -37,7 +37,7 @@ const ( type config struct { LogLevel string `env:"MF_INFLUX_READER_LOG_LEVEL" envDefault:"info"` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -61,11 +61,16 @@ func main() { defer tcHandler.Close() logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) nps, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { diff --git a/cmd/http/main.go b/cmd/http/main.go index 
2cb126c5ca..ed40ed5fe2 100644 --- a/cmd/http/main.go +++ b/cmd/http/main.go @@ -9,7 +9,6 @@ import ( "log" "os" - "github.com/mainflux/mainflux" adapter "github.com/mainflux/mainflux/http" "github.com/mainflux/mainflux/http/api" "github.com/mainflux/mainflux/http/tracing" @@ -23,7 +22,9 @@ import ( "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/pkg/messaging/brokers" pstracing "github.com/mainflux/mainflux/pkg/messaging/tracing" - "github.com/opentracing/opentracing-go" + "github.com/mainflux/mainflux/things/policies" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" ) @@ -37,7 +38,7 @@ const ( type config struct { LogLevel string `env:"MF_HTTP_ADAPTER_LOG_LEVEL" envDefault:"info"` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -61,11 +62,16 @@ func main() { defer tcHandler.Close() logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) - tracer, closer, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer closer.Close() + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pub, err := brokers.NewPublisher(cfg.BrokerURL) if err != nil { @@ -80,7 +86,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, tracer, logger), 
logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc), logger) g.Go(func() error { return hs.Start() @@ -95,7 +101,7 @@ func main() { } } -func newService(pub messaging.Publisher, tc mainflux.ThingsServiceClient, logger mflog.Logger, tracer opentracing.Tracer) adapter.Service { +func newService(pub messaging.Publisher, tc policies.ThingsServiceClient, logger mflog.Logger, tracer trace.Tracer) adapter.Service { svc := adapter.New(pub, tc) svc = tracing.New(tracer, svc) svc = api.LoggingMiddleware(svc, logger) diff --git a/cmd/influxdb-reader/main.go b/cmd/influxdb-reader/main.go index e51129a879..8bb12bb8ed 100644 --- a/cmd/influxdb-reader/main.go +++ b/cmd/influxdb-reader/main.go @@ -31,7 +31,7 @@ const ( type config struct { LogLevel string `env:"MF_INFLUX_READER_LOG_LEVEL" envDefault:"info"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -55,7 +55,7 @@ func main() { defer tcHandler.Close() logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -85,7 +85,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName), logger) g.Go(func() error { return hs.Start() diff --git a/cmd/influxdb-writer/main.go b/cmd/influxdb-writer/main.go index 6d8865bc21..f476b3b86d 100644 --- a/cmd/influxdb-writer/main.go +++ 
b/cmd/influxdb-writer/main.go @@ -52,11 +52,16 @@ func main() { log.Fatalf("failed to init logger: %s", err) } - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pubSub, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { diff --git a/cmd/lora/main.go b/cmd/lora/main.go index bec01f680b..ef3d19b758 100644 --- a/cmd/lora/main.go +++ b/cmd/lora/main.go @@ -75,11 +75,16 @@ func main() { } defer rmConn.Close() - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pub, err := brokers.NewPublisher(cfg.BrokerURL) if err != nil { @@ -169,7 +174,7 @@ func newService(pub messaging.Publisher, rmConn *r.Client, thingsRMPrefix, chann svc := lora.New(pub, thingsRM, chansRM, connsRM) svc = api.LoggingMiddleware(svc, logger) - counter, latency := internal.MakeMetrics(svcName, "api") + counter, latency := internal.MakeMetrics("lora_adapter", "api") svc = api.MetricsMiddleware(svc, counter, latency) return svc diff --git a/cmd/mongodb-reader/main.go b/cmd/mongodb-reader/main.go index 0100e2944e..8f732fd259 100644 --- a/cmd/mongodb-reader/main.go +++ b/cmd/mongodb-reader/main.go @@ 
-34,7 +34,7 @@ const ( type config struct { LogLevel string `env:"MF_MONGO_READER_LOG_LEVEL" envDefault:"info"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -65,7 +65,7 @@ func main() { defer tcHandler.Close() logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -76,7 +76,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName), logger) g.Go(func() error { return hs.Start() diff --git a/cmd/mongodb-writer/main.go b/cmd/mongodb-writer/main.go index 9d6b853db0..9c2f9b824e 100644 --- a/cmd/mongodb-writer/main.go +++ b/cmd/mongodb-writer/main.go @@ -54,11 +54,16 @@ func main() { log.Fatalf("failed to init logger: %s", err) } - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pubSub, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { diff --git a/cmd/mqtt/main.go 
b/cmd/mqtt/main.go index 6e13dd4e9e..c5da2d18fe 100644 --- a/cmd/mqtt/main.go +++ b/cmd/mqtt/main.go @@ -49,7 +49,7 @@ type config struct { HttpTargetPort string `env:"MF_MQTT_ADAPTER_WS_TARGET_PORT" envDefault:"8080"` HttpTargetPath string `env:"MF_MQTT_ADAPTER_WS_TARGET_PATH" envDefault:"/mqtt"` Instance string `env:"MF_MQTT_ADAPTER_INSTANCE" envDefault:""` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` } @@ -78,11 +78,16 @@ func main() { } } - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) nps, err := brokers.NewPubSub(cfg.BrokerURL, "mqtt", logger) if err != nil { diff --git a/cmd/opcua/main.go b/cmd/opcua/main.go index 731e277b76..9c1f333f5e 100644 --- a/cmd/opcua/main.go +++ b/cmd/opcua/main.go @@ -82,11 +82,16 @@ func main() { } defer esConn.Close() - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pubSub, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { @@ -156,7 +161,7 
@@ func newRouteMapRepositoy(client *r.Client, prefix string, logger mflog.Logger) func newService(sub opcua.Subscriber, browser opcua.Browser, thingRM, chanRM, connRM opcua.RouteMapRepository, opcuaConfig opcua.Config, logger mflog.Logger) opcua.Service { svc := opcua.New(sub, browser, thingRM, chanRM, connRM, opcuaConfig, logger) svc = api.LoggingMiddleware(svc, logger) - counter, latency := internal.MakeMetrics(svcName, "api") + counter, latency := internal.MakeMetrics("opc_ua_adapter", "api") svc = api.MetricsMiddleware(svc, counter, latency) return svc diff --git a/cmd/postgres-reader/main.go b/cmd/postgres-reader/main.go index 5b55351005..b3b5c56982 100644 --- a/cmd/postgres-reader/main.go +++ b/cmd/postgres-reader/main.go @@ -34,7 +34,7 @@ const ( type config struct { LogLevel string `env:"MF_POSTGRES_READER_LOG_LEVEL" envDefault:"info"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -58,7 +58,7 @@ func main() { defer tcHandler.Close() logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -81,7 +81,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName), logger) g.Go(func() error { return hs.Start() diff --git a/cmd/postgres-writer/main.go b/cmd/postgres-writer/main.go index abf5f0c63a..ea6620faa4 100644 --- 
a/cmd/postgres-writer/main.go +++ b/cmd/postgres-writer/main.go @@ -54,11 +54,16 @@ func main() { log.Fatalf("failed to init logger: %s", err) } - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pubSub, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { diff --git a/cmd/provision/main.go b/cmd/provision/main.go index 78ef7052f4..0fc8615bcc 100644 --- a/cmd/provision/main.go +++ b/cmd/provision/main.go @@ -13,11 +13,12 @@ import ( "github.com/mainflux/mainflux/internal/server" httpserver "github.com/mainflux/mainflux/internal/server/http" "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" mfSDK "github.com/mainflux/mainflux/pkg/sdk/go" "github.com/mainflux/mainflux/provision" "github.com/mainflux/mainflux/provision/api" - "github.com/mainflux/mainflux/things" "golang.org/x/sync/errgroup" ) @@ -197,7 +198,7 @@ func loadConfig() (provision.Config, error) { }, // This is default conf for provision if there is no config file - Channels: []things.Channel{ + Channels: []mfgroups.Group{ { Name: "control-channel", Metadata: map[string]interface{}{"type": "control"}, @@ -206,7 +207,7 @@ func loadConfig() (provision.Config, error) { Metadata: map[string]interface{}{"type": "data"}, }, }, - Things: []things.Thing{ + Things: []mfclients.Client{ { Name: "thing", Metadata: map[string]interface{}{"external_id": "xxxxxx"}, diff --git a/cmd/smpp-notifier/main.go 
b/cmd/smpp-notifier/main.go index ffa7818a04..5f7089dc7b 100644 --- a/cmd/smpp-notifier/main.go +++ b/cmd/smpp-notifier/main.go @@ -10,7 +10,6 @@ import ( "os" "github.com/jmoiron/sqlx" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/consumers" "github.com/mainflux/mainflux/consumers/notifiers" "github.com/mainflux/mainflux/consumers/notifiers/api" @@ -19,6 +18,8 @@ import ( "github.com/mainflux/mainflux/internal/env" "github.com/mainflux/mainflux/internal/server" httpserver "github.com/mainflux/mainflux/internal/server/http" + "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" mfsmpp "github.com/mainflux/mainflux/consumers/notifiers/smpp" @@ -30,7 +31,6 @@ import ( "github.com/mainflux/mainflux/pkg/messaging/brokers" pstracing "github.com/mainflux/mainflux/pkg/messaging/tracing" "github.com/mainflux/mainflux/pkg/ulid" - opentracing "github.com/opentracing/opentracing-go" ) const ( @@ -46,7 +46,7 @@ type config struct { From string `env:"MF_SMPP_NOTIFIER_FROM_ADDR" envDefault:""` ConfigPath string `env:"MF_SMPP_NOTIFIER_CONFIG_PATH" envDefault:"/config.toml"` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -75,11 +75,16 @@ func main() { logger.Fatal(fmt.Sprintf("failed to load SMPP configuration from environment : %s", err)) } - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(ctx); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := 
tp.Tracer(svcName) pubSub, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { @@ -88,21 +93,14 @@ func main() { pubSub = pstracing.NewPubSub(tracer, pubSub) defer pubSub.Close() - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } defer authHandler.Close() logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) - dbTracer, dbCloser, err := jaegerClient.NewTracer("smpp-notifier_db", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer dbCloser.Close() - - svc := newService(db, dbTracer, auth, cfg, smppConfig, logger, tracer) - + svc := newService(db, tracer, auth, cfg, smppConfig, logger) if err = consumers.Start(ctx, svcName, pubSub, svc, cfg.ConfigPath, logger); err != nil { logger.Fatal(fmt.Sprintf("failed to create Postgres writer: %s", err)) } @@ -111,7 +109,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, tracer, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, logger), logger) g.Go(func() error { return hs.Start() @@ -127,12 +125,11 @@ func main() { } -func newService(db *sqlx.DB, tracer opentracing.Tracer, auth mainflux.AuthServiceClient, c config, sc mfsmpp.Config, logger mflog.Logger, svcTracer opentracing.Tracer) notifiers.Service { - database := notifierPg.NewDatabase(db) +func newService(db *sqlx.DB, tracer trace.Tracer, auth policies.AuthServiceClient, c config, sc mfsmpp.Config, logger mflog.Logger) notifiers.Service { + database := notifierPg.NewDatabase(db, tracer) repo := tracing.New(tracer, 
notifierPg.New(database)) idp := ulid.New() notifier := mfsmpp.New(sc) - notifier = tracing.NewNotifier(svcTracer, notifier) svc := notifiers.New(auth, repo, idp, notifier, c.From) svc = api.LoggingMiddleware(svc, logger) counter, latency := internal.MakeMetrics("notifier", "smpp") diff --git a/cmd/smtp-notifier/main.go b/cmd/smtp-notifier/main.go index 400ec3dba1..57e988385b 100644 --- a/cmd/smtp-notifier/main.go +++ b/cmd/smtp-notifier/main.go @@ -10,7 +10,6 @@ import ( "os" "github.com/jmoiron/sqlx" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/consumers" "github.com/mainflux/mainflux/consumers/notifiers" "github.com/mainflux/mainflux/consumers/notifiers/api" @@ -21,6 +20,7 @@ import ( authClient "github.com/mainflux/mainflux/internal/clients/grpc/auth" jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/email" "github.com/mainflux/mainflux/internal/env" "github.com/mainflux/mainflux/internal/server" @@ -29,7 +29,8 @@ import ( "github.com/mainflux/mainflux/pkg/messaging/brokers" pstracing "github.com/mainflux/mainflux/pkg/messaging/tracing" "github.com/mainflux/mainflux/pkg/ulid" - opentracing "github.com/opentracing/opentracing-go" + "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" ) @@ -46,7 +47,7 @@ type config struct { ConfigPath string `env:"MF_SMTP_NOTIFIER_CONFIG_PATH" envDefault:"/config.toml"` From string `env:"MF_SMTP_NOTIFIER_FROM_ADDR" envDefault:""` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -75,11 +76,16 @@ func main() { logger.Fatal(fmt.Sprintf("failed to load email configuration : %s", err)) } - tracer, traceCloser, err := 
jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) pubSub, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { @@ -88,20 +94,14 @@ func main() { pubSub = pstracing.NewPubSub(tracer, pubSub) defer pubSub.Close() - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } defer authHandler.Close() logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) - dbTracer, dbCloser, err := jaegerClient.NewTracer("smtp-notifier_db", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer dbCloser.Close() - - svc := newService(db, dbTracer, auth, cfg, ec, logger, tracer) + svc := newService(db, tracer, auth, cfg, ec, logger) if err = consumers.Start(ctx, svcName, pubSub, svc, cfg.ConfigPath, logger); err != nil { logger.Fatal(fmt.Sprintf("failed to create Postgres writer: %s", err)) @@ -111,7 +111,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, tracer, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, logger), logger) g.Go(func() error { return hs.Start() @@ -127,8 +127,8 @@ func main() { } -func newService(db *sqlx.DB, tracer opentracing.Tracer, auth mainflux.AuthServiceClient, c 
config, ec email.Config, logger mflog.Logger, svcTracer opentracing.Tracer) notifiers.Service { - database := notifierPg.NewDatabase(db) +func newService(db *sqlx.DB, tracer trace.Tracer, auth policies.AuthServiceClient, c config, ec email.Config, logger mflog.Logger) notifiers.Service { + database := notifierPg.NewDatabase(db, tracer) repo := tracing.New(tracer, notifierPg.New(database)) idp := ulid.New() @@ -138,7 +138,6 @@ func newService(db *sqlx.DB, tracer opentracing.Tracer, auth mainflux.AuthServic } notifier := smtp.New(agent) - notifier = tracing.NewNotifier(tracer, notifier) svc := notifiers.New(auth, repo, idp, notifier, c.From) svc = api.LoggingMiddleware(svc, logger) counter, latency := internal.MakeMetrics("notifier", "smtp") diff --git a/cmd/things/main.go b/cmd/things/main.go index a0e61ca71c..77945b6bf8 100644 --- a/cmd/things/main.go +++ b/cmd/things/main.go @@ -1,6 +1,3 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - package main import ( @@ -10,30 +7,43 @@ import ( "os" "github.com/go-redis/redis/v8" + "github.com/go-zoo/bone" "github.com/jmoiron/sqlx" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/internal" authClient "github.com/mainflux/mainflux/internal/clients/grpc/auth" jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" pgClient "github.com/mainflux/mainflux/internal/clients/postgres" redisClient "github.com/mainflux/mainflux/internal/clients/redis" "github.com/mainflux/mainflux/internal/env" + "github.com/mainflux/mainflux/internal/postgres" "github.com/mainflux/mainflux/internal/server" grpcserver "github.com/mainflux/mainflux/internal/server/grpc" httpserver "github.com/mainflux/mainflux/internal/server/http" mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - "github.com/mainflux/mainflux/things/api" - authgrpcapi "github.com/mainflux/mainflux/things/api/auth/grpc" - authhttpapi 
"github.com/mainflux/mainflux/things/api/auth/http" - thhttpapi "github.com/mainflux/mainflux/things/api/things/http" + "github.com/mainflux/mainflux/things/clients" + capi "github.com/mainflux/mainflux/things/clients/api" + cpostgres "github.com/mainflux/mainflux/things/clients/postgres" + redisthcache "github.com/mainflux/mainflux/things/clients/redis" + localusers "github.com/mainflux/mainflux/things/clients/standalone" + ctracing "github.com/mainflux/mainflux/things/clients/tracing" + "github.com/mainflux/mainflux/things/groups" + gapi "github.com/mainflux/mainflux/things/groups/api" + gpostgres "github.com/mainflux/mainflux/things/groups/postgres" + redischcache "github.com/mainflux/mainflux/things/groups/redis" + gtracing "github.com/mainflux/mainflux/things/groups/tracing" + tpolicies "github.com/mainflux/mainflux/things/policies" + grpcapi "github.com/mainflux/mainflux/things/policies/api/grpc" + papi "github.com/mainflux/mainflux/things/policies/api/http" + ppostgres "github.com/mainflux/mainflux/things/policies/postgres" + redispcache "github.com/mainflux/mainflux/things/policies/redis" + ppracing "github.com/mainflux/mainflux/things/policies/tracing" thingsPg "github.com/mainflux/mainflux/things/postgres" - rediscache "github.com/mainflux/mainflux/things/redis" - "github.com/mainflux/mainflux/things/tracing" - opentracing "github.com/opentracing/opentracing-go" + upolicies "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/grpc/reflection" ) const ( @@ -42,19 +52,17 @@ const ( envPrefixCache = "MF_THINGS_CACHE_" envPrefixES = "MF_THINGS_ES_" envPrefixHttp = "MF_THINGS_HTTP_" - envPrefixAuthHttp = "MF_THINGS_AUTH_HTTP_" envPrefixAuthGrpc = "MF_THINGS_AUTH_GRPC_" defDB = "things" defSvcHttpPort = "9000" - defSvcAuthHttpPort = "9001" defSvcAuthGrpcPort = "7000" ) type config struct { LogLevel string `env:"MF_THINGS_LOG_LEVEL" envDefault:"info"` - 
StandaloneEmail string `env:"MF_THINGS_STANDALONE_EMAIL" envDefault:""` + StandaloneID string `env:"MF_THINGS_STANDALONE_ID" envDefault:""` StandaloneToken string `env:"MF_THINGS_STANDALONE_TOKEN" envDefault:""` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -71,7 +79,6 @@ func main() { if err != nil { log.Fatalf("failed to init logger: %s", err) } - // Create new database for things dbConfig := pgClient.Config{Name: defDB} db, err := pgClient.SetupWithConfig(envPrefix, *thingsPg.Migration(), dbConfig) @@ -80,6 +87,17 @@ func main() { } defer db.Close() + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) + if err != nil { + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) + } + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) + // Setup new redis cache client cacheClient, err := redisClient.Setup(envPrefixCache) if err != nil { @@ -94,56 +112,35 @@ func main() { } defer esClient.Close() - // Setup new auth grpc client - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) - if err != nil { - logger.Fatal(err.Error()) - } - defer authHandler.Close() - logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) - - // Create tracer for things database - dbTracer, dbCloser, err := jaegerClient.NewTracer("things_db", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + var auth upolicies.AuthServiceClient + switch cfg.StandaloneID != "" && cfg.StandaloneToken != "" { + case true: + auth = localusers.NewAuthService(cfg.StandaloneID, cfg.StandaloneToken) + logger.Info("Using standalone auth service") + default: + authServiceClient, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) + 
if err != nil { + logger.Fatal(err.Error()) + } + defer authHandler.Close() + auth = authServiceClient + logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) } - defer dbCloser.Close() - // Create tracer for things cache - cacheTracer, cacheCloser, err := jaegerClient.NewTracer("things_cache", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer cacheCloser.Close() + csvc, gsvc, psvc := newService(db, auth, cacheClient, esClient, tracer, logger) - // Create new service - svc := newService(auth, dbTracer, cacheTracer, db, cacheClient, esClient, logger) - - // Create tracer for HTTP handler things - thingsTracer, thingsCloser, err := jaegerClient.NewTracer("things", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer thingsCloser.Close() - - // Create new HTTP server httpServerConfig := server.Config{Port: defSvcHttpPort} if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s gRPC server configuration : %s", svcName, err)) } - hs1 := httpserver.New(ctx, cancel, "thing-http", httpServerConfig, thhttpapi.MakeHandler(thingsTracer, svc, logger), logger) + mux := bone.New() + hsc := httpserver.New(ctx, cancel, "things-clients", httpServerConfig, capi.MakeHandler(csvc, mux, logger), logger) + hsg := httpserver.New(ctx, cancel, "things-groups", httpServerConfig, gapi.MakeHandler(gsvc, mux, logger), logger) + hsp := httpserver.New(ctx, cancel, "things-policies", httpServerConfig, papi.MakePolicyHandler(csvc, psvc, mux, logger), logger) - // Create new things auth http server - authHttpServerConfig := server.Config{Port: defSvcAuthHttpPort} - if err := env.Parse(&authHttpServerConfig, env.Options{Prefix: envPrefixAuthHttp, AltPrefix: envPrefix}); err != nil { - logger.Fatal(fmt.Sprintf("failed to load %s gRPC server configuration : %s", 
svcName, err)) - } - hs2 := httpserver.New(ctx, cancel, "auth-http", authHttpServerConfig, authhttpapi.MakeHandler(thingsTracer, svc, logger), logger) - - // Create new grpc server registerThingsServiceServer := func(srv *grpc.Server) { - mainflux.RegisterThingsServiceServer(srv, authgrpcapi.NewServer(thingsTracer, svc)) - + reflection.Register(srv) + tpolicies.RegisterThingsServiceServer(srv, grpcapi.NewServer(csvc, psvc)) } grpcServerConfig := server.Config{Port: defSvcAuthGrpcPort} if err := env.Parse(&grpcServerConfig, env.Options{Prefix: envPrefixAuthGrpc, AltPrefix: envPrefix}); err != nil { @@ -151,47 +148,55 @@ func main() { } gs := grpcserver.New(ctx, cancel, svcName, grpcServerConfig, registerThingsServiceServer, logger) - //Start all servers + // Start all servers g.Go(func() error { - return hs1.Start() - }) - g.Go(func() error { - return hs2.Start() + return hsp.Start() }) g.Go(func() error { return gs.Start() }) g.Go(func() error { - return server.StopSignalHandler(ctx, cancel, logger, svcName, hs1, hs2, gs) + return server.StopSignalHandler(ctx, cancel, logger, svcName, hsc, hsg, hsp, gs) }) if err := g.Wait(); err != nil { - logger.Error(fmt.Sprintf("Things service terminated: %s", err)) + logger.Error(fmt.Sprintf("%s service terminated: %s", svcName, err)) } } -func newService(auth mainflux.AuthServiceClient, dbTracer opentracing.Tracer, cacheTracer opentracing.Tracer, db *sqlx.DB, cacheClient *redis.Client, esClient *redis.Client, logger mflog.Logger) things.Service { - database := thingsPg.NewDatabase(db) +func newService(db *sqlx.DB, auth upolicies.AuthServiceClient, cacheClient *redis.Client, esClient *redis.Client, tracer trace.Tracer, logger mflog.Logger) (clients.Service, groups.Service, tpolicies.Service) { + database := postgres.NewDatabase(db, tracer) + cRepo := cpostgres.NewRepository(database) + gRepo := gpostgres.NewRepository(database) + pRepo := ppostgres.NewRepository(database) - thingsRepo := thingsPg.NewThingRepository(database) - 
thingsRepo = tracing.ThingRepositoryMiddleware(dbTracer, thingsRepo) + idp := uuid.New() - channelsRepo := thingsPg.NewChannelRepository(database) - channelsRepo = tracing.ChannelRepositoryMiddleware(dbTracer, channelsRepo) + policyCache := redispcache.NewCache(cacheClient) + thingCache := redisthcache.NewCache(cacheClient) - chanCache := rediscache.NewChannelCache(cacheClient) - chanCache = tracing.ChannelCacheMiddleware(cacheTracer, chanCache) + csvc := clients.NewService(auth, cRepo, thingCache, idp) + gsvc := groups.NewService(auth, gRepo, idp) + psvc := tpolicies.NewService(auth, pRepo, thingCache, policyCache, idp) - thingCache := rediscache.NewThingCache(cacheClient) - thingCache = tracing.ThingCacheMiddleware(cacheTracer, thingCache) - idProvider := uuid.New() + csvc = redisthcache.NewEventStoreMiddleware(csvc, esClient) + gsvc = redischcache.NewEventStoreMiddleware(gsvc, esClient) + psvc = redispcache.NewEventStoreMiddleware(psvc, esClient) - svc := things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) - svc = rediscache.NewEventStoreMiddleware(svc, esClient) - svc = api.LoggingMiddleware(svc, logger) + csvc = ctracing.TracingMiddleware(csvc, tracer) + csvc = capi.LoggingMiddleware(csvc, logger) counter, latency := internal.MakeMetrics(svcName, "api") - svc = api.MetricsMiddleware(svc, counter, latency) - - return svc + csvc = capi.MetricsMiddleware(csvc, counter, latency) + + gsvc = gtracing.TracingMiddleware(gsvc, tracer) + gsvc = gapi.LoggingMiddleware(gsvc, logger) + counter, latency = internal.MakeMetrics(fmt.Sprintf("%s_groups", svcName), "api") + gsvc = gapi.MetricsMiddleware(gsvc, counter, latency) + psvc = ppracing.TracingMiddleware(psvc, tracer) + psvc = papi.LoggingMiddleware(psvc, logger) + counter, latency = internal.MakeMetrics(fmt.Sprintf("%s_policies", svcName), "api") + psvc = papi.MetricsMiddleware(psvc, counter, latency) + + return csvc, gsvc, psvc } diff --git a/cmd/timescale-reader/main.go 
b/cmd/timescale-reader/main.go index 93ed1fee2f..4b0e07b114 100644 --- a/cmd/timescale-reader/main.go +++ b/cmd/timescale-reader/main.go @@ -34,7 +34,7 @@ const ( type config struct { LogLevel string `env:"MF_TIMESCALE_READER_LOG_LEVEL" envDefault:"info"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -63,7 +63,7 @@ func main() { repo := newService(db, logger) - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + auth, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -81,7 +81,7 @@ func main() { if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(repo, tc, auth, svcName), logger) g.Go(func() error { return hs.Start() diff --git a/cmd/timescale-writer/main.go b/cmd/timescale-writer/main.go index ad211a2043..7a46c29cab 100644 --- a/cmd/timescale-writer/main.go +++ b/cmd/timescale-writer/main.go @@ -61,11 +61,16 @@ func main() { } defer db.Close() - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) + logger.Error(fmt.Sprintf("Failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) repo := newService(db, logger) diff --git a/cmd/twins/main.go 
b/cmd/twins/main.go index c225ea8a77..e898db4418 100644 --- a/cmd/twins/main.go +++ b/cmd/twins/main.go @@ -10,7 +10,6 @@ import ( "os" "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/internal" authClient "github.com/mainflux/mainflux/internal/clients/grpc/auth" jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" @@ -24,15 +23,16 @@ import ( "github.com/mainflux/mainflux/pkg/messaging/brokers" pstracing "github.com/mainflux/mainflux/pkg/messaging/tracing" "github.com/mainflux/mainflux/pkg/uuid" - localusers "github.com/mainflux/mainflux/things/standalone" + localusers "github.com/mainflux/mainflux/things/clients/standalone" "github.com/mainflux/mainflux/twins" "github.com/mainflux/mainflux/twins/api" twapi "github.com/mainflux/mainflux/twins/api/http" twmongodb "github.com/mainflux/mainflux/twins/mongodb" rediscache "github.com/mainflux/mainflux/twins/redis" "github.com/mainflux/mainflux/twins/tracing" - opentracing "github.com/opentracing/opentracing-go" + "github.com/mainflux/mainflux/users/policies" "go.mongodb.org/mongo-driver/mongo" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" ) @@ -47,11 +47,11 @@ const ( type config struct { LogLevel string `env:"MF_TWINS_LOG_LEVEL" envDefault:"info"` - StandaloneEmail string `env:"MF_TWINS_STANDALONE_EMAIL" envDefault:""` + StandaloneID string `env:"MF_TWINS_STANDALONE_ID" envDefault:""` StandaloneToken string `env:"MF_TWINS_STANDALONE_TOKEN" envDefault:""` ChannelID string `env:"MF_TWINS_CHANNEL_ID" envDefault:""` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -74,29 +74,28 @@ func main() { } defer cacheClient.Close() - cacheTracer, cacheCloser, err := jaegerClient.NewTracer("twins_cache", cfg.JaegerURL) - if err != nil { - 
logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer cacheCloser.Close() - db, err := mongoClient.Setup(envPrefix) if err != nil { logger.Fatal(fmt.Sprintf("failed to setup postgres database : %s", err)) } - dbTracer, dbCloser, err := jaegerClient.NewTracer("twins_db", cfg.JaegerURL) + tp, err := jaegerClient.NewProvider("twins_db", cfg.JaegerURL) if err != nil { logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) } - defer dbCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) - var auth mainflux.AuthServiceClient - switch cfg.StandaloneEmail != "" && cfg.StandaloneToken != "" { + var auth policies.AuthServiceClient + switch cfg.StandaloneID != "" && cfg.StandaloneToken != "" { case true: - auth = localusers.NewAuthService(cfg.StandaloneEmail, cfg.StandaloneToken) + auth = localusers.NewAuthService(cfg.StandaloneID, cfg.StandaloneToken) default: - authServiceClient, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) + authServiceClient, authHandler, err := authClient.Setup(envPrefix, svcName, cfg.JaegerURL) if err != nil { logger.Fatal(err.Error()) } @@ -105,12 +104,6 @@ func main() { logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) } - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer traceCloser.Close() - pubSub, err := brokers.NewPubSub(cfg.BrokerURL, queue, logger) if err != nil { logger.Fatal(fmt.Sprintf("failed to connect to message broker: %s", err)) @@ -118,13 +111,13 @@ func main() { pubSub = pstracing.NewPubSub(tracer, pubSub) defer pubSub.Close() - svc := newService(ctx, svcName, pubSub, cfg.ChannelID, auth, dbTracer, db, cacheTracer, cacheClient, logger) + svc := newService(ctx, svcName, pubSub, cfg.ChannelID, 
auth, tracer, db, cacheClient, logger) httpServerConfig := server.Config{Port: defSvcHttpPort} if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, twapi.MakeHandler(tracer, svc, logger), logger) + hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, twapi.MakeHandler(svc, logger), logger) g.Go(func() error { return hs.Start() @@ -139,16 +132,16 @@ func main() { } } -func newService(ctx context.Context, id string, ps messaging.PubSub, chanID string, users mainflux.AuthServiceClient, dbTracer opentracing.Tracer, db *mongo.Database, cacheTracer opentracing.Tracer, cacheClient *redis.Client, logger mflog.Logger) twins.Service { +func newService(ctx context.Context, id string, ps messaging.PubSub, chanID string, users policies.AuthServiceClient, tracer trace.Tracer, db *mongo.Database, cacheClient *redis.Client, logger mflog.Logger) twins.Service { twinRepo := twmongodb.NewTwinRepository(db) - twinRepo = tracing.TwinRepositoryMiddleware(dbTracer, twinRepo) + twinRepo = tracing.TwinRepositoryMiddleware(tracer, twinRepo) stateRepo := twmongodb.NewStateRepository(db) - stateRepo = tracing.StateRepositoryMiddleware(dbTracer, stateRepo) + stateRepo = tracing.StateRepositoryMiddleware(tracer, stateRepo) idProvider := uuid.New() twinCache := rediscache.NewTwinCache(cacheClient) - twinCache = tracing.TwinCacheMiddleware(cacheTracer, twinCache) + twinCache = tracing.TwinCacheMiddleware(tracer, twinCache) svc := twins.New(ps, users, twinRepo, twinCache, stateRepo, idProvider, chanID, logger) svc = api.LoggingMiddleware(svc, logger) diff --git a/cmd/users/main.go b/cmd/users/main.go index fa513a1d17..da29211ffe 100644 --- a/cmd/users/main.go +++ b/cmd/users/main.go @@ -9,48 +9,66 @@ import ( "log" "os" "regexp" + "time" + "github.com/go-zoo/bone" + 
"github.com/jmoiron/sqlx" "github.com/mainflux/mainflux/internal" - authClient "github.com/mainflux/mainflux/internal/clients/grpc/auth" + jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" pgClient "github.com/mainflux/mainflux/internal/clients/postgres" "github.com/mainflux/mainflux/internal/email" "github.com/mainflux/mainflux/internal/env" + "github.com/mainflux/mainflux/internal/postgres" "github.com/mainflux/mainflux/internal/server" + grpcserver "github.com/mainflux/mainflux/internal/server/grpc" httpserver "github.com/mainflux/mainflux/internal/server/http" - "github.com/mainflux/mainflux/pkg/errors" + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/users" - "github.com/mainflux/mainflux/users/bcrypt" - "github.com/mainflux/mainflux/users/emailer" - "github.com/mainflux/mainflux/users/tracing" + "github.com/mainflux/mainflux/users/clients" + capi "github.com/mainflux/mainflux/users/clients/api" + "github.com/mainflux/mainflux/users/clients/emailer" + cpostgres "github.com/mainflux/mainflux/users/clients/postgres" + ctracing "github.com/mainflux/mainflux/users/clients/tracing" + "github.com/mainflux/mainflux/users/groups" + gapi "github.com/mainflux/mainflux/users/groups/api" + gpostgres "github.com/mainflux/mainflux/users/groups/postgres" + gtracing "github.com/mainflux/mainflux/users/groups/tracing" + "github.com/mainflux/mainflux/users/hasher" + "github.com/mainflux/mainflux/users/jwt" + "github.com/mainflux/mainflux/users/policies" + grpcapi "github.com/mainflux/mainflux/users/policies/api/grpc" + papi "github.com/mainflux/mainflux/users/policies/api/http" + ppostgres "github.com/mainflux/mainflux/users/policies/postgres" + ptracing "github.com/mainflux/mainflux/users/policies/tracing" + clientsPg "github.com/mainflux/mainflux/users/postgres" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" - - 
"github.com/jmoiron/sqlx" - "github.com/mainflux/mainflux" - jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" - mflog "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/users/api" - usersPg "github.com/mainflux/mainflux/users/postgres" - opentracing "github.com/opentracing/opentracing-go" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" ) const ( svcName = "users" envPrefix = "MF_USERS_" envPrefixHttp = "MF_USERS_HTTP_" + envPrefixGrpc = "MF_USERS_GRPC_" defDB = "users" defSvcHttpPort = "9002" + defSvcGrpcPort = "9192" ) type config struct { - LogLevel string `env:"MF_USERS_LOG_LEVEL" envDefault:"info"` - AdminEmail string `env:"MF_USERS_ADMIN_EMAIL" envDefault:""` - AdminPassword string `env:"MF_USERS_ADMIN_PASSWORD" envDefault:""` - PassRegexText string `env:"MF_USERS_PASS_REGEX" envDefault:"^.{8,}$"` - SelfRegister bool `env:"MF_USERS_ALLOW_SELF_REGISTER" envDefault:"true"` - ResetURL string `env:"MF_TOKEN_RESET_ENDPOINT" envDefault:"email.tmpl"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` - PassRegex *regexp.Regexp + LogLevel string `env:"MF_USERS_LOG_LEVEL" envDefault:"info"` + SecretKey string `env:"MF_USERS_SECRET_KEY" envDefault:"secret"` + AdminEmail string `env:"MF_USERS_ADMIN_EMAIL" envDefault:""` + AdminPassword string `env:"MF_USERS_ADMIN_PASSWORD" envDefault:""` + PassRegexText string `env:"MF_USERS_PASS_REGEX" envDefault:"^.{8,}$"` + AccessDuration string `env:"MF_USERS_ACCESS_TOKEN_DURATION" envDefault:"15m"` + RefreshDuration string `env:"MF_USERS_REFRESH_TOKEN_DURATION" envDefault:"24h"` + ResetURL string `env:"MF_TOKEN_RESET_ENDPOINT" envDefault:"/reset-request"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` + PassRegex *regexp.Regexp } func main() { @@ -59,168 +77,167 @@ func main() { cfg := config{} if err := env.Parse(&cfg); err != nil { - log.Fatalf("failed to load %s configuration : %s", svcName, err) - } - - logger, err := 
mflog.New(os.Stdout, cfg.LogLevel) - if err != nil { - log.Fatalf("failed to init logger: %s", err) + log.Fatalf("failed to load %s configuration : %s", svcName, err.Error()) } passRegex, err := regexp.Compile(cfg.PassRegexText) if err != nil { - log.Fatalf("Invalid password validation rules %s\n", cfg.PassRegexText) + log.Fatalf("invalid password validation rules %s\n", cfg.PassRegexText) } cfg.PassRegex = passRegex + logger, err := mflog.New(os.Stdout, cfg.LogLevel) + if err != nil { + logger.Fatal(fmt.Sprintf("failed to init logger: %s", err.Error())) + } + ec := email.Config{} if err := env.Parse(&ec); err != nil { - logger.Fatal(fmt.Sprintf("failed to load email configuration : %s", err)) + logger.Fatal(fmt.Sprintf("failed to load email configuration : %s", err.Error())) } dbConfig := pgClient.Config{Name: defDB} - db, err := pgClient.SetupWithConfig(envPrefix, *usersPg.Migration(), dbConfig) + db, err := pgClient.SetupWithConfig(envPrefix, *clientsPg.Migration(), dbConfig) if err != nil { logger.Fatal(err.Error()) } defer db.Close() - auth, authHandler, err := authClient.Setup(envPrefix, cfg.JaegerURL) - if err != nil { - logger.Fatal(err.Error()) - } - defer authHandler.Close() - logger.Info("Successfully connected to auth grpc server " + authHandler.Secure()) - - dbTracer, dbCloser, err := jaegerClient.NewTracer("auth_db", cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) } - defer dbCloser.Close() - - svc := newService(db, dbTracer, auth, cfg, ec, logger) + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) - tracer, closer, err := jaegerClient.NewTracer("users", cfg.JaegerURL) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) - } - defer closer.Close() + csvc, gsvc, psvc := 
newService(db, tracer, cfg, ec, logger) httpServerConfig := server.Config{Port: defSvcHttpPort} if err := env.Parse(&httpServerConfig, env.Options{Prefix: envPrefixHttp, AltPrefix: envPrefix}); err != nil { - logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err)) + logger.Fatal(fmt.Sprintf("failed to load %s HTTP server configuration : %s", svcName, err.Error())) + } + mux := bone.New() + hsc := httpserver.New(ctx, cancel, svcName, httpServerConfig, capi.MakeClientsHandler(csvc, mux, logger), logger) + hsg := httpserver.New(ctx, cancel, svcName, httpServerConfig, gapi.MakeGroupsHandler(gsvc, mux, logger), logger) + hsp := httpserver.New(ctx, cancel, svcName, httpServerConfig, papi.MakePolicyHandler(psvc, mux, logger), logger) + + registerAuthServiceServer := func(srv *grpc.Server) { + reflection.Register(srv) + policies.RegisterAuthServiceServer(srv, grpcapi.NewServer(csvc, psvc)) + + } + grpcServerConfig := server.Config{Port: defSvcGrpcPort} + if err := env.Parse(&grpcServerConfig, env.Options{Prefix: envPrefixGrpc, AltPrefix: envPrefix}); err != nil { + log.Fatalf("failed to load %s gRPC server configuration : %s", svcName, err.Error()) } - hs := httpserver.New(ctx, cancel, svcName, httpServerConfig, api.MakeHandler(svc, tracer, logger), logger) + gs := grpcserver.New(ctx, cancel, svcName, grpcServerConfig, registerAuthServiceServer, logger) g.Go(func() error { - return hs.Start() + return hsp.Start() + }) + g.Go(func() error { + return gs.Start() }) g.Go(func() error { - return server.StopSignalHandler(ctx, cancel, logger, svcName, hs) + return server.StopSignalHandler(ctx, cancel, logger, svcName, hsc, hsg, hsp, gs) }) if err := g.Wait(); err != nil { - logger.Error(fmt.Sprintf("Users service terminated: %s", err)) + logger.Error(fmt.Sprintf("users service terminated: %s", err)) } } -func newService(db *sqlx.DB, tracer opentracing.Tracer, auth mainflux.AuthServiceClient, c config, ec email.Config, logger mflog.Logger) 
users.Service { - database := usersPg.NewDatabase(db) - hasher := bcrypt.New() - userRepo := tracing.UserRepositoryMiddleware(tracer, usersPg.NewUserRepo(database)) +func newService(db *sqlx.DB, tracer trace.Tracer, c config, ec email.Config, logger mflog.Logger) (clients.Service, groups.GroupService, policies.PolicyService) { + database := postgres.NewDatabase(db, tracer) + cRepo := cpostgres.NewClientRepo(database) + gRepo := gpostgres.NewGroupRepo(database) + pRepo := ppostgres.NewPolicyRepo(database) - emailer, err := emailer.New(c.ResetURL, &ec) + idp := uuid.New() + hsr := hasher.New() + + aDuration, err := time.ParseDuration(c.AccessDuration) if err != nil { - logger.Error(fmt.Sprintf("Failed to configure e-mailing util: %s", err)) + logger.Error(fmt.Sprintf("failed to parse access token duration: %s", err.Error())) } + rDuration, err := time.ParseDuration(c.RefreshDuration) + if err != nil { + logger.Error(fmt.Sprintf("failed to parse refresh token duration: %s", err.Error())) + } + tokenizer := jwt.NewTokenRepo([]byte(c.SecretKey), aDuration, rDuration) + tokenizer = jwt.NewTokenRepoMiddleware(tokenizer, tracer) - idProvider := uuid.New() + emailer, err := emailer.New(c.ResetURL, &ec) + if err != nil { + logger.Error(fmt.Sprintf("failed to configure e-mailing util: %s", err.Error())) + } + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, hsr, idp, c.PassRegex) + gsvc := groups.NewService(gRepo, pRepo, tokenizer, idp) + psvc := policies.NewService(pRepo, tokenizer, idp) - svc := users.New(userRepo, hasher, auth, emailer, idProvider, c.PassRegex) - svc = api.LoggingMiddleware(svc, logger) + csvc = ctracing.TracingMiddleware(csvc, tracer) + csvc = capi.LoggingMiddleware(csvc, logger) counter, latency := internal.MakeMetrics(svcName, "api") - svc = api.MetricsMiddleware(svc, counter, latency) - - if err := createAdmin(svc, userRepo, c, auth); err != nil { - logger.Fatal(fmt.Sprintf("failed to create admin user: %s", err)) - } - - switch 
c.SelfRegister { - case true: - // If MF_USERS_ALLOW_SELF_REGISTER environment variable is "true", - // everybody can create a new user. Here, check the existence of that - // policy. If the policy does not exist, create it; otherwise, there is - // no need to do anything further. - _, err := auth.Authorize(context.Background(), &mainflux.AuthorizeReq{Obj: "user", Act: "create", Sub: "*"}) - if err != nil { - // Add a policy that allows anybody to create a user - apr, err := auth.AddPolicy(context.Background(), &mainflux.AddPolicyReq{Obj: "user", Act: "create", Sub: "*"}) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to add the policy related to MF_USERS_ALLOW_SELF_REGISTER: %s", err)) - } - if !apr.GetAuthorized() { - logger.Fatal(fmt.Sprintf("failed to authorized the policy result related to MF_USERS_ALLOW_SELF_REGISTER: " + errors.ErrAuthorization.Error())) - } - } - default: - // If MF_USERS_ALLOW_SELF_REGISTER environment variable is "false", - // everybody cannot create a new user. Therefore, delete a policy that - // allows everybody to create a new user. - dpr, err := auth.DeletePolicy(context.Background(), &mainflux.DeletePolicyReq{Obj: "user", Act: "create", Sub: "*"}) - if err != nil { - logger.Fatal(fmt.Sprintf("failed to delete a policy: %s", err)) - } - if !dpr.GetDeleted() { - logger.Fatal("deleting a policy expected to succeed.") - } - } + csvc = capi.MetricsMiddleware(csvc, counter, latency) - return svc -} + gsvc = gtracing.TracingMiddleware(gsvc, tracer) + gsvc = gapi.LoggingMiddleware(gsvc, logger) + counter, latency = internal.MakeMetrics("groups", "api") + gsvc = gapi.MetricsMiddleware(gsvc, counter, latency) -func createAdmin(svc users.Service, userRepo users.UserRepository, c config, auth mainflux.AuthServiceClient) error { - user := users.User{ - Email: c.AdminEmail, - Password: c.AdminPassword, - } - - if admin, err := userRepo.RetrieveByEmail(context.Background(), user.Email); err == nil { - // The admin is already created. 
Check existence of the admin policy. - _, err := auth.Authorize(context.Background(), &mainflux.AuthorizeReq{Obj: "authorities", Act: "member", Sub: admin.ID}) - if err != nil { - apr, err := auth.AddPolicy(context.Background(), &mainflux.AddPolicyReq{Obj: "authorities", Act: "member", Sub: admin.ID}) - if err != nil { - return err - } - if !apr.GetAuthorized() { - return errors.ErrAuthorization - } - } - return nil + psvc = ptracing.TracingMiddleware(psvc, tracer) + psvc = papi.LoggingMiddleware(psvc, logger) + counter, latency = internal.MakeMetrics("policies", "api") + psvc = papi.MetricsMiddleware(psvc, counter, latency) + + if err := createAdmin(c, cRepo, hsr, csvc); err != nil { + logger.Error(fmt.Sprintf("failed to create admin client: %s", err)) } + return csvc, gsvc, psvc +} - // Add a policy that allows anybody to create a user - apr, err := auth.AddPolicy(context.Background(), &mainflux.AddPolicyReq{Obj: "user", Act: "create", Sub: "*"}) +func createAdmin(c config, crepo mfclients.Repository, hsr clients.Hasher, svc clients.Service) error { + id, err := uuid.New().ID() + if err != nil { + return err + } + hash, err := hsr.Hash(c.AdminPassword) if err != nil { return err } - if !apr.GetAuthorized() { - return errors.ErrAuthorization + + client := mfclients.Client{ + ID: id, + Name: "admin", + Credentials: mfclients.Credentials{ + Identity: c.AdminEmail, + Secret: hash, + }, + Metadata: mfclients.Metadata{ + "role": "admin", + }, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Role: mfclients.AdminRole, + Status: mfclients.EnabledStatus, + } + + if _, err := crepo.RetrieveByIdentity(context.Background(), client.Credentials.Identity); err == nil { + return nil } // Create an admin - uid, err := svc.Register(context.Background(), "", user) - if err != nil { + if _, err = crepo.Save(context.Background(), client); err != nil { return err } - - apr, err = auth.AddPolicy(context.Background(), &mainflux.AddPolicyReq{Obj: "authorities", Act: "member", Sub: 
uid}) + _, err = svc.IssueToken(context.Background(), c.AdminEmail, c.AdminPassword) if err != nil { return err } - if !apr.GetAuthorized() { - return errors.ErrAuthorization - } return nil } diff --git a/cmd/ws/main.go b/cmd/ws/main.go index f712743903..a210f7dc46 100644 --- a/cmd/ws/main.go +++ b/cmd/ws/main.go @@ -9,8 +9,7 @@ import ( "log" "os" - "github.com/mainflux/mainflux" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" "github.com/mainflux/mainflux/internal" @@ -23,6 +22,7 @@ import ( "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/pkg/messaging/brokers" pstracing "github.com/mainflux/mainflux/pkg/messaging/tracing" + "github.com/mainflux/mainflux/things/policies" adapter "github.com/mainflux/mainflux/ws" "github.com/mainflux/mainflux/ws/api" "github.com/mainflux/mainflux/ws/tracing" @@ -38,7 +38,7 @@ const ( type config struct { LogLevel string `env:"MF_WS_ADAPTER_LOG_LEVEL" envDefault:"info"` BrokerURL string `env:"MF_BROKER_URL" envDefault:"nats://localhost:4222"` - JaegerURL string `env:"MF_JAEGER_URL" envDefault:"localhost:6831"` + JaegerURL string `env:"MF_JAEGER_URL" envDefault:"http://jaeger:14268/api/traces"` } func main() { @@ -62,11 +62,16 @@ func main() { defer internal.Close(logger, tcHandler) logger.Info("Successfully connected to things grpc server " + tcHandler.Secure()) - tracer, traceCloser, err := jaegerClient.NewTracer(svcName, cfg.JaegerURL) + tp, err := jaegerClient.NewProvider(svcName, cfg.JaegerURL) if err != nil { logger.Fatal(fmt.Sprintf("failed to init Jaeger: %s", err)) } - defer traceCloser.Close() + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + logger.Error(fmt.Sprintf("Error shutting down tracer provider: %v", err)) + } + }() + tracer := tp.Tracer(svcName) nps, err := brokers.NewPubSub(cfg.BrokerURL, "", logger) if err != nil { @@ -96,7 +101,7 @@ func main() { } } -func newService(tc 
mainflux.ThingsServiceClient, nps messaging.PubSub, logger mflog.Logger, tracer opentracing.Tracer) adapter.Service { +func newService(tc policies.ThingsServiceClient, nps messaging.PubSub, logger mflog.Logger, tracer trace.Tracer) adapter.Service { svc := adapter.New(tc, nps) svc = tracing.New(tracer, svc) svc = api.LoggingMiddleware(svc, logger) diff --git a/coap/adapter.go b/coap/adapter.go index cb4fa0b1b6..d4b7f0be0d 100644 --- a/coap/adapter.go +++ b/coap/adapter.go @@ -12,8 +12,8 @@ import ( "sync" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/messaging" ) @@ -39,13 +39,13 @@ var _ Service = (*adapterService)(nil) // Observers is a map of maps, type adapterService struct { - auth mainflux.ThingsServiceClient + auth policies.ThingsServiceClient pubsub messaging.PubSub obsLock sync.Mutex } // New instantiates the CoAP adapter implementation. -func New(auth mainflux.ThingsServiceClient, pubsub messaging.PubSub) Service { +func New(auth policies.ThingsServiceClient, pubsub messaging.PubSub) Service { as := &adapterService{ auth: auth, pubsub: pubsub, @@ -56,27 +56,38 @@ func New(auth mainflux.ThingsServiceClient, pubsub messaging.PubSub) Service { } func (svc *adapterService) Publish(ctx context.Context, key string, msg *messaging.Message) error { - ar := &mainflux.AccessByKeyReq{ - Token: key, - ChanID: msg.Channel, + ar := &policies.AuthorizeReq{ + Sub: key, + Obj: msg.Channel, + Act: policies.WriteAction, + EntityType: policies.GroupEntityType, } - thid, err := svc.auth.CanAccessByKey(ctx, ar) + res, err := svc.auth.Authorize(ctx, ar) if err != nil { return errors.Wrap(errors.ErrAuthorization, err) } - msg.Publisher = thid.GetValue() + if !res.GetAuthorized() { + return errors.ErrAuthorization + } + msg.Publisher = res.GetThingID() return svc.pubsub.Publish(ctx, msg.Channel, msg) } func (svc *adapterService) Subscribe(ctx context.Context, key, 
chanID, subtopic string, c Client) error { - ar := &mainflux.AccessByKeyReq{ - Token: key, - ChanID: chanID, + ar := &policies.AuthorizeReq{ + Sub: key, + Obj: chanID, + Act: policies.ReadAction, + EntityType: policies.GroupEntityType, } - if _, err := svc.auth.CanAccessByKey(ctx, ar); err != nil { + res, err := svc.auth.Authorize(ctx, ar) + if err != nil { return errors.Wrap(errors.ErrAuthorization, err) } + if !res.GetAuthorized() { + return errors.ErrAuthorization + } subject := fmt.Sprintf("%s.%s", chansPrefix, chanID) if subtopic != "" { subject = fmt.Sprintf("%s.%s", subject, subtopic) @@ -85,13 +96,19 @@ func (svc *adapterService) Subscribe(ctx context.Context, key, chanID, subtopic } func (svc *adapterService) Unsubscribe(ctx context.Context, key, chanID, subtopic, token string) error { - ar := &mainflux.AccessByKeyReq{ - Token: key, - ChanID: chanID, + ar := &policies.AuthorizeReq{ + Sub: key, + Obj: chanID, + Act: policies.ReadAction, + EntityType: policies.GroupEntityType, } - if _, err := svc.auth.CanAccessByKey(ctx, ar); err != nil { + res, err := svc.auth.Authorize(ctx, ar) + if err != nil { return errors.Wrap(errors.ErrAuthorization, err) } + if !res.GetAuthorized() { + return errors.ErrAuthorization + } subject := fmt.Sprintf("%s.%s", chansPrefix, chanID) if subtopic != "" { subject = fmt.Sprintf("%s.%s", subject, subtopic) diff --git a/coap/api/logging.go b/coap/api/logging.go index dd09685756..81d43d4283 100644 --- a/coap/api/logging.go +++ b/coap/api/logging.go @@ -11,19 +11,19 @@ import ( "time" "github.com/mainflux/mainflux/coap" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" ) var _ coap.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc coap.Service } // LoggingMiddleware adds logging facilities to the adapter. 
-func LoggingMiddleware(svc coap.Service, logger log.Logger) coap.Service { +func LoggingMiddleware(svc coap.Service, logger mflog.Logger) coap.Service { return &loggingMiddleware{logger, svc} } diff --git a/coap/api/transport.go b/coap/api/transport.go index 16fc8a62a5..f533693594 100644 --- a/coap/api/transport.go +++ b/coap/api/transport.go @@ -6,7 +6,7 @@ package api import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "regexp" @@ -18,7 +18,7 @@ import ( "github.com/go-zoo/bone" "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/coap" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "github.com/plgd-dev/go-coap/v2/message" "github.com/plgd-dev/go-coap/v2/message/codes" @@ -45,7 +45,7 @@ var ( ) var ( - logger log.Logger + logger mflog.Logger service coap.Service ) @@ -59,7 +59,7 @@ func MakeHTTPHandler() http.Handler { } // MakeCoAPHandler creates handler for CoAP messages. -func MakeCoAPHandler(svc coap.Service, l log.Logger) mux.HandlerFunc { +func MakeCoAPHandler(svc coap.Service, l mflog.Logger) mux.HandlerFunc { logger = l service = svc @@ -156,7 +156,7 @@ func decodeMessage(msg *mux.Message) (*messaging.Message, error) { } if msg.Body != nil { - buff, err := ioutil.ReadAll(msg.Body) + buff, err := io.ReadAll(msg.Body) if err != nil { return ret, err } diff --git a/coap/tracing/adapter.go b/coap/tracing/adapter.go index bea97a2b8d..4394063b8f 100644 --- a/coap/tracing/adapter.go +++ b/coap/tracing/adapter.go @@ -5,7 +5,7 @@ import ( "github.com/mainflux/mainflux/coap" "github.com/mainflux/mainflux/pkg/messaging" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) var _ coap.Service = (*tracingServiceMiddleware)(nil) @@ -13,18 +13,18 @@ var _ coap.Service = (*tracingServiceMiddleware)(nil) // Operation names for tracing CoAP operations. 
const ( publishOP = "publish_op" - subscribeOP = "subscirbe_op" + subscribeOP = "subscribe_op" unsubscribeOP = "unsubscribe_op" ) -// tracingServiceMiddleware is a middleware implementation for tracing CoAP service operations using OpenTracing. +// tracingServiceMiddleware is a middleware implementation for tracing CoAP service operations using OpenTelemetry. type tracingServiceMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer svc coap.Service } // New creates a new instance of TracingServiceMiddleware that wraps an existing CoAP service with tracing capabilities. -func New(tracer opentracing.Tracer, svc coap.Service) coap.Service { +func New(tracer trace.Tracer, svc coap.Service) coap.Service { return &tracingServiceMiddleware{ tracer: tracer, svc: svc, @@ -33,35 +33,21 @@ func New(tracer opentracing.Tracer, svc coap.Service) coap.Service { // Publish traces a CoAP publish operation. func (tm *tracingServiceMiddleware) Publish(ctx context.Context, key string, msg *messaging.Message) error { - span := tm.createSpan(ctx, publishOP) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := tm.tracer.Start(ctx, publishOP) + defer span.End() return tm.svc.Publish(ctx, key, msg) } // Subscribe traces a CoAP subscribe operation. func (tm *tracingServiceMiddleware) Subscribe(ctx context.Context, key string, chanID string, subtopic string, c coap.Client) error { - span := tm.createSpan(ctx, subscribeOP) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := tm.tracer.Start(ctx, subscribeOP) + defer span.End() return tm.svc.Subscribe(ctx, key, chanID, subtopic, c) } // Unsubscribe traces a CoAP unsubscribe operation. 
func (tm *tracingServiceMiddleware) Unsubscribe(ctx context.Context, key string, chanID string, subptopic string, token string) error { - span := tm.createSpan(ctx, unsubscribeOP) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := tm.tracer.Start(ctx, unsubscribeOP) + defer span.End() return tm.svc.Unsubscribe(ctx, key, chanID, subptopic, token) } - -// createSpan creates an OpenTracing span with an operation name and an optional parent span. -func (tm *tracingServiceMiddleware) createSpan(ctx context.Context, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - return tm.tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - return tm.tracer.StartSpan(opName) -} diff --git a/consumers/notifiers/api/endpoint_test.go b/consumers/notifiers/api/endpoint_test.go index eac99d0d74..2773f65fa6 100644 --- a/consumers/notifiers/api/endpoint_test.go +++ b/consumers/notifiers/api/endpoint_test.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "strings" @@ -21,7 +20,6 @@ import ( "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/uuid" - "github.com/opentracing/opentracing-go/mocktracer" "github.com/stretchr/testify/assert" ) @@ -76,7 +74,7 @@ func newService(tokens map[string]string) notifiers.Service { func newServer(svc notifiers.Service) *httptest.Server { logger := logger.NewMock() - mux := httpapi.MakeHandler(svc, mocktracer.New(), logger) + mux := httpapi.MakeHandler(svc, logger) return httptest.NewServer(mux) } @@ -257,7 +255,7 @@ func TestView(t *testing.T) { } res, err := req.make() assert.Nil(t, err, fmt.Sprintf("%s: unexpected request error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) assert.Nil(t, err, fmt.Sprintf("%s: unexpected read error %s", tc.desc, err)) data := 
strings.Trim(string(body), "\n") assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) @@ -377,7 +375,7 @@ func TestList(t *testing.T) { } res, err := req.make() assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) data := strings.Trim(string(body), "\n") assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.status, res.StatusCode)) diff --git a/consumers/notifiers/api/logging.go b/consumers/notifiers/api/logging.go index 935e4ef8e4..bea37a13ce 100644 --- a/consumers/notifiers/api/logging.go +++ b/consumers/notifiers/api/logging.go @@ -11,18 +11,18 @@ import ( "time" notifiers "github.com/mainflux/mainflux/consumers/notifiers" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" ) var _ notifiers.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc notifiers.Service } // LoggingMiddleware adds logging facilities to the core service. 
-func LoggingMiddleware(svc notifiers.Service, logger log.Logger) notifiers.Service { +func LoggingMiddleware(svc notifiers.Service, logger mflog.Logger) notifiers.Service { return &loggingMiddleware{logger, svc} } diff --git a/consumers/notifiers/api/transport.go b/consumers/notifiers/api/transport.go index 1eb9ca15cd..ebbb5c45ca 100644 --- a/consumers/notifiers/api/transport.go +++ b/consumers/notifiers/api/transport.go @@ -9,7 +9,6 @@ import ( "net/http" "strings" - kitot "github.com/go-kit/kit/tracing/opentracing" kithttp "github.com/go-kit/kit/transport/http" "github.com/go-zoo/bone" "github.com/mainflux/mainflux" @@ -17,8 +16,8 @@ import ( "github.com/mainflux/mainflux/internal/apiutil" "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" - opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" ) const ( @@ -32,7 +31,7 @@ const ( ) // MakeHandler returns a HTTP handler for API endpoints. 
-func MakeHandler(svc notifiers.Service, tracer opentracing.Tracer, logger logger.Logger) http.Handler { +func MakeHandler(svc notifiers.Service, logger logger.Logger) http.Handler { opts := []kithttp.ServerOption{ kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), } @@ -40,28 +39,28 @@ func MakeHandler(svc notifiers.Service, tracer opentracing.Tracer, logger logger mux := bone.New() mux.Post("/subscriptions", kithttp.NewServer( - kitot.TraceServer(tracer, "create_subscription")(createSubscriptionEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("create_subscription"))(createSubscriptionEndpoint(svc)), decodeCreate, encodeResponse, opts..., )) mux.Get("/subscriptions/:subID", kithttp.NewServer( - kitot.TraceServer(tracer, "view_subscription")(viewSubscriptionEndpint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("view_subscription"))(viewSubscriptionEndpint(svc)), decodeSubscription, encodeResponse, opts..., )) mux.Get("/subscriptions", kithttp.NewServer( - kitot.TraceServer(tracer, "list_subscriptions")(listSubscriptionsEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("list_subscriptions"))(listSubscriptionsEndpoint(svc)), decodeList, encodeResponse, opts..., )) mux.Delete("/subscriptions/:subID", kithttp.NewServer( - kitot.TraceServer(tracer, "delete_subscription")(deleteSubscriptionEndpint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("delete_subscription"))(deleteSubscriptionEndpint(svc)), decodeSubscription, encodeResponse, opts..., diff --git a/consumers/notifiers/mocks/auth.go b/consumers/notifiers/mocks/auth.go index 3d590098ad..beb2d20f83 100644 --- a/consumers/notifiers/mocks/auth.go +++ b/consumers/notifiers/mocks/auth.go @@ -6,60 +6,51 @@ package mocks import ( "context" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" "google.golang.org/grpc" ) -var _ 
mainflux.AuthServiceClient = (*authServiceMock)(nil) +var _ policies.AuthServiceClient = (*authServiceMock)(nil) type authServiceMock struct { users map[string]string } // NewAuth creates mock of auth service. -func NewAuth(users map[string]string) mainflux.AuthServiceClient { +func NewAuth(users map[string]string) policies.AuthServiceClient { return &authServiceMock{users} } -func (svc authServiceMock) Identify(ctx context.Context, in *mainflux.Token, opts ...grpc.CallOption) (*mainflux.UserIdentity, error) { +func (svc authServiceMock) Identify(ctx context.Context, in *policies.Token, opts ...grpc.CallOption) (*policies.UserIdentity, error) { if id, ok := svc.users[in.Value]; ok { - return &mainflux.UserIdentity{Id: id, Email: id}, nil + return &policies.UserIdentity{Id: id}, nil } return nil, errors.ErrAuthentication } -func (svc authServiceMock) Issue(ctx context.Context, in *mainflux.IssueReq, opts ...grpc.CallOption) (*mainflux.Token, error) { +func (svc authServiceMock) Issue(ctx context.Context, in *policies.IssueReq, opts ...grpc.CallOption) (*policies.Token, error) { if id, ok := svc.users[in.GetEmail()]; ok { switch in.Type { default: - return &mainflux.Token{Value: id}, nil + return &policies.Token{Value: id}, nil } } return nil, errors.ErrAuthentication } -func (svc authServiceMock) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { +func (svc authServiceMock) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { panic("not implemented") } -func (svc authServiceMock) AddPolicy(ctx context.Context, in *mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { +func (svc authServiceMock) AddPolicy(ctx context.Context, in *policies.AddPolicyReq, opts ...grpc.CallOption) (*policies.AddPolicyRes, error) { panic("not implemented") } -func (svc authServiceMock) DeletePolicy(ctx context.Context, in 
*mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { +func (svc authServiceMock) DeletePolicy(ctx context.Context, in *policies.DeletePolicyReq, opts ...grpc.CallOption) (*policies.DeletePolicyRes, error) { panic("not implemented") } -func (svc authServiceMock) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { - panic("not implemented") -} - -func (svc authServiceMock) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - panic("not implemented") -} - -func (svc authServiceMock) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, err error) { +func (svc authServiceMock) ListPolicies(ctx context.Context, in *policies.ListPoliciesReq, opts ...grpc.CallOption) (*policies.ListPoliciesRes, error) { panic("not implemented") } diff --git a/consumers/notifiers/postgres/database.go b/consumers/notifiers/postgres/database.go index c0e2a17051..d07ff03917 100644 --- a/consumers/notifiers/postgres/database.go +++ b/consumers/notifiers/postgres/database.go @@ -6,15 +6,18 @@ package postgres import ( "context" "database/sql" + "fmt" "github.com/jmoiron/sqlx" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var _ Database = (*database)(nil) type database struct { - db *sqlx.DB + db *sqlx.DB + tracer trace.Tracer } // Database provides a database interface @@ -26,38 +29,46 @@ type Database interface { } // NewDatabase creates a SubscriptionsDatabase instance -func NewDatabase(db *sqlx.DB) Database { +func NewDatabase(db *sqlx.DB, tracer trace.Tracer) Database { return &database{ - db: db, + db: db, + tracer: tracer, } } func (dm database) NamedExecContext(ctx context.Context, query string, args interface{}) (sql.Result, error) { - addSpanTags(ctx, query) + ctx, span := dm.addSpanTags(ctx, 
"NamedExecContext", query) + defer span.End() return dm.db.NamedExecContext(ctx, query, args) } func (dm database) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *sqlx.Row { - addSpanTags(ctx, query) + ctx, span := dm.addSpanTags(ctx, "QueryRowxContext", query) + defer span.End() return dm.db.QueryRowxContext(ctx, query, args...) } func (dm database) NamedQueryContext(ctx context.Context, query string, args interface{}) (*sqlx.Rows, error) { - addSpanTags(ctx, query) + ctx, span := dm.addSpanTags(ctx, "NamedQueryContext", query) + defer span.End() return dm.db.NamedQueryContext(ctx, query, args) } func (dm database) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - addSpanTags(ctx, query) + ctx, span := dm.addSpanTags(ctx, "GetContext", query) + defer span.End() return dm.db.GetContext(ctx, dest, query, args...) } -func addSpanTags(ctx context.Context, query string) { - span := opentracing.SpanFromContext(ctx) - if span != nil { - span.SetTag("sql.statement", query) - span.SetTag("span.kind", "client") - span.SetTag("peer.service", "postgres") - span.SetTag("db.type", "sql") - } +func (dm database) addSpanTags(ctx context.Context, method, query string) (context.Context, trace.Span) { + ctx, span := dm.tracer.Start(ctx, + fmt.Sprintf("sql_%s", method), + trace.WithAttributes( + attribute.String("sql.statement", query), + attribute.String("span.kind", "client"), + attribute.String("peer.service", "postgres"), + attribute.String("db.type", "sql"), + ), + ) + return ctx, span } diff --git a/consumers/notifiers/postgres/setup_test.go b/consumers/notifiers/postgres/setup_test.go index 2ee238c446..fa94d02008 100644 --- a/consumers/notifiers/postgres/setup_test.go +++ b/consumers/notifiers/postgres/setup_test.go @@ -11,6 +11,7 @@ import ( "os" "testing" + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access "github.com/jmoiron/sqlx" "github.com/mainflux/mainflux/consumers/notifiers/postgres" 
pgClient "github.com/mainflux/mainflux/internal/clients/postgres" diff --git a/consumers/notifiers/postgres/subscriptions_test.go b/consumers/notifiers/postgres/subscriptions_test.go index fee0750189..4e4eb7af38 100644 --- a/consumers/notifiers/postgres/subscriptions_test.go +++ b/consumers/notifiers/postgres/subscriptions_test.go @@ -13,6 +13,7 @@ import ( "github.com/mainflux/mainflux/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" ) const ( @@ -20,8 +21,10 @@ const ( numSubs = 100 ) +var tracer = otel.Tracer("tests") + func TestSave(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) + dbMiddleware := postgres.NewDatabase(db, tracer) repo := postgres.New(dbMiddleware) id1, err := idProvider.ID() @@ -69,7 +72,7 @@ func TestSave(t *testing.T) { } func TestView(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) + dbMiddleware := postgres.NewDatabase(db, tracer) repo := postgres.New(dbMiddleware) id, err := idProvider.ID() @@ -118,7 +121,7 @@ func TestRetrieveAll(t *testing.T) { _, err := db.Exec("DELETE FROM subscriptions") require.Nil(t, err, fmt.Sprintf("cleanup must not fail: %s", err)) - dbMiddleware := postgres.NewDatabase(db) + dbMiddleware := postgres.NewDatabase(db, tracer) repo := postgres.New(dbMiddleware) var subs []notifiers.Subscription @@ -222,7 +225,7 @@ func TestRetrieveAll(t *testing.T) { } func TestRemove(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) + dbMiddleware := postgres.NewDatabase(db, tracer) repo := postgres.New(dbMiddleware) id, err := idProvider.ID() require.Nil(t, err, fmt.Sprintf("got an error creating id: %s", err)) diff --git a/consumers/notifiers/service.go b/consumers/notifiers/service.go index 32598d961d..baaad1261d 100644 --- a/consumers/notifiers/service.go +++ b/consumers/notifiers/service.go @@ -11,6 +11,7 @@ import ( "github.com/mainflux/mainflux/consumers" "github.com/mainflux/mainflux/pkg/errors" 
"github.com/mainflux/mainflux/pkg/messaging" + "github.com/mainflux/mainflux/users/policies" ) var ( @@ -41,7 +42,7 @@ type Service interface { var _ Service = (*notifierService)(nil) type notifierService struct { - auth mainflux.AuthServiceClient + auth policies.AuthServiceClient subs SubscriptionsRepository idp mainflux.IDProvider notifier Notifier @@ -50,7 +51,7 @@ type notifierService struct { } // New instantiates the subscriptions service implementation. -func New(auth mainflux.AuthServiceClient, subs SubscriptionsRepository, idp mainflux.IDProvider, notifier Notifier, from string) Service { +func New(auth policies.AuthServiceClient, subs SubscriptionsRepository, idp mainflux.IDProvider, notifier Notifier, from string) Service { return ¬ifierService{ auth: auth, subs: subs, @@ -62,7 +63,7 @@ func New(auth mainflux.AuthServiceClient, subs SubscriptionsRepository, idp main } func (ns *notifierService) CreateSubscription(ctx context.Context, token string, sub Subscription) (string, error) { - res, err := ns.auth.Identify(ctx, &mainflux.Token{Value: token}) + res, err := ns.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return "", err } @@ -76,7 +77,7 @@ func (ns *notifierService) CreateSubscription(ctx context.Context, token string, } func (ns *notifierService) ViewSubscription(ctx context.Context, token, id string) (Subscription, error) { - if _, err := ns.auth.Identify(ctx, &mainflux.Token{Value: token}); err != nil { + if _, err := ns.auth.Identify(ctx, &policies.Token{Value: token}); err != nil { return Subscription{}, err } @@ -84,7 +85,7 @@ func (ns *notifierService) ViewSubscription(ctx context.Context, token, id strin } func (ns *notifierService) ListSubscriptions(ctx context.Context, token string, pm PageMetadata) (Page, error) { - if _, err := ns.auth.Identify(ctx, &mainflux.Token{Value: token}); err != nil { + if _, err := ns.auth.Identify(ctx, &policies.Token{Value: token}); err != nil { return Page{}, err } @@ -92,7 +93,7 @@ func 
(ns *notifierService) ListSubscriptions(ctx context.Context, token string, } func (ns *notifierService) RemoveSubscription(ctx context.Context, token, id string) error { - if _, err := ns.auth.Identify(ctx, &mainflux.Token{Value: token}); err != nil { + if _, err := ns.auth.Identify(ctx, &policies.Token{Value: token}); err != nil { return err } diff --git a/consumers/notifiers/smpp/README.md b/consumers/notifiers/smpp/README.md index 93c9efa262..ad9a6ba1a9 100644 --- a/consumers/notifiers/smpp/README.md +++ b/consumers/notifiers/smpp/README.md @@ -34,8 +34,8 @@ default values. | MF_SMPP_DST_ADDR_TON | SMPP destination address TON | | | MF_SMPP_SRC_ADDR_NPI | SMPP source address NPI | | | MF_SMPP_DST_ADDR_NPI | SMPP destination address NPI | | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | -| MF_AUTH_CLIENT_TLS | Auth client TLS flag | false | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | +| MF_AUTH_CLIENT_TLS | Users client TLS flag | false | | MF_AUTH_CA_CERTS | Path to Auth client CA certs in pem format | | ## Usage diff --git a/consumers/notifiers/smtp/README.md b/consumers/notifiers/smtp/README.md index 35cee3f642..2bc06ab357 100644 --- a/consumers/notifiers/smtp/README.md +++ b/consumers/notifiers/smtp/README.md @@ -33,8 +33,8 @@ default values. 
| MF_EMAIL_FROM_ADDRESS | Email "from" address | | | MF_EMAIL_FROM_NAME | Email "from" name | | | MF_EMAIL_TEMPLATE | Email template for sending notification emails | email.tmpl | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | | MF_AUTH_CLIENT_TLS | Auth client TLS flag | false | | MF_AUTH_CA_CERTS | Path to Auth client CA certs in pem format | | diff --git a/consumers/notifiers/smtp/notifier.go b/consumers/notifiers/smtp/notifier.go index cf4232e65e..ebf27bcee9 100644 --- a/consumers/notifiers/smtp/notifier.go +++ b/consumers/notifiers/smtp/notifier.go @@ -36,5 +36,5 @@ func (n *notifier) Notify(from string, to []string, msg *messaging.Message) erro values := string(msg.Payload) content := fmt.Sprintf(contentTemplate, msg.Publisher, msg.Protocol, values) - return n.agent.Send(to, from, subject, "", content, footer) + return n.agent.Send(to, from, subject, "", "", content, footer) } diff --git a/consumers/notifiers/tracing/notifier.go b/consumers/notifiers/tracing/notifier.go deleted file mode 100644 index 49fc749307..0000000000 --- a/consumers/notifiers/tracing/notifier.go +++ /dev/null @@ -1,33 +0,0 @@ -package tracing - -import ( - notifiers "github.com/mainflux/mainflux/consumers/notifiers" - "github.com/mainflux/mainflux/pkg/messaging" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" -) - -const notifierOP = "notifier_op" - -var _ notifiers.Notifier = (*serviceMiddleware)(nil) - -type serviceMiddleware struct { - svc notifiers.Notifier - tracer opentracing.Tracer -} - -// NewNotifier creates a new notifier tracing middleware service. 
-func NewNotifier(tracer opentracing.Tracer, svc notifiers.Notifier) notifiers.Notifier { - return &serviceMiddleware{ - svc: svc, - tracer: tracer, - } -} - -// Notify traces notify operations. -func (sm *serviceMiddleware) Notify(from string, to []string, msg *messaging.Message) error { - span := sm.tracer.StartSpan(notifierOP, ext.SpanKindConsumer) - ext.MessageBusDestination.Set(span, msg.Subtopic) - defer span.Finish() - return sm.svc.Notify(from, to, msg) -} diff --git a/consumers/notifiers/tracing/subscriptions.go b/consumers/notifiers/tracing/subscriptions.go index f165b8e6bf..d96d42a5a6 100644 --- a/consumers/notifiers/tracing/subscriptions.go +++ b/consumers/notifiers/tracing/subscriptions.go @@ -9,7 +9,7 @@ import ( "context" notifiers "github.com/mainflux/mainflux/consumers/notifiers" - opentracing "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) const ( @@ -22,13 +22,13 @@ const ( var _ notifiers.SubscriptionsRepository = (*subRepositoryMiddleware)(nil) type subRepositoryMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer repo notifiers.SubscriptionsRepository } // New instantiates a new Subscriptions repository that // tracks request and their latency, and adds spans to context. 
-func New(tracer opentracing.Tracer, repo notifiers.SubscriptionsRepository) notifiers.SubscriptionsRepository { +func New(tracer trace.Tracer, repo notifiers.SubscriptionsRepository) notifiers.SubscriptionsRepository { return subRepositoryMiddleware{ tracer: tracer, repo: repo, @@ -36,43 +36,29 @@ func New(tracer opentracing.Tracer, repo notifiers.SubscriptionsRepository) noti } func (urm subRepositoryMiddleware) Save(ctx context.Context, sub notifiers.Subscription) (string, error) { - span := createSpan(ctx, urm.tracer, saveOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := urm.tracer.Start(ctx, saveOp) + defer span.End() return urm.repo.Save(ctx, sub) } func (urm subRepositoryMiddleware) Retrieve(ctx context.Context, id string) (notifiers.Subscription, error) { - span := createSpan(ctx, urm.tracer, retrieveOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := urm.tracer.Start(ctx, retrieveOp) + defer span.End() return urm.repo.Retrieve(ctx, id) } func (urm subRepositoryMiddleware) RetrieveAll(ctx context.Context, pm notifiers.PageMetadata) (notifiers.Page, error) { - span := createSpan(ctx, urm.tracer, retrieveAllOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := urm.tracer.Start(ctx, retrieveAllOp) + defer span.End() return urm.repo.RetrieveAll(ctx, pm) } func (urm subRepositoryMiddleware) Remove(ctx context.Context, id string) error { - span := createSpan(ctx, urm.tracer, removeOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := urm.tracer.Start(ctx, removeOp) + defer span.End() return urm.repo.Remove(ctx, id) } - -func createSpan(ctx context.Context, tracer opentracing.Tracer, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - return tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - return tracer.StartSpan(opName) -} diff --git 
a/consumers/writers/api/logging.go b/consumers/writers/api/logging.go index 20a3296a2b..854012844f 100644 --- a/consumers/writers/api/logging.go +++ b/consumers/writers/api/logging.go @@ -10,18 +10,18 @@ import ( "time" "github.com/mainflux/mainflux/consumers" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" ) var _ consumers.BlockingConsumer = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger consumer consumers.BlockingConsumer } // LoggingMiddleware adds logging facilities to the adapter. -func LoggingMiddleware(consumer consumers.BlockingConsumer, logger log.Logger) consumers.BlockingConsumer { +func LoggingMiddleware(consumer consumers.BlockingConsumer, logger mflog.Logger) consumers.BlockingConsumer { return &loggingMiddleware{ logger: logger, consumer: consumer, diff --git a/consumers/writers/cassandra/setup_test.go b/consumers/writers/cassandra/setup_test.go index f66ba8b3b7..c5c60cfdc8 100644 --- a/consumers/writers/cassandra/setup_test.go +++ b/consumers/writers/cassandra/setup_test.go @@ -10,11 +10,11 @@ import ( "github.com/gocql/gocql" casClient "github.com/mainflux/mainflux/internal/clients/cassandra" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" dockertest "github.com/ory/dockertest/v3" ) -var logger, _ = log.New(os.Stdout, log.Info.String()) +var logger, _ = mflog.New(os.Stdout, mflog.Info.String()) func TestMain(m *testing.M) { pool, err := dockertest.NewPool("") diff --git a/consumers/writers/influxdb/consumer_test.go b/consumers/writers/influxdb/consumer_test.go index 2e505332d2..3b317e6973 100644 --- a/consumers/writers/influxdb/consumer_test.go +++ b/consumers/writers/influxdb/consumer_test.go @@ -14,7 +14,7 @@ import ( influxdata "github.com/influxdata/influxdb-client-go/v2" writer "github.com/mainflux/mainflux/consumers/writers/influxdb" - log "github.com/mainflux/mainflux/logger" + mflog 
"github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/transformers/json" "github.com/mainflux/mainflux/pkg/transformers/senml" "github.com/mainflux/mainflux/pkg/uuid" @@ -24,7 +24,7 @@ import ( const valueFields = 5 var ( - testLog, _ = log.New(os.Stdout, log.Info.String()) + testLog, _ = mflog.New(os.Stdout, mflog.Info.String()) streamsSize = 250 rowCountSenml = fmt.Sprintf(`from(bucket: "%s") |> range(start: -1h, stop: 1h) diff --git a/consumers/writers/mongodb/consumer_test.go b/consumers/writers/mongodb/consumer_test.go index 31d2f097f5..a8cb7e7453 100644 --- a/consumers/writers/mongodb/consumer_test.go +++ b/consumers/writers/mongodb/consumer_test.go @@ -18,7 +18,7 @@ import ( "github.com/mainflux/mainflux/pkg/transformers/json" "github.com/mainflux/mainflux/pkg/transformers/senml" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -27,7 +27,7 @@ import ( var ( port string addr string - testLog, _ = log.New(os.Stdout, log.Info.String()) + testLog, _ = mflog.New(os.Stdout, mflog.Info.String()) testDB = "test" collection = "messages" msgsNum = 100 diff --git a/docker/.env b/docker/.env index 45138fb630..2571b001a7 100644 --- a/docker/.env +++ b/docker/.env @@ -31,45 +31,34 @@ MF_JAEGER_PORT=6831 MF_JAEGER_FRONTEND=16686 MF_JAEGER_COLLECTOR=14268 MF_JAEGER_CONFIGS=5778 -MF_JAEGER_URL=jaeger:6831 +MF_JAEGER_URL=http://jaeger:14268/api/traces ## Core Services -### Auth -MF_AUTH_LOG_LEVEL=debug -MF_AUTH_HTTP_PORT=9020 -MF_AUTH_GRPC_PORT=7001 -MF_AUTH_GRPC_URL=auth:7001 -MF_AUTH_GRPC_TIMEOUT=1s -MF_AUTH_DB_PORT=5432 -MF_AUTH_DB_USER=mainflux -MF_AUTH_DB_PASS=mainflux -MF_AUTH_DB=auth -MF_AUTH_SECRET=secret -MF_AUTH_LOGIN_TOKEN_DURATION=10h - -### Keto -MF_KETO_READ_REMOTE_HOST=mainflux-keto -MF_KETO_READ_REMOTE_PORT=4466 -MF_KETO_WRITE_REMOTE_HOST=mainflux-keto -MF_KETO_WRITE_REMOTE_PORT=4467 
-MF_KETO_DB_PORT=5432 -MF_KETO_DB_USER=mainflux -MF_KETO_DB_PASS=mainflux -MF_KETO_DB=keto - ### Users MF_USERS_LOG_LEVEL=debug -MF_USERS_HTTP_PORT=9002 +MF_USERS_DB_HOST=clients-db MF_USERS_DB_PORT=5432 MF_USERS_DB_USER=mainflux MF_USERS_DB_PASS=mainflux MF_USERS_DB=users +MF_USERS_DB_SSL_MODE= +MF_USERS_DB_SSL_CERT= +MF_USERS_DB_SSL_KEY= +MF_USERS_DB_SSL_ROOT_CERT= +MF_USERS_HTTP_PORT=9002 +MF_USERS_GRPC_PORT=7001 +MF_USERS_GRPC_URL=users:7001 +MF_USERS_GRPC_TIMEOUT=1s +MF_USERS_SERVER_CERT= +MF_USERS_SERVER_KEY= +MF_USERS_SECRET_KEY=HyE2D4RUt9nnKG6v8zKEqAp6g6ka8hhZsqUpzgKvnwpXrNVQSH +MF_USERS_ACCESS_TOKEN_DURATION=15m +MF_USERS_REFRESH_TOKEN_DURATION=24h MF_USERS_ADMIN_EMAIL=admin@example.com MF_USERS_ADMIN_PASSWORD=12345678 MF_USERS_RESET_PWD_TEMPLATE=users.tmpl MF_USERS_PASS_REGEX=^.{8,}$$ -MF_USERS_ALLOW_SELF_REGISTER=true ### Email utility MF_EMAIL_HOST=smtp.mailtrap.io @@ -98,6 +87,8 @@ MF_THINGS_ES_URL=localhost:6379 MF_THINGS_ES_PASS= MF_THINGS_ES_DB=0 MF_THINGS_URL=http://mainflux-things:9000 +MF_THINGS_STANDALONE_ID= +MF_THINGS_STANDALONE_TOKEN= ### HTTP MF_HTTP_ADAPTER_PORT=8008 @@ -153,6 +144,7 @@ MF_PROVISION_BS_CONFIG_PROVISIONING=true MF_PROVISION_BS_AUTO_WHITELIST=true MF_PROVISION_BS_CONTENT= MF_PROVISION_CERTS_HOURS_VALID=2400h +MF_PROVISION_CERTS_RSA_BITS=2048 # Certs MF_CERTS_LOG_LEVEL=debug diff --git a/docker/addons/bootstrap/docker-compose.yml b/docker/addons/bootstrap/docker-compose.yml index 3e069fb6e5..af8e87a255 100644 --- a/docker/addons/bootstrap/docker-compose.yml +++ b/docker/addons/bootstrap/docker-compose.yml @@ -50,7 +50,7 @@ services: MF_THINGS_ES_URL: es-redis:${MF_REDIS_TCP_PORT} MF_BOOTSTRAP_ES_URL: es-redis:${MF_REDIS_TCP_PORT} MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMMEOUT: ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMMEOUT: ${MF_USERS_GRPC_TIMEOUT} networks: - docker_mainflux-base-net diff --git 
a/docker/addons/cassandra-reader/docker-compose.yml b/docker/addons/cassandra-reader/docker-compose.yml index cdd62047c9..c9e0016e3c 100644 --- a/docker/addons/cassandra-reader/docker-compose.yml +++ b/docker/addons/cassandra-reader/docker-compose.yml @@ -27,8 +27,8 @@ services: MF_JAEGER_URL: ${MF_JAEGER_URL} MF_THINGS_AUTH_GRPC_URL: ${MF_THINGS_AUTH_GRPC_URL} MF_THINGS_AUTH_GRPC_TIMEOUT: ${MF_THINGS_AUTH_GRPC_TIMEOUT} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT : ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT : ${MF_USERS_GRPC_TIMEOUT} ports: - ${MF_CASSANDRA_READER_PORT}:${MF_CASSANDRA_READER_PORT} networks: diff --git a/docker/addons/certs/docker-compose.yml b/docker/addons/certs/docker-compose.yml index d0bb00c768..d3b6d9b579 100644 --- a/docker/addons/certs/docker-compose.yml +++ b/docker/addons/certs/docker-compose.yml @@ -67,8 +67,8 @@ services: MF_VAULT_PKI_PATH: ${MF_VAULT_PKI_PATH} MF_THINGS_URL: ${MF_THINGS_URL} MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT: ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT: ${MF_USERS_GRPC_TIMEOUT} MF_CERTS_VAULT_HOST: ${MF_CERTS_VAULT_HOST} volumes: - ../../ssl/certs/ca.key:/etc/ssl/certs/ca.key diff --git a/docker/addons/influxdb-reader/docker-compose.yml b/docker/addons/influxdb-reader/docker-compose.yml index b6659b0692..af0b5aec8d 100644 --- a/docker/addons/influxdb-reader/docker-compose.yml +++ b/docker/addons/influxdb-reader/docker-compose.yml @@ -35,8 +35,8 @@ services: MF_JAEGER_URL: ${MF_JAEGER_URL} MF_THINGS_AUTH_GRPC_URL: ${MF_THINGS_AUTH_GRPC_URL} MF_THINGS_AUTH_GRPC_TIMEOUT: ${MF_THINGS_AUTH_GRPC_TIMEOUT} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT : ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT : ${MF_USERS_GRPC_TIMEOUT} ports: - ${MF_INFLUX_READER_PORT}:${MF_INFLUX_READER_PORT} networks: diff --git 
a/docker/addons/mongodb-reader/docker-compose.yml b/docker/addons/mongodb-reader/docker-compose.yml index 7f66f077d3..92e8bee6b4 100644 --- a/docker/addons/mongodb-reader/docker-compose.yml +++ b/docker/addons/mongodb-reader/docker-compose.yml @@ -29,8 +29,8 @@ services: MF_JAEGER_URL: ${MF_JAEGER_URL} MF_THINGS_AUTH_GRPC_URL: ${MF_THINGS_AUTH_GRPC_URL} MF_THINGS_AUTH_GRPC_TIMEOUT: ${MF_THINGS_AUTH_GRPC_TIMEOUT} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT : ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT : ${MF_USERS_GRPC_TIMEOUT} ports: - ${MF_MONGO_READER_PORT}:${MF_MONGO_READER_PORT} networks: diff --git a/docker/addons/postgres-reader/docker-compose.yml b/docker/addons/postgres-reader/docker-compose.yml index 67cc87f8ce..32e163cc58 100644 --- a/docker/addons/postgres-reader/docker-compose.yml +++ b/docker/addons/postgres-reader/docker-compose.yml @@ -35,8 +35,8 @@ services: MF_JAEGER_URL: ${MF_JAEGER_URL} MF_THINGS_AUTH_GRPC_URL: ${MF_THINGS_AUTH_GRPC_URL} MF_THINGS_AUTH_GRPC_TIMEOUT: ${MF_THINGS_AUTH_GRPC_TIMEOUT} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT : ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT : ${MF_USERS_GRPC_TIMEOUT} ports: - ${MF_POSTGRES_READER_PORT}:${MF_POSTGRES_READER_PORT} networks: diff --git a/docker/addons/smpp-notifier/docker-compose.yml b/docker/addons/smpp-notifier/docker-compose.yml index fbdfb5c2e0..cd70eca5b5 100644 --- a/docker/addons/smpp-notifier/docker-compose.yml +++ b/docker/addons/smpp-notifier/docker-compose.yml @@ -38,8 +38,8 @@ services: environment: MF_BROKER_URL: ${MF_BROKER_URL} MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT: ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT: ${MF_USERS_GRPC_TIMEOUT} MF_SMPP_NOTIFIER_LOG_LEVEL: ${MF_SMPP_NOTIFIER_LOG_LEVEL} MF_SMPP_NOTIFIER_DB_HOST: smpp-notifier-db 
MF_SMPP_NOTIFIER_DB_PORT: ${MF_SMPP_NOTIFIER_DB_PORT} diff --git a/docker/addons/smtp-notifier/docker-compose.yml b/docker/addons/smtp-notifier/docker-compose.yml index 315a1742eb..bee633ab12 100644 --- a/docker/addons/smtp-notifier/docker-compose.yml +++ b/docker/addons/smtp-notifier/docker-compose.yml @@ -45,8 +45,8 @@ services: MF_SMTP_NOTIFIER_PORT: ${MF_SMTP_NOTIFIER_PORT} MF_BROKER_URL: ${MF_BROKER_URL} MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT: ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT: ${MF_USERS_GRPC_TIMEOUT} MF_EMAIL_USERNAME: ${MF_EMAIL_USERNAME} MF_EMAIL_PASSWORD: ${MF_EMAIL_PASSWORD} MF_EMAIL_HOST: ${MF_EMAIL_HOST} diff --git a/docker/addons/twins/docker-compose.yml b/docker/addons/twins/docker-compose.yml index 675bda22a5..6ae29cf670 100644 --- a/docker/addons/twins/docker-compose.yml +++ b/docker/addons/twins/docker-compose.yml @@ -44,12 +44,13 @@ services: MF_TWINS_DB_PORT: ${MF_TWINS_DB_PORT} MF_TWINS_CHANNEL_ID: ${MF_TWINS_CHANNEL_ID} MF_BROKER_URL: ${MF_BROKER_URL} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT: ${MF_AUTH_GRPC_TIMEOUT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT: ${MF_USERS_GRPC_TIMEOUT} MF_TWINS_CACHE_URL: ${MF_TWINS_CACHE_URL} MF_TWINS_CACHE_PASS: ${MF_TWINS_CACHE_PASS} MF_TWINS_CACHE_DB: ${MF_TWINS_CACHE_DB} - + MF_THINGS_STANDALONE_ID: ${MF_THINGS_STANDALONE_ID} + MF_THINGS_STANDALONE_TOKEN: ${MF_THINGS_STANDALONE_TOKEN} ports: - ${MF_TWINS_HTTP_PORT}:${MF_TWINS_HTTP_PORT} networks: diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index c73fa90da4..2838e54464 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -8,65 +8,13 @@ networks: driver: bridge volumes: - mainflux-auth-db-volume: mainflux-users-db-volume: mainflux-things-db-volume: - mainflux-keto-db-volume: - mainflux-auth-redis-volume: + mainflux-things-redis-volume: mainflux-es-redis-volume: 
mainflux-mqtt-broker-volume: services: - keto: - image: oryd/keto:v0.6.0-alpha.3 - container_name: mainflux-keto - ports: - - ${MF_KETO_READ_REMOTE_PORT}:${MF_KETO_READ_REMOTE_PORT} - - ${MF_KETO_WRITE_REMOTE_PORT}:${MF_KETO_WRITE_REMOTE_PORT} - environment: - - DSN=postgresql://${MF_KETO_DB_USER}:${MF_KETO_DB_PASS}@keto-db:${MF_KETO_DB_PORT}/${MF_KETO_DB}?sslmode=disable - command: serve -c /home/ory/keto.yml - restart: on-failure - volumes: - - type: bind - source: ./keto - target: /home/ory - networks: - - mainflux-base-net - depends_on: - - keto-db - - keto-migrate: - image: oryd/keto:v0.6.0-alpha.3 - container_name: mainflux-keto-migrate - environment: - - KETO_WRITE_REMOTE=keto:${MF_KETO_WRITE_REMOTE_PORT} - - KETO_READ_REMOTE=keto:${MF_KETO_READ_REMOTE_PORT} - - DSN=postgresql://${MF_KETO_DB_USER}:${MF_KETO_DB_PASS}@keto-db:${MF_KETO_DB_PORT}/${MF_KETO_DB}?sslmode=disable - volumes: - - type: bind - source: ./keto - target: /home/ory - command: migrate up --all-namespaces -c /home/ory/keto.yml --yes - restart: on-failure - networks: - - mainflux-base-net - depends_on: - - keto-db - - keto-db: - image: postgres:13.3-alpine - container_name: mainflux-keto-db - restart: on-failure - environment: - POSTGRES_USER: ${MF_KETO_DB_USER} - POSTGRES_PASSWORD: ${MF_KETO_DB_PASS} - POSTGRES_DB: ${MF_KETO_DB} - networks: - - mainflux-base-net - volumes: - - mainflux-keto-db-volume:/var/lib/postgresql/data - nginx: image: nginx:1.23.3-alpine container_name: mainflux-nginx @@ -107,52 +55,52 @@ services: ports: - 4222:4222 - auth-db: + things-db: image: postgres:13.3-alpine - container_name: mainflux-auth-db + container_name: mainflux-things-db restart: on-failure environment: - POSTGRES_USER: ${MF_AUTH_DB_USER} - POSTGRES_PASSWORD: ${MF_AUTH_DB_PASS} - POSTGRES_DB: ${MF_AUTH_DB} + POSTGRES_USER: ${MF_THINGS_DB_USER} + POSTGRES_PASSWORD: ${MF_THINGS_DB_PASS} + POSTGRES_DB: ${MF_THINGS_DB} networks: - mainflux-base-net volumes: - - 
mainflux-auth-db-volume:/var/lib/postgresql/data + - mainflux-things-db-volume:/var/lib/postgresql/data - auth: - image: mainflux/auth:${MF_RELEASE_TAG} - container_name: mainflux-auth + things: + image: mainflux/things:${MF_RELEASE_TAG} + container_name: mainflux-things depends_on: - - auth-db - - keto - expose: - - ${MF_AUTH_GRPC_PORT} + - things-db + - users restart: on-failure environment: - MF_AUTH_LOG_LEVEL: ${MF_AUTH_LOG_LEVEL} - MF_AUTH_DB_HOST: auth-db - MF_AUTH_DB_PORT: ${MF_AUTH_DB_PORT} - MF_AUTH_DB_USER: ${MF_AUTH_DB_USER} - MF_AUTH_DB_PASS: ${MF_AUTH_DB_PASS} - MF_AUTH_DB: ${MF_AUTH_DB} - MF_AUTH_HTTP_PORT: ${MF_AUTH_HTTP_PORT} - MF_AUTH_GRPC_PORT: ${MF_AUTH_GRPC_PORT} - MF_AUTH_SECRET: ${MF_AUTH_SECRET} - MF_AUTH_LOGIN_TOKEN_DURATION: ${MF_AUTH_LOGIN_TOKEN_DURATION} + MF_THINGS_LOG_LEVEL: ${MF_THINGS_LOG_LEVEL} + MF_THINGS_DB_HOST: things-db + MF_THINGS_DB_PORT: ${MF_THINGS_DB_PORT} + MF_THINGS_DB_USER: ${MF_THINGS_DB_USER} + MF_THINGS_DB_PASS: ${MF_THINGS_DB_PASS} + MF_THINGS_DB: ${MF_THINGS_DB} + MF_THINGS_CACHE_URL: things-redis:${MF_REDIS_TCP_PORT} + MF_THINGS_ES_URL: es-redis:${MF_REDIS_TCP_PORT} + MF_THINGS_HTTP_PORT: ${MF_THINGS_HTTP_PORT} MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_KETO_READ_REMOTE_HOST: ${MF_KETO_READ_REMOTE_HOST} - MF_KETO_READ_REMOTE_PORT: ${MF_KETO_READ_REMOTE_PORT} - MF_KETO_WRITE_REMOTE_HOST: ${MF_KETO_WRITE_REMOTE_HOST} - MF_KETO_WRITE_REMOTE_PORT: ${MF_KETO_WRITE_REMOTE_PORT} + MF_THINGS_AUTH_HTTP_PORT: ${MF_THINGS_AUTH_HTTP_PORT} + MF_THINGS_AUTH_GRPC_PORT: ${MF_THINGS_AUTH_GRPC_PORT} + MF_AUTH_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_AUTH_GRPC_TIMEOUT: ${MF_USERS_GRPC_TIMEOUT} + MF_THINGS_STANDALONE_ID: ${MF_THINGS_STANDALONE_ID} + MF_THINGS_STANDALONE_TOKEN: ${MF_THINGS_STANDALONE_TOKEN} ports: - - ${MF_AUTH_HTTP_PORT}:${MF_AUTH_HTTP_PORT} - - ${MF_AUTH_GRPC_PORT}:${MF_AUTH_GRPC_PORT} + - ${MF_THINGS_HTTP_PORT}:${MF_THINGS_HTTP_PORT} + - ${MF_THINGS_AUTH_HTTP_PORT}:${MF_THINGS_AUTH_HTTP_PORT} + - 
${MF_THINGS_AUTH_GRPC_PORT}:${MF_THINGS_AUTH_GRPC_PORT} networks: - mainflux-base-net users-db: - image: postgres:13.3-alpine + image: postgres:15.1-alpine container_name: mainflux-users-db restart: on-failure environment: @@ -164,14 +112,20 @@ services: volumes: - mainflux-users-db-volume:/var/lib/postgresql/data + things-redis: + image: redis:6.2.2-alpine + container_name: mainflux-things-redis + restart: on-failure + networks: + - mainflux-base-net + volumes: + - mainflux-things-redis-volume:/data + users: image: mainflux/users:${MF_RELEASE_TAG} container_name: mainflux-users - volumes: - - ./templates/${MF_USERS_RESET_PWD_TEMPLATE}:/${MF_EMAIL_TEMPLATE} depends_on: - users-db - - auth restart: on-failure environment: MF_USERS_LOG_LEVEL: ${MF_USERS_LOG_LEVEL} @@ -180,79 +134,34 @@ services: MF_USERS_DB_USER: ${MF_USERS_DB_USER} MF_USERS_DB_PASS: ${MF_USERS_DB_PASS} MF_USERS_DB: ${MF_USERS_DB} + MF_USERS_DB_SSL_MODE: ${MF_USERS_DB_SSL_MODE} + MF_USERS_DB_SSL_CERT: ${MF_USERS_DB_SSL_CERT} + MF_USERS_DB_SSL_KEY: ${MF_USERS_DB_SSL_KEY} + MF_USERS_DB_SSL_ROOT_CERT: ${MF_USERS_DB_SSL_ROOT_CERT} MF_USERS_HTTP_PORT: ${MF_USERS_HTTP_PORT} + MF_USERS_GRPC_PORT: ${MF_USERS_GRPC_PORT} MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_EMAIL_HOST: ${MF_EMAIL_HOST} - MF_EMAIL_PORT: ${MF_EMAIL_PORT} - MF_EMAIL_USERNAME: ${MF_EMAIL_USERNAME} - MF_EMAIL_PASSWORD: ${MF_EMAIL_PASSWORD} - MF_EMAIL_FROM_ADDRESS: ${MF_EMAIL_FROM_ADDRESS} - MF_EMAIL_FROM_NAME: ${MF_EMAIL_FROM_NAME} - MF_EMAIL_TEMPLATE: ${MF_EMAIL_TEMPLATE} - MF_TOKEN_RESET_ENDPOINT: ${MF_TOKEN_RESET_ENDPOINT} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT: ${MF_AUTH_GRPC_TIMEOUT} + MF_USERS_GRPC_URL: ${MF_USERS_GRPC_URL} + MF_USERS_GRPC_TIMEOUT: ${MF_USERS_GRPC_TIMEOUT} + MF_USERS_SERVER_CERT: ${MF_USERS_SERVER_CERT} + MF_USERS_SERVER_KEY: ${MF_USERS_SERVER_KEY} + MF_USERS_SECRET_KEY: ${MF_USERS_SECRET_KEY} MF_USERS_ADMIN_EMAIL: ${MF_USERS_ADMIN_EMAIL} MF_USERS_ADMIN_PASSWORD: ${MF_USERS_ADMIN_PASSWORD} - 
MF_USERS_ALLOW_SELF_REGISTER: ${MF_USERS_ALLOW_SELF_REGISTER} + MF_USERS_PASS_REGEX: ${MF_USERS_PASS_REGEX} + MF_USERS_ACCESS_TOKEN_DURATION: ${MF_USERS_ACCESS_TOKEN_DURATION} + MF_USERS_REFRESH_TOKEN_DURATION: ${MF_USERS_REFRESH_TOKEN_DURATION} + MF_TOKEN_RESET_ENDPOINT: ${MF_TOKEN_RESET_ENDPOINT} ports: - ${MF_USERS_HTTP_PORT}:${MF_USERS_HTTP_PORT} - networks: - - mainflux-base-net - - things-db: - image: postgres:13.3-alpine - container_name: mainflux-things-db - restart: on-failure - environment: - POSTGRES_USER: ${MF_THINGS_DB_USER} - POSTGRES_PASSWORD: ${MF_THINGS_DB_PASS} - POSTGRES_DB: ${MF_THINGS_DB} - networks: - - mainflux-base-net - volumes: - - mainflux-things-db-volume:/var/lib/postgresql/data - - auth-redis: - image: redis:6.2.2-alpine - container_name: mainflux-auth-redis - restart: on-failure + - ${MF_USERS_GRPC_PORT}:${MF_USERS_GRPC_PORT} networks: - mainflux-base-net volumes: - - mainflux-auth-redis-volume:/data - - things: - image: mainflux/things:${MF_RELEASE_TAG} - container_name: mainflux-things - depends_on: - - things-db - - auth - restart: on-failure - environment: - MF_THINGS_LOG_LEVEL: ${MF_THINGS_LOG_LEVEL} - MF_THINGS_DB_HOST: things-db - MF_THINGS_DB_PORT: ${MF_THINGS_DB_PORT} - MF_THINGS_DB_USER: ${MF_THINGS_DB_USER} - MF_THINGS_DB_PASS: ${MF_THINGS_DB_PASS} - MF_THINGS_DB: ${MF_THINGS_DB} - MF_THINGS_CACHE_URL: auth-redis:${MF_REDIS_TCP_PORT} - MF_THINGS_ES_URL: es-redis:${MF_REDIS_TCP_PORT} - MF_THINGS_HTTP_PORT: ${MF_THINGS_HTTP_PORT} - MF_THINGS_AUTH_HTTP_PORT: ${MF_THINGS_AUTH_HTTP_PORT} - MF_THINGS_AUTH_GRPC_PORT: ${MF_THINGS_AUTH_GRPC_PORT} - MF_JAEGER_URL: ${MF_JAEGER_URL} - MF_AUTH_GRPC_URL: ${MF_AUTH_GRPC_URL} - MF_AUTH_GRPC_TIMEOUT: ${MF_AUTH_GRPC_TIMEOUT} - ports: - - ${MF_THINGS_HTTP_PORT}:${MF_THINGS_HTTP_PORT} - - ${MF_THINGS_AUTH_HTTP_PORT}:${MF_THINGS_AUTH_HTTP_PORT} - - ${MF_THINGS_AUTH_GRPC_PORT}:${MF_THINGS_AUTH_GRPC_PORT} - networks: - - mainflux-base-net - + - 
./templates/${MF_USERS_RESET_PWD_TEMPLATE}:/email.tmpl + jaeger: - image: jaegertracing/all-in-one:1.20 + image: jaegertracing/all-in-one:1.38.0 container_name: mainflux-jaeger ports: - ${MF_JAEGER_PORT}:${MF_JAEGER_PORT}/udp @@ -296,7 +205,7 @@ services: MF_JAEGER_URL: ${MF_JAEGER_URL} MF_THINGS_AUTH_GRPC_URL: ${MF_THINGS_AUTH_GRPC_URL} MF_THINGS_AUTH_GRPC_TIMEOUT: ${MF_THINGS_AUTH_GRPC_TIMEOUT} - MF_AUTH_CACHE_URL: auth-redis:${MF_REDIS_TCP_PORT} + MF_AUTH_CACHE_URL: things-redis:${MF_REDIS_TCP_PORT} networks: - mainflux-base-net diff --git a/docker/keto/keto.yml b/docker/keto/keto.yml deleted file mode 100644 index 311161aec9..0000000000 --- a/docker/keto/keto.yml +++ /dev/null @@ -1,16 +0,0 @@ -version: v0.6.0-alpha.3 - -log: - level: debug - -namespaces: - - id: 0 - name: members - -serve: - read: - host: 0.0.0.0 - port: 4466 - write: - host: 0.0.0.0 - port: 4467 diff --git a/docker/nginx/entrypoint.sh b/docker/nginx/entrypoint.sh index bf7144623e..9deccadeda 100755 --- a/docker/nginx/entrypoint.sh +++ b/docker/nginx/entrypoint.sh @@ -16,7 +16,7 @@ envsubst ' ${MF_HTTP_ADAPTER_PORT} ${MF_NGINX_MQTT_PORT} ${MF_NGINX_MQTTS_PORT} - ${MF_AUTH_HTTP_PORT} + ${MF_CLIENTS_HTTP_PORT} ${MF_WS_ADAPTER_PORT}' < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf exec nginx -g "daemon off;" diff --git a/docker/nginx/nginx-key.conf b/docker/nginx/nginx-key.conf index 5c38db01b9..57245f657c 100644 --- a/docker/nginx/nginx-key.conf +++ b/docker/nginx/nginx-key.conf @@ -56,6 +56,11 @@ http { proxy_pass http://users:${MF_USERS_HTTP_PORT}; } + location ~ ^/(policies) { + include snippets/proxy-headers.conf; + proxy_pass http://users:${MF_USERS_HTTP_PORT}; + } + # Proxy pass to things service location ~ ^/(things|channels|connect|disconnect) { include snippets/proxy-headers.conf; @@ -66,7 +71,7 @@ http { location ~ ^/(identify){ include snippets/proxy-headers.conf; add_header Access-Control-Expose-Headers Location; - proxy_pass http://things:${MF_THINGS_AUTH_HTTP_PORT}; + 
proxy_pass http://things:${MF_THINGS_HTTP_PORT}; } # Proxy pass for groups to things service @@ -86,9 +91,9 @@ http { location ~ ^/(groups|members|keys) { include snippets/proxy-headers.conf; add_header Access-Control-Expose-Headers Location; - proxy_pass http://auth:${MF_AUTH_HTTP_PORT}; + proxy_pass http://users:${MF_USERS_HTTP_PORT}; } - + location /health { include snippets/proxy-headers.conf; proxy_pass http://things:${MF_THINGS_HTTP_PORT}; diff --git a/docker/nginx/nginx-x509.conf b/docker/nginx/nginx-x509.conf index 0cc8e0a21d..23f00938c6 100644 --- a/docker/nginx/nginx-x509.conf +++ b/docker/nginx/nginx-x509.conf @@ -64,7 +64,10 @@ http { proxy_pass http://users:${MF_USERS_HTTP_PORT}; } - + location ~ ^/(policies) { + include snippets/proxy-headers.conf; + proxy_pass http://users:${MF_USERS_HTTP_PORT}; + } # Proxy pass to things service location ~ ^/(things|channels|connect|disconnect) { @@ -76,7 +79,7 @@ http { location ~ ^/(identify){ include snippets/proxy-headers.conf; add_header Access-Control-Expose-Headers Location; - proxy_pass http://things:${MF_THINGS_AUTH_HTTP_PORT}; + proxy_pass http://things:${MF_THINGS_HTTP_PORT}; } # Proxy pass for groups to things service @@ -96,7 +99,7 @@ http { location ~ ^/(groups|members|keys) { include snippets/proxy-headers.conf; add_header Access-Control-Expose-Headers Location; - proxy_pass http://auth:${MF_AUTH_HTTP_PORT}; + proxy_pass http://users:${MF_USERS_HTTP_PORT}; } location /health { diff --git a/docker/ssl/Makefile b/docker/ssl/Makefile index 0c9fe71137..d2581a664d 100644 --- a/docker/ssl/Makefile +++ b/docker/ssl/Makefile @@ -5,7 +5,7 @@ OU_CRT = mainflux_crt EA = info@mainflux.com CN_CA = Mainflux_Self_Signed_CA CN_SRV = localhost -THING_KEY = # e.g. 8f65ed04-0770-4ce4-a291-6d1bf2000f4d +THING_SECRET = # e.g. 8f65ed04-0770-4ce4-a291-6d1bf2000f4d CRT_FILE_NAME = thing all: clean_certs ca server_cert @@ -30,7 +30,7 @@ server_cert: thing_cert: # Create mainflux server key and CSR. 
openssl req -new -sha256 -newkey rsa:4096 -nodes -keyout $(CRT_LOCATION)/$(CRT_FILE_NAME).key \ - -out $(CRT_LOCATION)/$(CRT_FILE_NAME).csr -subj "/CN=$(THING_KEY)/O=$(O)/OU=$(OU_CRT)/emailAddress=$(EA)" + -out $(CRT_LOCATION)/$(CRT_FILE_NAME).csr -subj "/CN=$(THING_SECRET)/O=$(O)/OU=$(OU_CRT)/emailAddress=$(EA)" # Sign client CSR. openssl x509 -req -days 730 -in $(CRT_LOCATION)/$(CRT_FILE_NAME).csr -CA $(CRT_LOCATION)/ca.crt -CAkey $(CRT_LOCATION)/ca.key -CAcreateserial -out $(CRT_LOCATION)/$(CRT_FILE_NAME).crt diff --git a/docker/templates/users.tmpl b/docker/templates/users.tmpl index 9ac08572dc..642dae74dd 100644 --- a/docker/templates/users.tmpl +++ b/docker/templates/users.tmpl @@ -1,9 +1,13 @@ -To: {{range $index, $v := .To}}{{if $index}},{{end}}{{$v}}{{end}} -From: {{.From}} -Subject: {{.Subject}} -{{.Header}} -You have initiated password reset. -Follow the link below to reset password. +Dear {{.User}}, + +We have received a request to reset your password for your account on {{.Host}}. To proceed with resetting your password, please click on the link below: + {{.Content}} -{{.Footer}} +If you did not initiate this request, please disregard this message and your password will remain unchanged. + +Thank you for using {{.Host}}. 
+ +Best regards, + +{{.Footer}} diff --git a/go.mod b/go.mod index ed6c893ebd..4103f4dd27 100644 --- a/go.mod +++ b/go.mod @@ -15,17 +15,19 @@ require ( github.com/go-zoo/bone v1.3.0 github.com/gocql/gocql v1.2.1 github.com/gofrs/uuid v4.3.0+incompatible - github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/golang/protobuf v1.5.2 + github.com/gookit/color v1.5.3 + github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e github.com/gopcua/opcua v0.1.6 github.com/gorilla/websocket v1.5.0 github.com/hashicorp/vault/api v1.8.1 github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f github.com/influxdata/influxdb-client-go/v2 v2.12.3 + github.com/ivanpirog/coloredcobra v1.0.1 github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa github.com/jackc/pgtype v1.13.0 github.com/jackc/pgx/v5 v5.2.0 github.com/jmoiron/sqlx v1.3.5 + github.com/lestrrat-go/jwx/v2 v2.0.8 github.com/mainflux/mproxy v0.3.0 github.com/mainflux/senml v1.5.0 github.com/mitchellh/mapstructure v1.5.0 @@ -33,7 +35,6 @@ require ( github.com/oklog/ulid/v2 v2.1.0 github.com/opentracing/opentracing-go v1.2.0 github.com/ory/dockertest/v3 v3.9.1 - github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9 github.com/pelletier/go-toml v1.9.5 github.com/plgd-dev/go-coap/v2 v2.6.0 github.com/prometheus/client_golang v1.13.0 @@ -41,15 +42,21 @@ require ( github.com/rubenv/sql-migrate v1.2.0 github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/subosito/gotenv v1.4.1 - github.com/uber/jaeger-client-go v2.30.0+incompatible go.mongodb.org/mongo-driver v1.10.3 + go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit v0.38.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 + go.opentelemetry.io/contrib/propagators/jaeger v1.15.0 + go.opentelemetry.io/otel v1.14.0 + go.opentelemetry.io/otel/exporters/jaeger v1.12.0 + 
go.opentelemetry.io/otel/sdk v1.12.0 + go.opentelemetry.io/otel/trace v1.14.0 golang.org/x/crypto v0.5.0 golang.org/x/net v0.10.0 golang.org/x/sync v0.1.0 gonum.org/v1/gonum v0.12.0 - google.golang.org/grpc v1.50.1 + google.golang.org/grpc v1.53.0 google.golang.org/protobuf v1.28.1 gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df ) @@ -62,9 +69,10 @@ require ( github.com/armon/go-radix v1.0.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/cli v20.10.21+incompatible // indirect @@ -75,7 +83,11 @@ require ( github.com/fxamacker/cbor/v2 v2.4.0 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/goccy/go-json v0.9.11 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect @@ -104,6 +116,11 @@ require ( github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect github.com/klauspost/compress v1.15.11 // indirect + github.com/lestrrat-go/blackmagic v1.0.1 // indirect + github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc v1.0.4 // indirect + github.com/lestrrat-go/iter v1.0.2 // indirect + github.com/lestrrat-go/option v1.0.0 
// indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect @@ -138,7 +155,7 @@ require ( github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.1 // indirect @@ -146,14 +163,16 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + go.opentelemetry.io/otel/metric v0.37.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/mod v0.8.0 // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.1.0 // indirect golang.org/x/tools v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index ea8e755133..b7deaa1923 100644 --- a/go.sum +++ b/go.sum @@ -20,12 +20,15 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= 
+cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= @@ -45,7 +48,6 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= @@ -86,8 +88,9 @@ github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8 github.com/cenkalti/backoff/v4 
v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -109,6 +112,7 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= @@ -118,6 +122,9 @@ github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew 
v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= @@ -184,6 +191,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ocf/go-coap/v2 v2.0.4-0.20200728125043-f38b86f047a7/go.mod h1:X9wVKcaOSx7wBxKcvrWgMQq1R2DNeA7NBLW2osIb8TM= github.com/go-ocf/kit v0.0.0-20200728130040-4aebdb6982bc/go.mod h1:TIsoMT/iB7t9P6ahkcOnsmvS83SIJsv9qXRfz/yLf6M= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -204,6 +216,8 @@ github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0 github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= github.com/gobuffalo/packr/v2 v2.8.3 
h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gocql/gocql v1.2.1 h1:G/STxUzD6pGvRHzG0Fi7S04SXejMKBbRZb7pwre1edU= github.com/gocql/gocql v1.2.1/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -218,8 +232,6 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -269,7 +281,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz 
v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -294,6 +306,10 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gookit/color v1.5.3 h1:twfIhZs4QLCtimkP7MOxlF3A0U/5cDPseRT9M/+2SCE= +github.com/gookit/color v1.5.3/go.mod h1:NUzwzeehUfl7GIb36pqId+UGmRfQcU/WiiyTTeNjHtE= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= +github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:AFIo+02s+12CEg8Gzz9kzhCbmbq6JcKNrhHffCGA9z4= github.com/gopcua/opcua v0.1.6 h1:B9SVRKQGzcWcwP2QPYN93Uku32+3wL+v5cgzBxE6V5I= github.com/gopcua/opcua v0.1.6/go.mod h1:INwnDoRxmNWAt7+tzqxuGqQkSF2c1C69VAL0c2q6AcY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -383,6 +399,8 @@ github.com/influxdata/influxdb-client-go/v2 v2.12.3 h1:28nRlNMRIV4QbtIUvxhWqaxn0 github.com/influxdata/influxdb-client-go/v2 v2.12.3/go.mod h1:IrrLUbCjjfkmRuaCiGQg4m2GbkaeJDcuWoxiWdQEbA0= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/ivanpirog/coloredcobra v1.0.1 h1:aURSdEmlR90/tSiWS0dMjdwOvCVUeYLfltLfbgNxrN4= +github.com/ivanpirog/coloredcobra v1.0.1/go.mod 
h1:iho4nEKcnwZFiniGSdcgdvRgZNjxm+h20acv8vqmN6Q= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= @@ -472,8 +490,20 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/lestrrat-go/blackmagic v1.0.1 h1:lS5Zts+5HIC/8og6cGHb0uCcNCa3OUt1ygh3Qz2Fe80= +github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc v1.0.4 h1:bAZymwoZQb+Oq8MEbyipag7iSq6YIga8Wj6GOiJGdI8= +github.com/lestrrat-go/httprc v1.0.4/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= github.com/lestrrat-go/iter v0.0.0-20200422075355-fc1769541911/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= +github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= +github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= github.com/lestrrat-go/jwx v1.0.2/go.mod h1:TPF17WiSFegZo+c20fdpw49QD+/7n4/IsGvEmCSWwT0= +github.com/lestrrat-go/jwx/v2 v2.0.8 h1:jCFT8oc0hEDVjgUgsBy1F9cbjsjAVZSXNi7JaU9HR/Q= +github.com/lestrrat-go/jwx/v2 v2.0.8/go.mod h1:zLxnyv9rTlEvOUHbc48FAfIL8iYu2hHvIRaTFGc8mT0= +github.com/lestrrat-go/option v1.0.0 h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4= +github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lestrrat-go/pdebug 
v0.0.0-20200204225717-4d6bd78da58d/go.mod h1:B06CSso/AWxiPejj+fheUINGeBKeeEZNt8w+EoU7+L8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -593,8 +623,6 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= -github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9 h1:gP86NkMkUlqMOTjFQ8lt8T1HbHtCJGGeeeh/6c+nla0= -github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9/go.mod h1:8IoeBQqIRKWU5L6dTKQTlTwVhlUawpqSBJZWfLLN4FM= github.com/panjf2000/ants/v2 v2.4.3/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -709,6 +737,7 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= @@ -734,8 +763,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= @@ -743,10 +773,6 @@ github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= -github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -773,6 +799,8 @@ github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= @@ -797,6 +825,22 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit v0.38.0 h1:S3HIwNMSaEIgrHVUf0SQaeDuh/aNk7qUI5kSiz8bBvs= +go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit v0.38.0/go.mod h1:VcDVc705BPYLEpe//iIei8Q0ABqYhlHHaQm2N7OUxCc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 h1:5jD3teb4Qh7mx/nfzq4jO2WFFpvXD0vYWFDrdvNWmXk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw= +go.opentelemetry.io/contrib/propagators/jaeger v1.15.0 h1:xdJjwy5t/8I+TZehMMQ+r2h50HREihH2oMUhimQ+jug= +go.opentelemetry.io/contrib/propagators/jaeger v1.15.0/go.mod 
h1:tU0nwW4QTvKceNUP60/PQm0FI8zDSwey7gIFt3RR/yw= +go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel/exporters/jaeger v1.12.0 h1:1Vy11S0iAD70EPfcP3N2f2IhLq/cIuTW+Zt010MswR8= +go.opentelemetry.io/otel/exporters/jaeger v1.12.0/go.mod h1:SCLbaspEoU9mGJZB6ksc2iSGU6CLWY5yefchDqOM0IM= +go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= +go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o= +go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE= +go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -839,6 +883,7 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod 
h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= @@ -949,6 +994,7 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1175,6 +1221,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1218,10 +1265,9 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210503173045-b96a97608f20/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71 h1:GEgb2jF5zxsFJpJfg9RoDDWm7tiwc/DDSTE2BtLUkXU= -google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1241,10 +1287,9 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/http/adapter.go b/http/adapter.go index b0e13699f9..29d3cf3382 100644 --- a/http/adapter.go +++ b/http/adapter.go @@ -8,8 +8,9 @@ package http import ( "context" - "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/messaging" + "github.com/mainflux/mainflux/things/policies" ) // Service specifies coap service API. @@ -22,11 +23,11 @@ var _ Service = (*adapterService)(nil) type adapterService struct { publisher messaging.Publisher - things mainflux.ThingsServiceClient + things policies.ThingsServiceClient } // New instantiates the HTTP adapter implementation. -func New(publisher messaging.Publisher, things mainflux.ThingsServiceClient) Service { +func New(publisher messaging.Publisher, things policies.ThingsServiceClient) Service { return &adapterService{ publisher: publisher, things: things, @@ -34,15 +35,20 @@ func New(publisher messaging.Publisher, things mainflux.ThingsServiceClient) Ser } func (as *adapterService) Publish(ctx context.Context, token string, msg *messaging.Message) error { - ar := &mainflux.AccessByKeyReq{ - Token: token, - ChanID: msg.Channel, + ar := &policies.AuthorizeReq{ + Sub: token, + Obj: msg.Channel, + Act: policies.WriteAction, + EntityType: policies.GroupEntityType, } - thid, err := as.things.CanAccessByKey(ctx, ar) + res, err := as.things.Authorize(ctx, ar) if err != nil { return err } - msg.Publisher = thid.GetValue() + if !res.GetAuthorized() { + return errors.ErrAuthorization + } + msg.Publisher = res.GetThingID() return as.publisher.Publish(ctx, msg.Channel, msg) } diff --git a/http/api/endpoint_test.go b/http/api/endpoint_test.go index 
74bd55bf32..3cfff29502 100644 --- a/http/api/endpoint_test.go +++ b/http/api/endpoint_test.go @@ -11,25 +11,21 @@ import ( "strings" "testing" - "github.com/opentracing/opentracing-go/mocktracer" - - "github.com/mainflux/mainflux" adapter "github.com/mainflux/mainflux/http" "github.com/mainflux/mainflux/http/api" "github.com/mainflux/mainflux/http/mocks" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/things/policies" "github.com/stretchr/testify/assert" ) -func newService(cc mainflux.ThingsServiceClient) adapter.Service { +func newService(cc policies.ThingsServiceClient) adapter.Service { pub := mocks.NewPublisher() return adapter.New(pub, cc) } func newHTTPServer(svc adapter.Service) *httptest.Server { - logger := logger.NewMock() - mux := api.MakeHandler(svc, mocktracer.New(), logger) + mux := api.MakeHandler(svc) return httptest.NewServer(mux) } diff --git a/http/api/logging.go b/http/api/logging.go index 7b2e06c24d..8a58a7a690 100644 --- a/http/api/logging.go +++ b/http/api/logging.go @@ -11,19 +11,19 @@ import ( "time" "github.com/mainflux/mainflux/http" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" ) var _ http.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc http.Service } // LoggingMiddleware adds logging facilities to the adapter. 
-func LoggingMiddleware(svc http.Service, logger log.Logger) http.Service { +func LoggingMiddleware(svc http.Service, logger mflog.Logger) http.Service { return &loggingMiddleware{logger, svc} } diff --git a/http/api/transport.go b/http/api/transport.go index bb03ed9689..d73d49858c 100644 --- a/http/api/transport.go +++ b/http/api/transport.go @@ -6,23 +6,22 @@ package api import ( "context" "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "regexp" "strings" "time" - kitot "github.com/go-kit/kit/tracing/opentracing" kithttp "github.com/go-kit/kit/transport/http" "github.com/go-zoo/bone" "github.com/mainflux/mainflux" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + adapter "github.com/mainflux/mainflux/http" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/messaging" - opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus/promhttp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -42,21 +41,21 @@ var ( var channelPartRegExp = regexp.MustCompile(`^/channels/([\w\-]+)/messages(/[^?]*)?(\?.*)?$`) // MakeHandler returns a HTTP handler for API endpoints. 
-func MakeHandler(svc adapter.Service, tracer opentracing.Tracer, logger logger.Logger) http.Handler { +func MakeHandler(svc adapter.Service) http.Handler { opts := []kithttp.ServerOption{ kithttp.ServerErrorEncoder(encodeError), } r := bone.New() r.Post("/channels/:chanID/messages", kithttp.NewServer( - kitot.TraceServer(tracer, "publish")(sendMessageEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("publish"))(sendMessageEndpoint(svc)), decodeRequest, encodeResponse, opts..., )) r.Post("/channels/:chanID/messages/*", kithttp.NewServer( - kitot.TraceServer(tracer, "publish")(sendMessageEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("publish"))(sendMessageEndpoint(svc)), decodeRequest, encodeResponse, opts..., @@ -97,7 +96,7 @@ func parseSubtopic(subtopic string) (string, error) { return subtopic, nil } -func decodeRequest(ctx context.Context, r *http.Request) (interface{}, error) { +func decodeRequest(_ context.Context, r *http.Request) (interface{}, error) { ct := r.Header.Get("Content-Type") if ct != ctSenmlJSON && ct != ctJSON && ct != ctSenmlCBOR { return nil, errors.ErrUnsupportedContentType @@ -122,7 +121,7 @@ func decodeRequest(ctx context.Context, r *http.Request) (interface{}, error) { token = apiutil.ExtractThingKey(r) } - payload, err := ioutil.ReadAll(r.Body) + payload, err := io.ReadAll(r.Body) if err != nil { return nil, errors.ErrMalformedEntity } @@ -142,7 +141,7 @@ func decodeRequest(ctx context.Context, r *http.Request) (interface{}, error) { return req, nil } -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { +func encodeResponse(_ context.Context, w http.ResponseWriter, _ interface{}) error { w.WriteHeader(http.StatusAccepted) return nil } diff --git a/http/mocks/things.go b/http/mocks/things.go index a852b16641..76971f6fd4 100644 --- a/http/mocks/things.go +++ b/http/mocks/things.go @@ -6,15 +6,14 @@ package mocks import ( "context" - 
"github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -var _ mainflux.ThingsServiceClient = (*thingsClient)(nil) +var _ policies.ThingsServiceClient = (*thingsClient)(nil) // ServiceErrToken is used to simulate internal server error. const ServiceErrToken = "unavailable" @@ -24,40 +23,31 @@ type thingsClient struct { } // NewThingsClient returns mock implementation of things service client. -func NewThingsClient(data map[string]string) mainflux.ThingsServiceClient { +func NewThingsClient(data map[string]string) policies.ThingsServiceClient { return &thingsClient{data} } -func (tc thingsClient) CanAccessByKey(ctx context.Context, req *mainflux.AccessByKeyReq, opts ...grpc.CallOption) (*mainflux.ThingID, error) { - key := req.GetToken() +func (tc thingsClient) Authorize(ctx context.Context, req *policies.AuthorizeReq, opts ...grpc.CallOption) (*policies.AuthorizeRes, error) { + secret := req.GetSub() // Since there is no appropriate way to simulate internal server error, // we had to use this obscure approach. ErrorToken simulates gRPC // call which returns internal server error. 
- if key == ServiceErrToken { - return nil, status.Error(codes.Internal, "internal server error") + if secret == ServiceErrToken { + return &policies.AuthorizeRes{ThingID: "", Authorized: false}, status.Error(codes.Internal, "internal server error") } - if key == "" { - return nil, errors.ErrAuthentication + if secret == "" { + return &policies.AuthorizeRes{ThingID: "", Authorized: false}, errors.ErrAuthentication } - id, ok := tc.things[key] + id, ok := tc.things[secret] if !ok { - return nil, status.Error(codes.Unauthenticated, "invalid credentials provided") + return &policies.AuthorizeRes{ThingID: "", Authorized: false}, status.Error(codes.Unauthenticated, "invalid credentials provided") } - - return &mainflux.ThingID{Value: id}, nil -} - -func (tc thingsClient) CanAccessByID(context.Context, *mainflux.AccessByIDReq, ...grpc.CallOption) (*empty.Empty, error) { - panic("not implemented") -} - -func (tc thingsClient) IsChannelOwner(context.Context, *mainflux.ChannelOwnerReq, ...grpc.CallOption) (*empty.Empty, error) { - panic("not implemented") + return &policies.AuthorizeRes{ThingID: id, Authorized: true}, nil } -func (tc thingsClient) Identify(ctx context.Context, req *mainflux.Token, opts ...grpc.CallOption) (*mainflux.ThingID, error) { +func (tc thingsClient) Identify(ctx context.Context, req *policies.Key, opts ...grpc.CallOption) (*policies.ClientID, error) { panic("not implemented") } diff --git a/http/tracing/adapter.go b/http/tracing/adapter.go index 9aa5d5da11..acefc6e19e 100644 --- a/http/tracing/adapter.go +++ b/http/tracing/adapter.go @@ -5,22 +5,22 @@ import ( "github.com/mainflux/mainflux/http" "github.com/mainflux/mainflux/pkg/messaging" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) var _ http.Service = (*serviceMiddleware)(nil) -const publishOP = "publishOP" +const publishOP = "publish" // serviceMiddleware implements the http.Service interface, providing a middleware layer for tracing HTTP requests. 
-// It creates a new span for each request and sets it as the active span in the OpenTracing context. +// It creates a new span for each request and sets it as the active span in the OpenTelemetry context. type serviceMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer svc http.Service } // New creates a new instance of the http.Service interface with tracing middleware. -func New(tracer opentracing.Tracer, svc http.Service) http.Service { +func New(tracer trace.Tracer, svc http.Service) http.Service { return &serviceMiddleware{ tracer: tracer, svc: svc, @@ -30,12 +30,7 @@ func New(tracer opentracing.Tracer, svc http.Service) http.Service { // Publish traces HTTP publish operations. // It starts a new span as a child of the incoming span (if there is one) and sets it as the active span in the context. func (sm *serviceMiddleware) Publish(ctx context.Context, token string, msg *messaging.Message) error { - var spanCtx opentracing.SpanContext = nil - if httpSpan := opentracing.SpanFromContext(ctx); httpSpan != nil { - spanCtx = httpSpan.Context() - } - span := sm.tracer.StartSpan(publishOP, opentracing.ChildOf(spanCtx)) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := sm.tracer.Start(ctx, publishOP) + defer span.End() return sm.svc.Publish(ctx, token, msg) } diff --git a/internal/api/common.go b/internal/api/common.go new file mode 100644 index 0000000000..3ed74eb9f0 --- /dev/null +++ b/internal/api/common.go @@ -0,0 +1,123 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/gofrs/uuid" + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/postgres" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" +) + +const ( + StatusKey = "status" + OffsetKey = "offset" + LimitKey = "limit" + MetadataKey = "metadata" + ParentKey = "parent_id" + OwnerKey = "owner_id" + 
ClientKey = "client" + IdentityKey = "identity" + GroupKey = "group" + ActionKey = "action" + TagKey = "tag" + NameKey = "name" + TotalKey = "total" + SubjectKey = "subject" + ObjectKey = "object" + LevelKey = "level" + TreeKey = "tree" + DirKey = "dir" + VisibilityKey = "visibility" + SharedByKey = "shared_by" + TokenKey = "token" + DefTotal = uint64(100) + DefOffset = 0 + DefLimit = 10 + DefLevel = 0 + DefStatus = "enabled" + DefClientStatus = mfclients.Enabled + DefGroupStatus = mfclients.Enabled + SharedVisibility = "shared" + MyVisibility = "mine" + AllVisibility = "all" + // ContentType represents JSON content type. + ContentType = "application/json" + + // MaxNameSize limits name size to prevent making them too complex. + MaxLimitSize = 100 + MaxNameSize = 1024 + NameOrder = "name" + IDOrder = "id" + AscDir = "asc" + DescDir = "desc" +) + +func ValidateUUID(extID string) (err error) { + id, err := uuid.FromString(extID) + if id.String() != extID || err != nil { + return apiutil.ErrInvalidIDFormat + } + + return nil +} + +// EncodeResponse encodes successful response. +func EncodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { + if ar, ok := response.(mainflux.Response); ok { + for k, v := range ar.Headers() { + w.Header().Set(k, v) + } + w.Header().Set("Content-Type", ContentType) + w.WriteHeader(ar.Code()) + + if ar.Empty() { + return nil + } + } + + return json.NewEncoder(w).Encode(response) +} + +// EncodeError encodes an error response. 
+func EncodeError(_ context.Context, err error, w http.ResponseWriter) { + w.Header().Set("Content-Type", ContentType) + switch { + case errors.Contains(err, errors.ErrMalformedEntity), + err == apiutil.ErrMissingID, + err == apiutil.ErrEmptyList, + err == apiutil.ErrMissingMemberType, + errors.Contains(err, apiutil.ErrInvalidSecret), + err == apiutil.ErrNameSize: + w.WriteHeader(http.StatusBadRequest) + case errors.Contains(err, errors.ErrAuthentication): + w.WriteHeader(http.StatusUnauthorized) + case errors.Contains(err, errors.ErrNotFound): + w.WriteHeader(http.StatusNotFound) + case errors.Contains(err, errors.ErrConflict): + w.WriteHeader(http.StatusConflict) + case errors.Contains(err, errors.ErrAuthorization): + w.WriteHeader(http.StatusForbidden) + case errors.Contains(err, postgres.ErrMemberAlreadyAssigned): + w.WriteHeader(http.StatusConflict) + case errors.Contains(err, errors.ErrUnsupportedContentType): + w.WriteHeader(http.StatusUnsupportedMediaType) + case errors.Contains(err, errors.ErrCreateEntity), + errors.Contains(err, errors.ErrUpdateEntity), + errors.Contains(err, errors.ErrViewEntity), + errors.Contains(err, errors.ErrRemoveEntity): + w.WriteHeader(http.StatusInternalServerError) + default: + w.WriteHeader(http.StatusInternalServerError) + } + + if errorVal, ok := err.(errors.Error); ok { + if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { + w.WriteHeader(http.StatusInternalServerError) + } + } +} diff --git a/internal/apiutil/errors.go b/internal/apiutil/errors.go index 137d76454f..dcd0909758 100644 --- a/internal/apiutil/errors.go +++ b/internal/apiutil/errors.go @@ -33,6 +33,9 @@ var ( // ErrInvalidStatus indicates an invalid user account status. ErrInvalidStatus = errors.New("invalid user account status") + // ErrInvalidRole indicates that an invalid role. + ErrInvalidRole = errors.New("invalid client role") + // ErrLimitSize indicates that an invalid limit. 
ErrLimitSize = errors.New("invalid limit size") @@ -49,16 +52,16 @@ var ( ErrEmptyList = errors.New("empty list provided") // ErrMalformedPolicy indicates that policies are malformed. - ErrMalformedPolicy = errors.New("falmormed policy") + ErrMalformedPolicy = errors.New("malformed policy") // ErrMissingPolicySub indicates that policies are subject. - ErrMissingPolicySub = errors.New("falmormed policy subject") + ErrMissingPolicySub = errors.New("malformed policy subject") // ErrMissingPolicyObj indicates missing policies object. - ErrMissingPolicyObj = errors.New("falmormed policy object") + ErrMissingPolicyObj = errors.New("malformed policy object") - // ErrMissingPolicyAct indicates missing policies action. - ErrMissingPolicyAct = errors.New("falmormed policy action") + // ErrMalformedPolicyAct indicates malformed policies action. + ErrMalformedPolicyAct = errors.New("malformed policy action") // ErrMissingCertData indicates missing cert data (ttl). ErrMissingCertData = errors.New("missing certificate data") @@ -101,4 +104,43 @@ var ( // ErrBootstrapState indicates an invalid boostrap state. ErrBootstrapState = errors.New("invalid bootstrap state") + + // ErrMissingIdentity indicates missing entity Identity. + ErrMissingIdentity = errors.New("missing entity identity") + + // ErrMissingSecret indicates missing secret. + ErrMissingSecret = errors.New("missing secret") + + // ErrMissingOwner indicates missing entity owner. + ErrMissingOwner = errors.New("missing entity owner") + + // ErrMissingPolicyOwner indicates malformed policy owner + ErrMissingPolicyOwner = errors.New("malformed policy owner") + + // ErrMissingPolicyEntityType indicates malformed policy entity type + ErrMissingPolicyEntityType = errors.New("malformed or missing entity type") + + // ErrHigherPolicyRank indicates that the policy is of a higher rank than that of the client.
+ ErrHigherPolicyRank = errors.New("policy is of a higher rank than that of the client") + + // ErrMissingName indicates missing identity name. + ErrMissingName = errors.New("missing identity name") + + // ErrInvalidSecret indicates invalid secret. + ErrInvalidSecret = errors.New("invalid secret") + + // ErrInvalidLevel indicates an invalid group level. + ErrInvalidLevel = errors.New("invalid group level (should be between 0 and 5)") + + // ErrNotFoundParam indicates that the parameter was not found in the query + ErrNotFoundParam = errors.New("parameter not found in the query") + + // ErrMalformedEntity indicates a malformed entity specification + ErrMalformedEntity = errors.New("malformed entity specification") + + // ErrInvalidQueryParams indicates invalid query parameters + ErrInvalidQueryParams = errors.New("invalid query parameters") + + // ErrInvalidVisibilityType indicates invalid visibility type + ErrInvalidVisibilityType = errors.New("invalid visibility type") ) diff --git a/internal/apiutil/transport.go b/internal/apiutil/transport.go index 57488d6ad6..9884bf4d27 100644 --- a/internal/apiutil/transport.go +++ b/internal/apiutil/transport.go @@ -18,35 +18,38 @@ import ( // LoggingErrorEncoder is a go-kit error encoder logging decorator.
func LoggingErrorEncoder(logger logger.Logger, enc kithttp.ErrorEncoder) kithttp.ErrorEncoder { return func(ctx context.Context, err error, w http.ResponseWriter) { - switch err { - case ErrBearerToken, - ErrMissingID, - ErrBearerKey, - ErrInvalidAuthKey, - ErrInvalidIDFormat, - ErrNameSize, - ErrLimitSize, - ErrOffsetSize, - ErrInvalidOrder, - ErrInvalidDirection, - ErrEmptyList, - ErrMalformedPolicy, - ErrMissingPolicySub, - ErrMissingPolicyObj, - ErrMissingPolicyAct, - ErrMissingCertData, - ErrInvalidTopic, - ErrInvalidContact, - ErrMissingEmail, - ErrMissingHost, - ErrMissingPass, - ErrMissingConfPass, - ErrInvalidResetPass, - ErrInvalidComparator, - ErrMissingMemberType, - ErrInvalidAPIKey, - ErrMaxLevelExceeded, - ErrBootstrapState: + switch { + case errors.Contains(err, ErrBearerToken), + errors.Contains(err, ErrMissingID), + errors.Contains(err, ErrBearerKey), + errors.Contains(err, ErrInvalidAuthKey), + errors.Contains(err, ErrInvalidIDFormat), + errors.Contains(err, ErrNameSize), + errors.Contains(err, ErrLimitSize), + errors.Contains(err, ErrOffsetSize), + errors.Contains(err, ErrInvalidOrder), + errors.Contains(err, ErrInvalidDirection), + errors.Contains(err, ErrEmptyList), + errors.Contains(err, ErrMalformedPolicy), + errors.Contains(err, ErrMissingPolicySub), + errors.Contains(err, ErrMissingPolicyObj), + errors.Contains(err, ErrMalformedPolicyAct), + errors.Contains(err, ErrMissingCertData), + errors.Contains(err, ErrInvalidTopic), + errors.Contains(err, ErrInvalidContact), + errors.Contains(err, ErrMissingEmail), + errors.Contains(err, ErrMissingHost), + errors.Contains(err, ErrMissingPass), + errors.Contains(err, ErrMissingConfPass), + errors.Contains(err, ErrInvalidResetPass), + errors.Contains(err, ErrInvalidComparator), + errors.Contains(err, ErrMissingMemberType), + errors.Contains(err, ErrMaxLevelExceeded), + errors.Contains(err, ErrInvalidAPIKey), + errors.Contains(err, ErrInvalidLevel), + errors.Contains(err, ErrBootstrapState), + 
errors.Contains(err, ErrInvalidQueryParams), + errors.Contains(err, ErrMalformedEntity): logger.Error(err.Error()) } @@ -146,3 +149,36 @@ func ReadFloatQuery(r *http.Request, key string, def float64) (float64, error) { return val, nil } + +type number interface { + int64 | float64 | uint16 | uint64 +} + +// ReadNumQuery returns a numeric value. +func ReadNumQuery[N number](r *http.Request, key string, def N) (N, error) { + vals := bone.GetQuery(r, key) + if len(vals) > 1 { + return 0, errors.ErrInvalidQueryParams + } + if len(vals) == 0 { + return def, nil + } + val := vals[0] + + switch any(def).(type) { + case int64: + v, err := strconv.ParseInt(val, 10, 64) + return N(v), err + case uint64: + v, err := strconv.ParseUint(val, 10, 64) + return N(v), err + case uint16: + v, err := strconv.ParseUint(val, 10, 16) + return N(v), err + case float64: + v, err := strconv.ParseFloat(val, 64) + return N(v), err + default: + return def, nil + } +} diff --git a/internal/clients/grpc/auth/client.go b/internal/clients/grpc/auth/client.go index 58b2bcf3c2..532c0576c6 100644 --- a/internal/clients/grpc/auth/client.go +++ b/internal/clients/grpc/auth/client.go @@ -1,11 +1,11 @@ package auth import ( - "github.com/mainflux/mainflux" - authapi "github.com/mainflux/mainflux/auth/api/grpc" grpcClient "github.com/mainflux/mainflux/internal/clients/grpc" "github.com/mainflux/mainflux/internal/env" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" + authapi "github.com/mainflux/mainflux/users/policies/api/grpc" ) const envAuthGrpcPrefix = "MF_AUTH_GRPC_" @@ -13,16 +13,15 @@ const envAuthGrpcPrefix = "MF_AUTH_GRPC_" var errGrpcConfig = errors.New("failed to load grpc configuration") // Setup loads Auth gRPC configuration from environment variable and creates new Auth gRPC API -func Setup(envPrefix, jaegerURL string) (mainflux.AuthServiceClient, grpcClient.ClientHandler, error) { +func Setup(envPrefix, jaegerURL, svcName string) 
(policies.AuthServiceClient, grpcClient.ClientHandler, error) { config := grpcClient.Config{} if err := env.Parse(&config, env.Options{Prefix: envAuthGrpcPrefix, AltPrefix: envPrefix}); err != nil { return nil, nil, errors.Wrap(errGrpcConfig, err) } - - c, ch, err := grpcClient.Setup(config, "auth", jaegerURL) + c, ch, err := grpcClient.Setup(config, svcName, jaegerURL) if err != nil { return nil, nil, err } - return authapi.NewClient(c.Tracer, c.ClientConn, config.Timeout), ch, nil + return authapi.NewClient(c.ClientConn, config.Timeout), ch, nil } diff --git a/internal/clients/grpc/connect.go b/internal/clients/grpc/connect.go index dc9e48b16b..5aa58b2f47 100644 --- a/internal/clients/grpc/connect.go +++ b/internal/clients/grpc/connect.go @@ -4,12 +4,14 @@ package grpc import ( - "io" + "context" + "fmt" "time" jaegerClient "github.com/mainflux/mainflux/internal/clients/jaeger" "github.com/mainflux/mainflux/pkg/errors" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + tracesdk "go.opentelemetry.io/otel/sdk/trace" gogrpc "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" @@ -19,7 +21,7 @@ var ( errGrpcConnect = errors.New("failed to connect to grpc server") errJaeger = errors.New("failed to initialize jaeger ") errGrpcClose = errors.New("failed to close grpc connection") - errJaegerClose = errors.New("failed to close jaeger connection") + errJaegerClose = errors.New("failed to shut down jaeger tracer provider") ) type Config struct { @@ -37,8 +39,7 @@ type ClientHandler interface { type Client struct { *gogrpc.ClientConn - opentracing.Tracer - io.Closer + *tracesdk.TracerProvider secure bool } @@ -64,7 +65,7 @@ func Connect(cfg Config) (*gogrpc.ClientConn, bool, error) { secure = true } - opts = append(opts, gogrpc.WithTransportCredentials(tc)) + opts = append(opts, gogrpc.WithTransportCredentials(tc), 
gogrpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor())) conn, err := gogrpc.Dial(cfg.URL, opts...) if err != nil { @@ -84,33 +85,36 @@ func Setup(config Config, svcName, jaegerURL string) (*Client, ClientHandler, er } // initialize auth tracer for auth grpc client - tracer, tracerCloser, err := jaegerClient.NewTracer(svcName, jaegerURL) + tp, err := jaegerClient.NewProvider(fmt.Sprintf("auth.%s", svcName), jaegerURL) if err != nil { grpcClient.Close() return nil, nil, errors.Wrap(errJaeger, err) } - c := &Client{grpcClient, tracer, tracerCloser, secure} + c := &Client{grpcClient, tp, secure} return c, NewClientHandler(c), nil } +// Close shuts down trace provider. func (c *Client) Close() error { var retErr error err := c.ClientConn.Close() if err != nil { retErr = errors.Wrap(errGrpcClose, err) } - err = c.Closer.Close() - if err != nil { + if err := c.TracerProvider.Shutdown(context.Background()); err != nil { retErr = errors.Wrap(retErr, errors.Wrap(errJaegerClose, err)) } return retErr } +// IsSecure is utility method for checking if +// the client is running with TLS enabled. func (c *Client) IsSecure() bool { return c.secure } +// Secure is used for pretty printing TLS info. 
func (c *Client) Secure() string { if c.secure { return "with TLS" diff --git a/internal/clients/grpc/things/client.go b/internal/clients/grpc/things/client.go index 581f5821a0..ff12a757bb 100644 --- a/internal/clients/grpc/things/client.go +++ b/internal/clients/grpc/things/client.go @@ -1,11 +1,11 @@ package things import ( - "github.com/mainflux/mainflux" grpcClient "github.com/mainflux/mainflux/internal/clients/grpc" "github.com/mainflux/mainflux/internal/env" "github.com/mainflux/mainflux/pkg/errors" - thingsapi "github.com/mainflux/mainflux/things/api/auth/grpc" + "github.com/mainflux/mainflux/things/policies" + thingsapi "github.com/mainflux/mainflux/things/policies/api/grpc" ) const envThingsAuthGrpcPrefix = "MF_THINGS_AUTH_GRPC_" @@ -13,7 +13,7 @@ const envThingsAuthGrpcPrefix = "MF_THINGS_AUTH_GRPC_" var errGrpcConfig = errors.New("failed to load grpc configuration") // Setup loads Things gRPC configuration from environment variable and creates new Things gRPC API -func Setup(envPrefix, jaegerURL string) (mainflux.ThingsServiceClient, grpcClient.ClientHandler, error) { +func Setup(envPrefix, jaegerURL string) (policies.ThingsServiceClient, grpcClient.ClientHandler, error) { config := grpcClient.Config{} if err := env.Parse(&config, env.Options{Prefix: envThingsAuthGrpcPrefix, AltPrefix: envPrefix}); err != nil { return nil, nil, errors.Wrap(errGrpcConfig, err) @@ -24,5 +24,5 @@ func Setup(envPrefix, jaegerURL string) (mainflux.ThingsServiceClient, grpcClien return nil, nil, err } - return thingsapi.NewClient(c.ClientConn, c.Tracer, config.Timeout), ch, nil + return thingsapi.NewClient(c.ClientConn, config.Timeout), ch, nil } diff --git a/internal/clients/jaeger/provider.go b/internal/clients/jaeger/provider.go new file mode 100644 index 0000000000..ae01d2dc34 --- /dev/null +++ b/internal/clients/jaeger/provider.go @@ -0,0 +1,59 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package jaeger + +import ( + "context" + "errors" + + 
jaegerp "go.opentelemetry.io/contrib/propagators/jaeger" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" +) + +var ( + errNoURL = errors.New("URL is empty") + errNoSvcName = errors.New("Service Name is empty") +) + +// NewProvider initializes Jaeger TraceProvider +func NewProvider(svcName, url string) (*tracesdk.TracerProvider, error) { + if url == "" { + return nil, errNoURL + } + + if svcName == "" { + return nil, errNoSvcName + } + + exporter, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url))) + if err != nil { + return nil, err + } + + attributes := []attribute.KeyValue{semconv.ServiceNameKey.String(svcName)} + + hostAttr, err := resource.New(context.TODO(), resource.WithHost(), resource.WithOSDescription(), resource.WithContainer()) + if err != nil { + return nil, err + } + attributes = append(attributes, hostAttr.Attributes()...) 
+ + tp := tracesdk.NewTracerProvider( + tracesdk.WithSampler(tracesdk.AlwaysSample()), + tracesdk.WithBatcher(exporter), + tracesdk.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + attributes..., + )), + ) + otel.SetTracerProvider(tp) + otel.SetTextMapPropagator(jaegerp.Jaeger{}) + + return tp, nil +} diff --git a/internal/clients/jaeger/tracer.go b/internal/clients/jaeger/tracer.go deleted file mode 100644 index 5fbaf46d74..0000000000 --- a/internal/clients/jaeger/tracer.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package jaeger - -import ( - "errors" - "io" - "io/ioutil" - - "github.com/opentracing/opentracing-go" - jconfig "github.com/uber/jaeger-client-go/config" -) - -var ( - errNoUrl = errors.New("URL is empty") - errNoSvcName = errors.New("Service Name is empty") -) - -// NewTracer initializes Jaeger -func NewTracer(svcName, url string) (opentracing.Tracer, io.Closer, error) { - if url == "" { - return opentracing.NoopTracer{}, ioutil.NopCloser(nil), errNoUrl - } - - if svcName == "" { - return opentracing.NoopTracer{}, ioutil.NopCloser(nil), errNoSvcName - } - - return jconfig.Configuration{ - ServiceName: svcName, - Sampler: &jconfig.SamplerConfig{ - Type: "const", - Param: 1, - }, - Reporter: &jconfig.ReporterConfig{ - LocalAgentHostPort: url, - LogSpans: true, - }, - }.NewTracer() -} diff --git a/internal/clients/redis/redis.go b/internal/clients/redis/redis.go index 687a1c8d37..1bd96a3c9c 100644 --- a/internal/clients/redis/redis.go +++ b/internal/clients/redis/redis.go @@ -6,7 +6,7 @@ package redis import ( "strconv" - r "github.com/go-redis/redis/v8" + "github.com/go-redis/redis/v8" "github.com/mainflux/mainflux/internal/env" "github.com/mainflux/mainflux/pkg/errors" ) @@ -24,7 +24,7 @@ type Config struct { } // Setup load configuration from environment, creates new RedisDB client and connect to RedisDB Server -func Setup(prefix string) (*r.Client, error) { +func Setup(prefix 
string) (*redis.Client, error) { cfg := Config{} if err := env.Parse(&cfg, env.Options{Prefix: prefix}); err != nil { return nil, errors.Wrap(errConfig, err) @@ -37,13 +37,13 @@ func Setup(prefix string) (*r.Client, error) { } // Connect create new RedisDB client and connect to RedisDB server -func Connect(cfg Config) (*r.Client, error) { +func Connect(cfg Config) (*redis.Client, error) { db, err := strconv.Atoi(cfg.DB) if err != nil { return nil, err } - return r.NewClient(&r.Options{ + return redis.NewClient(&redis.Options{ Addr: cfg.URL, Password: cfg.Pass, DB: db, diff --git a/internal/close.go b/internal/close.go index dd66211ed8..96dffb3597 100644 --- a/internal/close.go +++ b/internal/close.go @@ -2,10 +2,10 @@ package internal import ( grpcClient "github.com/mainflux/mainflux/internal/clients/grpc" - logger "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" ) -func Close(log logger.Logger, clientHandler grpcClient.ClientHandler) { +func Close(log mflog.Logger, clientHandler grpcClient.ClientHandler) { if err := clientHandler.Close(); err != nil { log.Warn(err.Error()) } diff --git a/internal/email/email.go b/internal/email/email.go index 1f1ec6af93..fe64f41a96 100644 --- a/internal/email/email.go +++ b/internal/email/email.go @@ -7,6 +7,7 @@ import ( "bytes" "net/mail" "strconv" + "strings" "text/template" "github.com/mainflux/mainflux/pkg/errors" @@ -26,7 +27,9 @@ type email struct { From string Subject string Header string + User string Content string + Host string Footer string } @@ -68,7 +71,7 @@ func New(c *Config) (*Agent, error) { } // Send sends e-mail -func (a *Agent) Send(To []string, From, Subject, Header, Content, Footer string) error { +func (a *Agent) Send(To []string, From, Subject, Header, User, Content, Footer string) error { if a.tmpl == nil { return errMissingEmailTemplate } @@ -79,7 +82,9 @@ func (a *Agent) Send(To []string, From, Subject, Header, Content, Footer string) From: From, Subject: Subject, 
Header: Header, + User: User, Content: Content, + Host: strings.Split(Content, "?")[0], Footer: Footer, } if From == "" { diff --git a/internal/postgres/common.go b/internal/postgres/common.go new file mode 100644 index 0000000000..84263acfc9 --- /dev/null +++ b/internal/postgres/common.go @@ -0,0 +1,68 @@ +package postgres + +import ( + "context" + "encoding/json" + "errors" + "fmt" +) + +var ( + // ErrAssignToGroup indicates failure to assign member to a group. + ErrAssignToGroup = errors.New("failed to assign member to a group") + + // ErrUnassignFromGroup indicates failure to unassign member from a group. + ErrUnassignFromGroup = errors.New("failed to unassign member from a group") + + // ErrMissingParent indicates that parent can't be found + ErrMissingParent = errors.New("failed to retrieve parent") + + // ErrGroupNotEmpty indicates group is not empty, can't be deleted. + ErrGroupNotEmpty = errors.New("group is not empty") + + // ErrMemberAlreadyAssigned indicates that members is already assigned. + ErrMemberAlreadyAssigned = errors.New("member is already assigned") + + // ErrFailedToRetrieveMembers failed to retrieve group members. + ErrFailedToRetrieveMembers = errors.New("failed to retrieve group members") + + // ErrFailedToRetrieveMembership failed to retrieve memberships + ErrFailedToRetrieveMembership = errors.New("failed to retrieve memberships") + + // ErrFailedToRetrieveAll failed to retrieve groups. + ErrFailedToRetrieveAll = errors.New("failed to retrieve all groups") + + // ErrFailedToRetrieveParents failed to retrieve groups. + ErrFailedToRetrieveParents = errors.New("failed to retrieve all groups") + + // ErrFailedToRetrieveChildren failed to retrieve groups. 
+ ErrFailedToRetrieveChildren = errors.New("failed to retrieve all groups") +) + +func CreateMetadataQuery(entity string, um map[string]interface{}) (string, []byte, error) { + if len(um) == 0 { + return "", nil, nil + } + param, err := json.Marshal(um) + if err != nil { + return "", nil, err + } + query := fmt.Sprintf("%smetadata @> :metadata", entity) + + return query, param, nil +} + +func Total(ctx context.Context, db Database, query string, params interface{}) (uint64, error) { + rows, err := db.NamedQueryContext(ctx, query, params) + if err != nil { + return 0, err + } + defer rows.Close() + total := uint64(0) + if rows.Next() { + if err := rows.Scan(&total); err != nil { + return 0, err + } + } + return total, nil +} diff --git a/internal/postgres/errors.go b/internal/postgres/errors.go new file mode 100644 index 0000000000..487c3a833f --- /dev/null +++ b/internal/postgres/errors.go @@ -0,0 +1,30 @@ +package postgres + +import ( + "github.com/jackc/pgx/v5/pgconn" + "github.com/mainflux/mainflux/pkg/errors" +) + +// Postgres error codes: +// https://www.postgresql.org/docs/current/errcodes-appendix.html +const ( + errDuplicate = "23505" // unique_violation + errTruncation = "22001" // string_data_right_truncation + errFK = "23503" // foreign_key_violation + errInvalid = "22P02" // invalid_text_representation +) + +func HandleError(err, wrapper error) error { + pqErr, ok := err.(*pgconn.PgError) + if ok { + switch pqErr.Code { + case errDuplicate: + return errors.Wrap(errors.ErrConflict, err) + case errInvalid, errTruncation: + return errors.Wrap(errors.ErrMalformedEntity, err) + case errFK: + return errors.Wrap(errors.ErrCreateEntity, err) + } + } + return errors.Wrap(wrapper, err) +} diff --git a/internal/postgres/tracing.go b/internal/postgres/tracing.go new file mode 100644 index 0000000000..66f3082638 --- /dev/null +++ b/internal/postgres/tracing.go @@ -0,0 +1,85 @@ +package postgres + +import ( + "context" + "database/sql" + "fmt" + + 
"github.com/jmoiron/sqlx" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ Database = (*database)(nil) + +type database struct { + db *sqlx.DB + tracer trace.Tracer +} + +// Database provides a database interface +type Database interface { + NamedQueryContext(context.Context, string, interface{}) (*sqlx.Rows, error) + NamedExecContext(context.Context, string, interface{}) (sql.Result, error) + QueryRowxContext(context.Context, string, ...interface{}) *sqlx.Row + QueryxContext(context.Context, string, ...interface{}) (*sqlx.Rows, error) + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + BeginTxx(ctx context.Context, opts *sql.TxOptions) (*sqlx.Tx, error) +} + +// NewDatabase creates a Clients'Database instance +func NewDatabase(db *sqlx.DB, tracer trace.Tracer) Database { + return &database{ + db: db, + tracer: tracer, + } +} + +func (d database) NamedQueryContext(ctx context.Context, query string, args interface{}) (*sqlx.Rows, error) { + ctx, span := d.addSpanTags(ctx, "NamedQueryContext", query) + defer span.End() + return d.db.NamedQueryContext(ctx, query, args) +} + +func (d database) NamedExecContext(ctx context.Context, query string, args interface{}) (sql.Result, error) { + ctx, span := d.addSpanTags(ctx, "NamedExecContext", query) + defer span.End() + return d.db.NamedExecContext(ctx, query, args) +} + +func (d database) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + ctx, span := d.addSpanTags(ctx, "ExecContext", query) + defer span.End() + return d.db.ExecContext(ctx, query, args...) +} + +func (d database) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *sqlx.Row { + ctx, span := d.addSpanTags(ctx, "QueryRowxContext", query) + defer span.End() + return d.db.QueryRowxContext(ctx, query, args...) 
+} + +func (d database) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + ctx, span := d.addSpanTags(ctx, "QueryxContext", query) + defer span.End() + return d.db.QueryxContext(ctx, query, args...) +} + +func (d database) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*sqlx.Tx, error) { + ctx, span := d.addSpanTags(ctx, "sql_beginTxx", "") + defer span.End() + return d.db.BeginTxx(ctx, opts) +} + +func (d database) addSpanTags(ctx context.Context, method, query string) (context.Context, trace.Span) { + ctx, span := d.tracer.Start(ctx, + fmt.Sprintf("sql_%s", method), + trace.WithAttributes( + attribute.String("sql.statement", query), + attribute.String("span.kind", "client"), + attribute.String("peer.service", "postgres"), + attribute.String("db.type", "sql"), + ), + ) + return ctx, span +} diff --git a/internal/server/grpc/grpc.go b/internal/server/grpc/grpc.go index cc16241b38..f9bd1b9ecc 100644 --- a/internal/server/grpc/grpc.go +++ b/internal/server/grpc/grpc.go @@ -8,6 +8,7 @@ import ( "github.com/mainflux/mainflux/internal/server" "github.com/mainflux/mainflux/logger" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) @@ -56,10 +57,15 @@ func (s *Server) Start() error { return fmt.Errorf("failed to load auth certificates: %w", err) } s.Logger.Info(fmt.Sprintf("%s service gRPC server listening at %s with TLS cert %s and key %s", s.Name, s.Address, s.Config.CertFile, s.Config.KeyFile)) - s.server = grpc.NewServer(grpc.Creds(creds)) + s.server = grpc.NewServer( + grpc.Creds(creds), + grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()), + ) default: s.Logger.Info(fmt.Sprintf("%s service gRPC server listening at %s without TLS", s.Name, s.Address)) - s.server = grpc.NewServer() + s.server = grpc.NewServer( + grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()), + ) } s.registerService(s.server) diff --git 
a/internal/testsutil/common.go b/internal/testsutil/common.go new file mode 100644 index 0000000000..87c8bdbee9 --- /dev/null +++ b/internal/testsutil/common.go @@ -0,0 +1,53 @@ +package testsutil + +import ( + "context" + "fmt" + "testing" + + "github.com/jmoiron/sqlx" + "github.com/mainflux/mainflux" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/clients" + cmocks "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func GenerateUUID(t *testing.T, idProvider mainflux.IDProvider) string { + ulid, err := idProvider.ID() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + return ulid +} + +func GenerateValidToken(t *testing.T, clientID string, svc clients.Service, cRepo *cmocks.ClientRepository, phasher clients.Hasher) string { + client := mfclients.Client{ + ID: clientID, + Name: "validtoken", + Credentials: mfclients.Credentials{ + Identity: "validtoken", + Secret: "secret", + }, + Status: mfclients.EnabledStatus, + } + rClient := client + rClient.Credentials.Secret, _ = phasher.Hash(client.Credentials.Secret) + + repoCall := cRepo.On("RetrieveByIdentity", context.Background(), client.Credentials.Identity).Return(rClient, nil) + token, err := svc.IssueToken(context.Background(), client.Credentials.Identity, client.Credentials.Secret) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("Create token expected nil got %s\n", err)) + ok := repoCall.Parent.AssertCalled(t, "RetrieveByIdentity", context.Background(), client.Credentials.Identity) + assert.True(t, ok, "RetrieveByIdentity was not called on creating token") + repoCall.Unset() + return token.AccessToken +} + +func CleanUpDB(t *testing.T, db *sqlx.DB) { + _, err := db.Exec("DELETE FROM policies") + require.Nil(t, err, fmt.Sprintf("clean policies unexpected error: %s", err)) + _, err = db.Exec("DELETE FROM groups") + 
require.Nil(t, err, fmt.Sprintf("clean groups unexpected error: %s", err)) + _, err = db.Exec("DELETE FROM clients") + require.Nil(t, err, fmt.Sprintf("clean clients unexpected error: %s", err)) +} diff --git a/logger/logger_test.go b/logger/logger_test.go index b82dbe1b1b..8ac096aceb 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -11,7 +11,7 @@ import ( "os/exec" "testing" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -24,7 +24,7 @@ const ( ) var _ io.Writer = (*mockWriter)(nil) -var logger log.Logger +var logger mflog.Logger var err error var output logMsg @@ -59,32 +59,32 @@ func TestDebug(t *testing.T) { { desc: "debug log ordinary string", input: "input_string", - level: log.Debug.String(), - output: logMsg{log.Debug.String(), "input_string", ""}, + level: mflog.Debug.String(), + output: logMsg{mflog.Debug.String(), "input_string", ""}, }, { desc: "debug log empty string", input: "", - level: log.Debug.String(), - output: logMsg{log.Debug.String(), "", ""}, + level: mflog.Debug.String(), + output: logMsg{mflog.Debug.String(), "", ""}, }, { desc: "debug ordinary string lvl not allowed", input: "input_string", - level: log.Info.String(), + level: mflog.Info.String(), output: logMsg{"", "", ""}, }, { desc: "debug empty string lvl not allowed", input: "", - level: log.Info.String(), + level: mflog.Info.String(), output: logMsg{"", "", ""}, }, } for _, tc := range cases { writer := mockWriter{} - logger, err = log.New(&writer, tc.level) + logger, err = mflog.New(&writer, tc.level) assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) logger.Debug(tc.input) output, err = writer.Read() @@ -102,32 +102,32 @@ func TestInfo(t *testing.T) { { desc: "info log ordinary string", input: "input_string", - level: log.Info.String(), - output: logMsg{log.Info.String(), "input_string", ""}, + level: mflog.Info.String(), 
+ output: logMsg{mflog.Info.String(), "input_string", ""}, }, { desc: "info log empty string", input: "", - level: log.Info.String(), - output: logMsg{log.Info.String(), "", ""}, + level: mflog.Info.String(), + output: logMsg{mflog.Info.String(), "", ""}, }, { desc: "info ordinary string lvl not allowed", input: "input_string", - level: log.Warn.String(), + level: mflog.Warn.String(), output: logMsg{"", "", ""}, }, { desc: "info empty string lvl not allowed", input: "", - level: log.Warn.String(), + level: mflog.Warn.String(), output: logMsg{"", "", ""}, }, } for _, tc := range cases { writer := mockWriter{} - logger, err = log.New(&writer, tc.level) + logger, err = mflog.New(&writer, tc.level) assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) logger.Info(tc.input) output, err = writer.Read() @@ -145,32 +145,32 @@ func TestWarn(t *testing.T) { { desc: "warn log ordinary string", input: "input_string", - level: log.Warn.String(), - output: logMsg{log.Warn.String(), "input_string", ""}, + level: mflog.Warn.String(), + output: logMsg{mflog.Warn.String(), "input_string", ""}, }, { desc: "warn log empty string", input: "", - level: log.Warn.String(), - output: logMsg{log.Warn.String(), "", ""}, + level: mflog.Warn.String(), + output: logMsg{mflog.Warn.String(), "", ""}, }, { desc: "warn ordinary string lvl not allowed", input: "input_string", - level: log.Error.String(), + level: mflog.Error.String(), output: logMsg{"", "", ""}, }, { desc: "warn empty string lvl not allowed", input: "", - level: log.Error.String(), + level: mflog.Error.String(), output: logMsg{"", "", ""}, }, } for _, tc := range cases { writer := mockWriter{} - logger, err = log.New(&writer, tc.level) + logger, err = mflog.New(&writer, tc.level) require.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) logger.Warn(tc.input) output, err = writer.Read() @@ -187,17 +187,17 @@ func TestError(t *testing.T) { { desc: "error log ordinary string", input: "input_string", - 
output: logMsg{log.Error.String(), "input_string", ""}, + output: logMsg{mflog.Error.String(), "input_string", ""}, }, { desc: "error log empty string", input: "", - output: logMsg{log.Error.String(), "", ""}, + output: logMsg{mflog.Error.String(), "", ""}, }, } writer := mockWriter{} - logger, err := log.New(&writer, log.Error.String()) + logger, err := mflog.New(&writer, mflog.Error.String()) require.Nil(t, err) for _, tc := range cases { logger.Error(tc.input) @@ -211,7 +211,7 @@ func TestFatal(t *testing.T) { // This is the actually Fatal call we test that will // be executed in the subprocess spawned by the test. if os.Getenv(testFlag) == testFlagVal { - logger, err := log.New(os.Stderr, log.Error.String()) + logger, err := mflog.New(os.Stderr, mflog.Error.String()) require.Nil(t, err) msg := os.Getenv(testMsg) logger.Fatal(msg) diff --git a/lora/redis/streams.go b/lora/redis/streams.go index 6d804a9199..5a2f6d5b14 100644 --- a/lora/redis/streams.go +++ b/lora/redis/streams.go @@ -7,7 +7,7 @@ import ( "fmt" "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/lora" ) @@ -54,11 +54,11 @@ type eventStore struct { svc lora.Service client *redis.Client consumer string - logger logger.Logger + logger mflog.Logger } // NewEventStore returns new event store instance. -func NewEventStore(svc lora.Service, client *redis.Client, consumer string, log logger.Logger) Subscriber { +func NewEventStore(svc lora.Service, client *redis.Client, consumer string, log mflog.Logger) Subscriber { return eventStore{ svc: svc, client: client, diff --git a/mqtt/README.md b/mqtt/README.md index a56e908991..221321ce31 100644 --- a/mqtt/README.md +++ b/mqtt/README.md @@ -32,9 +32,9 @@ default values. 
| MF_MQTT_ADAPTER_ES_URL | Event sourcing URL | localhost:6379 | | MF_MQTT_ADAPTER_ES_PASS | Event sourcing password | "" | | MF_MQTT_ADAPTER_ES_DB | Event sourcing database | "0" | -| MF_AUTH_CACHE_URL | Auth cache URL | localhost:6379 | -| MF_AUTH_CACHE_PASS | Auth cache password | "" | -| MF_AUTH_CACHE_DB | Auth cache database | "0" | +| MF_AUTH_CACHE_URL | Users cache URL | localhost:6379 | +| MF_AUTH_CACHE_PASS | Users cache password | "" | +| MF_AUTH_CACHE_DB | Users cache database | "0" | ## Deployment @@ -76,9 +76,9 @@ MF_MQTT_ADAPTER_INSTANCE=[Instance for event sourcing] \ MF_MQTT_ADAPTER_ES_URL=[Event sourcing URL] \ MF_MQTT_ADAPTER_ES_PASS=[Event sourcing pass] \ MF_MQTT_ADAPTER_ES_DB=[Event sourcing database] \ -MF_AUTH_CACHE_URL=[Auth cache URL] \ -MF_AUTH_CACHE_PASS=[Auth cache pass] \ -MF_AUTH_CACHE_DB=[Auth cache DB name] \ +MF_AUTH_CACHE_URL=[Users cache URL] \ +MF_AUTH_CACHE_PASS=[Users cache pass] \ +MF_AUTH_CACHE_DB=[Users cache DB name] \ $GOBIN/mainflux-mqtt ``` diff --git a/mqtt/forwarder.go b/mqtt/forwarder.go index 6ad39061f2..2877a22d31 100644 --- a/mqtt/forwarder.go +++ b/mqtt/forwarder.go @@ -8,15 +8,10 @@ import ( "fmt" "strings" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" ) -const ( - channels = "channels" - messages = "messages" -) - // Forwarder specifies MQTT forwarder interface API. type Forwarder interface { // Forward subscribes to the Subscriber and @@ -26,11 +21,11 @@ type Forwarder interface { type forwarder struct { topic string - logger log.Logger + logger mflog.Logger } // NewForwarder returns new Forwarder implementation. 
-func NewForwarder(topic string, logger log.Logger) Forwarder { +func NewForwarder(topic string, logger mflog.Logger) Forwarder { return forwarder{ topic: topic, logger: logger, @@ -41,16 +36,16 @@ func (f forwarder) Forward(ctx context.Context, id string, sub messaging.Subscri return sub.Subscribe(ctx, id, f.topic, handle(ctx, pub, f.logger)) } -func handle(ctx context.Context, pub messaging.Publisher, logger log.Logger) handleFunc { +func handle(ctx context.Context, pub messaging.Publisher, logger mflog.Logger) handleFunc { return func(msg *messaging.Message) error { if msg.Protocol == protocol { return nil } // Use concatenation instead of fmt.Sprintf for the // sake of simplicity and performance. - topic := channels + "/" + msg.Channel + "/" + messages + topic := fmt.Sprintf("channels/%s/messages", msg.Channel) if msg.Subtopic != "" { - topic += "/" + strings.ReplaceAll(msg.Subtopic, ".", "/") + topic += fmt.Sprintf("%s/%s", topic, strings.ReplaceAll(msg.Subtopic, ".", "/")) } go func() { if err := pub.Publish(ctx, topic, msg); err != nil { diff --git a/mqtt/handler.go b/mqtt/handler.go index 13f2c794d6..dc8edb1fcc 100644 --- a/mqtt/handler.go +++ b/mqtt/handler.go @@ -16,6 +16,8 @@ import ( "github.com/mainflux/mainflux/pkg/auth" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/messaging" + "github.com/mainflux/mainflux/things/policies" + "github.com/mainflux/mproxy/pkg/session" ) @@ -109,7 +111,7 @@ func (h *handler) AuthPublish(ctx context.Context, topic *string, payload *[]byt return ErrMissingTopicPub } - return h.authAccess(ctx, s.Username, *topic) + return h.authAccess(ctx, s.Username, *topic, policies.WriteAction) } // AuthSubscribe is called on device publish, @@ -124,7 +126,7 @@ func (h *handler) AuthSubscribe(ctx context.Context, topics *[]string) error { } for _, v := range *topics { - if err := h.authAccess(ctx, s.Username, v); err != nil { + if err := h.authAccess(ctx, s.Username, v, policies.ReadAction); err != nil { 
return err } @@ -218,7 +220,7 @@ func (h *handler) Disconnect(ctx context.Context) { } } -func (h *handler) authAccess(ctx context.Context, username string, topic string) error { +func (h *handler) authAccess(ctx context.Context, username, topic, action string) error { // Topics are in the format: // channels//messages//.../ct/ if !channelRegExp.Match([]byte(topic)) { @@ -231,7 +233,7 @@ func (h *handler) authAccess(ctx context.Context, username string, topic string) } chanID := channelParts[1] - return h.auth.Authorize(ctx, chanID, username) + return h.auth.Authorize(ctx, chanID, username, action) } func parseSubtopic(subtopic string) (string, error) { diff --git a/mqtt/mocks/auth.go b/mqtt/mocks/auth.go index 8dd90fe510..4bb36f0d2b 100644 --- a/mqtt/mocks/auth.go +++ b/mqtt/mocks/auth.go @@ -2,6 +2,7 @@ package mocks import ( "context" + "github.com/mainflux/mainflux/pkg/auth" "github.com/mainflux/mainflux/pkg/errors" ) @@ -15,7 +16,7 @@ func NewClient(key map[string]string, conns map[string]interface{}) auth.Client return MockClient{key: key, conns: conns} } -func (cli MockClient) Authorize(ctx context.Context, chanID, thingID string) error { +func (cli MockClient) Authorize(ctx context.Context, chanID, thingID, action string) error { for k, v := range cli.conns { if k == chanID && v == thingID { return nil diff --git a/mqtt/redis/events.go b/mqtt/redis/events.go index f3fa2aafb3..58ddceeaab 100644 --- a/mqtt/redis/events.go +++ b/mqtt/redis/events.go @@ -13,7 +13,6 @@ var ( type mqttEvent struct { clientID string - timestamp string eventType string instance string } @@ -21,7 +20,6 @@ type mqttEvent struct { func (me mqttEvent) Encode() map[string]interface{} { return map[string]interface{}{ "thing_id": me.clientID, - "timestamp": me.timestamp, "event_type": me.eventType, "instance": me.instance, } diff --git a/mqtt/redis/streams.go b/mqtt/redis/streams.go index cac90e4818..c2efe788ac 100644 --- a/mqtt/redis/streams.go +++ b/mqtt/redis/streams.go @@ -5,8 +5,6 @@ 
package redis import ( "context" - "strconv" - "time" "github.com/go-redis/redis/v8" ) @@ -37,11 +35,8 @@ func NewEventStore(client *redis.Client, instance string) EventStore { } func (es eventStore) storeEvent(clientID, eventType string) error { - timestamp := strconv.FormatInt(time.Now().Unix(), 10) - event := mqttEvent{ clientID: clientID, - timestamp: timestamp, eventType: eventType, instance: es.instance, } diff --git a/mqtt/tracing/forwarder.go b/mqtt/tracing/forwarder.go index 6bfa1c50ce..87bf3f472e 100644 --- a/mqtt/tracing/forwarder.go +++ b/mqtt/tracing/forwarder.go @@ -5,7 +5,8 @@ import ( "github.com/mainflux/mainflux/mqtt" "github.com/mainflux/mainflux/pkg/messaging" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) const forwardOP = "forward_op" @@ -15,11 +16,11 @@ var _ mqtt.Forwarder = (*forwarderMiddleware)(nil) type forwarderMiddleware struct { topic string forwarder mqtt.Forwarder - tracer opentracing.Tracer + tracer trace.Tracer } // New creates new mqtt forwarder tracing middleware. 
-func New(tracer opentracing.Tracer, forwarder mqtt.Forwarder, topic string) mqtt.Forwarder { +func New(tracer trace.Tracer, forwarder mqtt.Forwarder, topic string) mqtt.Forwarder { return &forwarderMiddleware{ forwarder: forwarder, tracer: tracer, @@ -29,9 +30,14 @@ func New(tracer opentracing.Tracer, forwarder mqtt.Forwarder, topic string) mqtt // Forward traces mqtt forward operations func (fm *forwarderMiddleware) Forward(ctx context.Context, id string, sub messaging.Subscriber, pub messaging.Publisher) error { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, fm.tracer, forwardOP) - defer span.Finish() - span.SetTag("subscriber", id) - span.SetTag("topic", fm.topic) + ctx, span := fm.tracer.Start(ctx, + forwardOP, + trace.WithAttributes( + attribute.String("topic", fm.topic), + attribute.String("subscriber", id), + ), + ) + defer span.End() + return fm.forwarder.Forward(ctx, id, sub, pub) } diff --git a/mqtt/tracing/handler.go b/mqtt/tracing/handler.go index 20beea3e3f..b740842bb0 100644 --- a/mqtt/tracing/handler.go +++ b/mqtt/tracing/handler.go @@ -4,7 +4,8 @@ import ( "context" "github.com/mainflux/mproxy/pkg/session" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) const ( @@ -22,11 +23,11 @@ var _ session.Handler = (*handlerMiddleware)(nil) type handlerMiddleware struct { handler session.Handler - tracer opentracing.Tracer + tracer trace.Tracer } -// NewHandler creates a new session.Handler middlware with tracing. -func NewHandler(tracer opentracing.Tracer, handler session.Handler) session.Handler { +// NewHandler creates a new session.Handler middleware with tracing. +func NewHandler(tracer trace.Tracer, handler session.Handler) session.Handler { return &handlerMiddleware{ tracer: tracer, handler: handler, @@ -35,56 +36,78 @@ func NewHandler(tracer opentracing.Tracer, handler session.Handler) session.Hand // AuthConnect traces auth connect operations. 
func (h *handlerMiddleware) AuthConnect(ctx context.Context) error { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, authConnectOP) - defer span.Finish() + kvOpts := []attribute.KeyValue{} + s, ok := session.FromContext(ctx) + if ok { + kvOpts = append(kvOpts, attribute.String("client_id", s.ID)) + kvOpts = append(kvOpts, attribute.String("username", s.Username)) + } + ctx, span := h.tracer.Start(ctx, authConnectOP, trace.WithAttributes(kvOpts...)) + defer span.End() return h.handler.AuthConnect(ctx) } // AuthPublish traces auth publish operations. func (h *handlerMiddleware) AuthPublish(ctx context.Context, topic *string, payload *[]byte) error { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, authPublishOP) - defer span.Finish() + kvOpts := []attribute.KeyValue{} + s, ok := session.FromContext(ctx) + if ok { + kvOpts = append(kvOpts, attribute.String("client_id", s.ID)) + if topic != nil { + kvOpts = append(kvOpts, attribute.String("topic", *topic)) + } + } + ctx, span := h.tracer.Start(ctx, authPublishOP, trace.WithAttributes(kvOpts...)) + defer span.End() return h.handler.AuthPublish(ctx, topic, payload) } // AuthSubscribe traces auth subscribe operations. func (h *handlerMiddleware) AuthSubscribe(ctx context.Context, topics *[]string) error { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, authSubscribeOP) - defer span.Finish() + kvOpts := []attribute.KeyValue{} + s, ok := session.FromContext(ctx) + if ok { + kvOpts = append(kvOpts, attribute.String("client_id", s.ID)) + if topics != nil { + kvOpts = append(kvOpts, attribute.StringSlice("topics", *topics)) + } + } + ctx, span := h.tracer.Start(ctx, authSubscribeOP, trace.WithAttributes(kvOpts...)) + defer span.End() return h.handler.AuthSubscribe(ctx, topics) } // Connect traces connect operations. 
func (h *handlerMiddleware) Connect(ctx context.Context) { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, connectOP) - defer span.Finish() + ctx, span := h.tracer.Start(ctx, connectOP) + defer span.End() h.handler.Connect(ctx) } // Disconnect traces disconnect operations. func (h *handlerMiddleware) Disconnect(ctx context.Context) { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, disconnectOP) - defer span.Finish() + ctx, span := h.tracer.Start(ctx, disconnectOP) + defer span.End() h.handler.Disconnect(ctx) } // Publish traces publish operations. func (h *handlerMiddleware) Publish(ctx context.Context, topic *string, payload *[]byte) { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, publishOP) - defer span.Finish() + ctx, span := h.tracer.Start(ctx, publishOP) + defer span.End() h.handler.Publish(ctx, topic, payload) } // Subscribe traces subscribe operations. func (h *handlerMiddleware) Subscribe(ctx context.Context, topics *[]string) { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, subscribeOP) - defer span.Finish() + ctx, span := h.tracer.Start(ctx, subscribeOP) + defer span.End() h.handler.Subscribe(ctx, topics) } // Unsubscribe traces unsubscribe operations. 
func (h *handlerMiddleware) Unsubscribe(ctx context.Context, topics *[]string) { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, h.tracer, unsubscribeOP) - defer span.Finish() + ctx, span := h.tracer.Start(ctx, unsubscribeOP) + defer span.End() h.handler.Unsubscribe(ctx, topics) } diff --git a/opcua/api/transport.go b/opcua/api/transport.go index 86bf99f40d..bfa3b6a2a3 100644 --- a/opcua/api/transport.go +++ b/opcua/api/transport.go @@ -12,7 +12,7 @@ import ( "github.com/go-zoo/bone" "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/opcua" "github.com/mainflux/mainflux/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -28,7 +28,7 @@ const ( ) // MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(svc opcua.Service, logger logger.Logger) http.Handler { +func MakeHandler(svc opcua.Service, logger mflog.Logger) http.Handler { opts := []kithttp.ServerOption{ kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), } diff --git a/opcua/redis/streams.go b/opcua/redis/streams.go index 1ed1fdfeed..0291b6dd65 100644 --- a/opcua/redis/streams.go +++ b/opcua/redis/streams.go @@ -10,7 +10,7 @@ import ( "fmt" "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/opcua" ) @@ -53,11 +53,11 @@ type eventStore struct { svc opcua.Service client *redis.Client consumer string - logger logger.Logger + logger mflog.Logger } // NewEventStore returns new event store instance. 
-func NewEventStore(svc opcua.Service, client *redis.Client, consumer string, log logger.Logger) opcua.EventStore { +func NewEventStore(svc opcua.Service, client *redis.Client, consumer string, log mflog.Logger) opcua.EventStore { return eventStore{ svc: svc, client: client, diff --git a/pkg/auth/client.go b/pkg/auth/client.go index 23757c2632..c6864060f5 100644 --- a/pkg/auth/client.go +++ b/pkg/auth/client.go @@ -7,12 +7,13 @@ import ( "context" "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" ) // Client represents Auth cache. type Client interface { - Authorize(ctx context.Context, chanID, thingID string) error + Authorize(ctx context.Context, chanID, thingID, action string) error Identify(ctx context.Context, thingKey string) (string, error) } @@ -23,11 +24,11 @@ const ( type client struct { redisClient *redis.Client - thingsClient mainflux.ThingsServiceClient + thingsClient policies.ThingsServiceClient } // New returns redis channel cache implementation. 
-func New(redisClient *redis.Client, thingsClient mainflux.ThingsServiceClient) Client { +func New(redisClient *redis.Client, thingsClient policies.ThingsServiceClient) Client { return client{ redisClient: redisClient, thingsClient: thingsClient, @@ -38,8 +39,8 @@ func (c client) Identify(ctx context.Context, thingKey string) (string, error) { tkey := keyPrefix + ":" + thingKey thingID, err := c.redisClient.Get(ctx, tkey).Result() if err != nil { - t := &mainflux.Token{ - Value: thingKey, + t := &policies.Key{ + Value: string(thingKey), } thid, err := c.thingsClient.Identify(context.TODO(), t) @@ -51,15 +52,24 @@ func (c client) Identify(ctx context.Context, thingKey string) (string, error) { return thingID, nil } -func (c client) Authorize(ctx context.Context, chanID, thingID string) error { +func (c client) Authorize(ctx context.Context, chanID, thingID, action string) error { if c.redisClient.SIsMember(ctx, chanPrefix+":"+chanID, thingID).Val() { return nil } - ar := &mainflux.AccessByIDReq{ - ThingID: thingID, - ChanID: chanID, + ar := &policies.AuthorizeReq{ + Sub: thingID, + Obj: chanID, + Act: action, + EntityType: policies.GroupEntityType, } - _, err := c.thingsClient.CanAccessByID(ctx, ar) + res, err := c.thingsClient.Authorize(ctx, ar) + if err != nil { + return err + } + if !res.GetAuthorized() { + return errors.ErrAuthorization + } + return err } diff --git a/pkg/clients/clients.go b/pkg/clients/clients.go new file mode 100644 index 0000000000..4efe034621 --- /dev/null +++ b/pkg/clients/clients.go @@ -0,0 +1,172 @@ +package clients + +import ( + "context" + "fmt" + "regexp" + "strings" + "time" + + "github.com/mainflux/mainflux/pkg/errors" + "golang.org/x/net/idna" +) + +const ( + maxLocalLen = 64 + maxDomainLen = 255 + maxTLDLen = 24 // longest TLD currently in existence + + atSeparator = "@" + dotSeparator = "." 
var (
	// userRegexp accepts the character set allowed in the local
	// (before-@) part of an address.
	userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
	// hostRegexp requires the host part to contain at least one dot and
	// no whitespace.
	hostRegexp = regexp.MustCompile(`^[^\s]+\.[^\s]+$`)
	// userDotRegexp rejects a local part that starts or ends with a dot,
	// or contains consecutive dots.
	userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
)

// Credentials represent client credentials: its
// "identity" which can be a username, email, generated name;
// and "secret" which can be a password or access token.
type Credentials struct {
	Identity string `json:"identity,omitempty"` // username or generated login ID
	Secret   string `json:"secret"`             // password or token
}

// Client represents generic Client.
type Client struct {
	ID          string      `json:"id"`
	Name        string      `json:"name,omitempty"`
	Tags        []string    `json:"tags,omitempty"`
	Owner       string      `json:"owner,omitempty"` // nullable
	Credentials Credentials `json:"credentials"`
	Metadata    Metadata    `json:"metadata,omitempty"`
	CreatedAt   time.Time   `json:"created_at"`
	UpdatedAt   time.Time   `json:"updated_at,omitempty"`
	UpdatedBy   string      `json:"updated_by,omitempty"`
	Status      Status      `json:"status"`         // 0 for enabled, 1 for disabled (see Status consts)
	Role        Role        `json:"role,omitempty"` // 1 for admin, 0 for normal user
}

// ClientsPage contains page related metadata as well as list
// of Clients that belong to the page.
type ClientsPage struct {
	Page
	Clients []Client
}

// MembersPage contains page related metadata as well as list of members that
// belong to this page.
type MembersPage struct {
	Page
	Members []Client
}

// Repository specifies an account persistence API.
type Repository interface {
	// Save persists the client account. A non-nil error is returned to indicate
	// operation failure.
	Save(ctx context.Context, client ...Client) ([]Client, error)

	// RetrieveByID retrieves client by its unique ID.
	RetrieveByID(ctx context.Context, id string) (Client, error)

	// RetrieveByIdentity retrieves client by its unique credentials
	RetrieveByIdentity(ctx context.Context, identity string) (Client, error)

	// RetrieveAll retrieves all clients.
	RetrieveAll(ctx context.Context, pm Page) (ClientsPage, error)

	// Members retrieves everything that is assigned to a group identified by groupID.
	Members(ctx context.Context, groupID string, pm Page) (MembersPage, error)

	// Update updates the client name and metadata.
	Update(ctx context.Context, client Client) (Client, error)

	// UpdateTags updates the client tags.
	UpdateTags(ctx context.Context, client Client) (Client, error)

	// UpdateIdentity updates identity for client with given id.
	UpdateIdentity(ctx context.Context, client Client) (Client, error)

	// UpdateSecret updates secret for client with given identity.
	UpdateSecret(ctx context.Context, client Client) (Client, error)

	// UpdateOwner updates owner for client with given id.
	UpdateOwner(ctx context.Context, client Client) (Client, error)

	// ChangeStatus changes client status to enabled or disabled
	ChangeStatus(ctx context.Context, client Client) (Client, error)

	// RetrieveBySecret retrieves the client whose secret matches key.
	RetrieveBySecret(ctx context.Context, key string) (Client, error)
}

// Validate returns an error if client representation is invalid.
// Currently only the identity is checked, and it must be a well-formed
// email address.
func (u Client) Validate() error {
	if !isEmail(u.Credentials.Identity) {
		return errors.ErrMalformedEntity
	}
	return nil
}

// isEmail reports whether email is an acceptable address: non-empty
// local@host form with length limits on the local part (64), the domain
// (255) and the TLD (24), validated against the package regexps after
// punycode (IDNA) normalization.
func isEmail(email string) bool {
	if email == "" {
		return false
	}

	// Exactly one "@" separating local part and host.
	es := strings.Split(email, atSeparator)
	if len(es) != 2 {
		return false
	}
	local, host := es[0], es[1]

	if local == "" || len(local) > maxLocalLen {
		return false
	}

	// The host needs at least a domain and an extension (TLD).
	hs := strings.Split(host, dotSeparator)
	if len(hs) < 2 {
		return false
	}
	domain, ext := hs[0], hs[1]

	// Check subdomain and validate
	if len(hs) > 2 {
		if domain == "" {
			return false
		}

		// Rebuild the full domain from every label except the last,
		// rejecting empty labels (consecutive dots).
		for i := 1; i < len(hs)-1; i++ {
			sub := hs[i]
			if sub == "" {
				return false
			}
			domain = fmt.Sprintf("%s.%s", domain, sub)
		}

		// The last label is the actual TLD.
		ext = hs[len(hs)-1]
	}

	if domain == "" || len(domain) > maxDomainLen {
		return false
	}
	if ext == "" || len(ext) > maxTLDLen {
		return false
	}

	// Normalize internationalized parts to ASCII (punycode) before the
	// regexp checks.
	punyLocal, err := idna.ToASCII(local)
	if err != nil {
		return false
	}
	punyHost, err := idna.ToASCII(host)
	if err != nil {
		return false
	}

	if userDotRegexp.MatchString(punyLocal) || !userRegexp.MatchString(punyLocal) || !hostRegexp.MatchString(punyHost) {
		return false
	}

	return true
}
// Page contains page metadata that helps navigation.
type Page struct {
	Total        uint64   `json:"total"`
	Offset       uint64   `json:"offset"`
	Limit        uint64   `json:"limit"`
	Name         string   `json:"name,omitempty"`
	Order        string   `json:"order,omitempty"`
	Dir          string   `json:"dir,omitempty"`
	Metadata     Metadata `json:"metadata,omitempty"`
	Disconnected bool     `json:"disconnected,omitempty"` // Used for connected or disconnected lists
	Owner        string   `json:"owner,omitempty"`
	Tag          string   `json:"tag,omitempty"`
	SharedBy     string   `json:"shared_by,omitempty"`
	Status       Status   `json:"status,omitempty"`
	Action       string   `json:"action,omitempty"`
	Subject      string   `json:"subject,omitempty"`
	IDs          []string `json:"ids,omitempty"`
	Identity     string   `json:"identity,omitempty"`
}

// Role represents Client role.
type Role uint8

// Possible Client role values
const (
	UserRole Role = iota
	AdminRole
)

// String representation of the possible role values.
const (
	Admin = "admin"
	User  = "user"
)

// String converts client role to string literal.
func (cs Role) String() string {
	switch cs {
	case AdminRole:
		return Admin
	case UserRole:
		return User
	default:
		return Unknown
	}
}

// ToRole converts string value to a valid Client role.
// The empty string maps to UserRole, the default role.
func ToRole(status string) (Role, error) {
	switch status {
	case "", User:
		return UserRole, nil
	case Admin:
		return AdminRole, nil
	}
	return Role(0), apiutil.ErrInvalidRole
}

// MarshalJSON encodes the role as its string literal ("admin"/"user").
func (r Role) MarshalJSON() ([]byte, error) {
	return json.Marshal(r.String())
}

// UnmarshalJSON decodes a quoted role string. Note that *r is assigned
// before the error check, so an invalid input leaves the zero role
// (UserRole) alongside the returned error.
func (r *Role) UnmarshalJSON(data []byte) error {
	str := strings.Trim(string(data), "\"")
	val, err := ToRole(str)
	*r = val
	return err
}

// Status represents Client status.
type Status uint8

// Possible Client status values
const (
	// EnabledStatus represents enabled Client.
	EnabledStatus Status = iota
	// DisabledStatus represents disabled Client.
	DisabledStatus

	// AllStatus is used for querying purposes to list clients irrespective
	// of their status - both enabled and disabled. It is never stored in the
	// database as the actual Client status and should always be the largest
	// value in this enumeration.
	AllStatus
)

// String representation of the possible status values.
const (
	Disabled = "disabled"
	Enabled  = "enabled"
	All      = "all"
	Unknown  = "unknown"
)

var (
	// ErrStatusAlreadyAssigned indicated that the client or group has already been assigned the status.
	ErrStatusAlreadyAssigned = errors.New("status already assigned")
)

// String converts client/group status to string literal.
func (s Status) String() string {
	switch s {
	case DisabledStatus:
		return Disabled
	case EnabledStatus:
		return Enabled
	case AllStatus:
		return All
	default:
		return Unknown
	}
}

// ToStatus converts string value to a valid Client/Group status.
// The empty string maps to EnabledStatus, the default status.
func ToStatus(status string) (Status, error) {
	switch status {
	case "", Enabled:
		return EnabledStatus, nil
	case Disabled:
		return DisabledStatus, nil
	case All:
		return AllStatus, nil
	}
	return Status(0), apiutil.ErrInvalidStatus
}

// MarshalJSON is a custom marshaller for Client/Groups: the status is
// encoded as its string literal.
func (s Status) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.String())
}

// UnmarshalJSON is a custom unmarshaler for Client/Groups. Note that *s is
// assigned before the error check, so an invalid input leaves the zero
// status (EnabledStatus) alongside the returned error.
func (s *Status) UnmarshalJSON(data []byte) error {
	str := strings.Trim(string(data), "\"")
	val, err := ToStatus(str)
	*s = val
	return err
}

// Metadata represents arbitrary JSON.
type Metadata map[string]interface{}

var (
	// ErrInvalidStatus indicates invalid status.
	ErrInvalidStatus = errors.New("invalid groups status")

	// ErrEnableGroup indicates error in enabling group.
	ErrEnableGroup = errors.New("failed to enable group")

	// ErrDisableGroup indicates error in disabling group.
	ErrDisableGroup = errors.New("failed to disable group")

	// ErrStatusAlreadyAssigned indicated that the group has already been assigned the status.
	ErrStatusAlreadyAssigned = errors.New("status already assigned")
)
+ MinLevel = uint64(0) +) + +// Group represents the group of Clients. +// Indicates a level in tree hierarchy. Root node is level 1. +// Path in a tree consisting of group IDs +// Paths are unique per owner. +type Group struct { + ID string `json:"id"` + Owner string `json:"owner_id"` + Parent string `json:"parent_id,omitempty"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Metadata clients.Metadata `json:"metadata,omitempty"` + Level int `json:"level,omitempty"` + Path string `json:"path,omitempty"` + Children []*Group `json:"children,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Status clients.Status `json:"status"` +} + +// MembershipsPage contains page related metadata as well as list of memberships that +// belong to this page. +type MembershipsPage struct { + Page + Memberships []Group +} + +// GroupsPage contains page related metadata as well as list +// of Groups that belong to the page. +type GroupsPage struct { + Page + Path string + Level uint64 + ID string + Direction int64 // ancestors (-1) or descendants (+1) + Groups []Group +} + +// Repository specifies a group persistence API. +type Repository interface { + // Save group. + Save(ctx context.Context, g Group) (Group, error) + + // Update a group. + Update(ctx context.Context, g Group) (Group, error) + + // RetrieveByID retrieves group by its id. + RetrieveByID(ctx context.Context, id string) (Group, error) + + // RetrieveAll retrieves all groups. + RetrieveAll(ctx context.Context, gm GroupsPage) (GroupsPage, error) + + // Memberships retrieves everything that is assigned to a group identified by clientID. 
+ Memberships(ctx context.Context, clientID string, gm GroupsPage) (MembershipsPage, error) + + // ChangeStatus changes groups status to active or inactive + ChangeStatus(ctx context.Context, group Group) (Group, error) +} diff --git a/pkg/groups/page.go b/pkg/groups/page.go new file mode 100644 index 0000000000..b9e0d2a1cd --- /dev/null +++ b/pkg/groups/page.go @@ -0,0 +1,19 @@ +package groups + +import "github.com/mainflux/mainflux/pkg/clients" + +// Page contains page metadata that helps navigation. +type Page struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` + Name string `json:"name,omitempty"` + OwnerID string `json:"identity,omitempty"` + Tag string `json:"tag,omitempty"` + Metadata clients.Metadata `json:"metadata,omitempty"` + SharedBy string `json:"shared_by,omitempty"` + Status clients.Status `json:"status,omitempty"` + Subject string `json:"subject,omitempty"` + Action string `json:"action,omitempty"` + Disconnected bool `json:"disconnected,omitempty"` // Used for connected or disconnected lists +} diff --git a/pkg/messaging/brokers/brokers_nats.go b/pkg/messaging/brokers/brokers_nats.go index f9e8991f88..0b8617da2c 100644 --- a/pkg/messaging/brokers/brokers_nats.go +++ b/pkg/messaging/brokers/brokers_nats.go @@ -9,7 +9,7 @@ package brokers import ( "log" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/pkg/messaging/nats" ) @@ -30,7 +30,7 @@ func NewPublisher(url string) (messaging.Publisher, error) { } -func NewPubSub(url, queue string, logger logger.Logger) (messaging.PubSub, error) { +func NewPubSub(url, queue string, logger mflog.Logger) (messaging.PubSub, error) { pb, err := nats.NewPubSub(url, queue, logger) if err != nil { return nil, err diff --git a/pkg/messaging/brokers/brokers_rabbitmq.go b/pkg/messaging/brokers/brokers_rabbitmq.go index 35cd03e7ba..8c5755ba72 100644 --- 
a/pkg/messaging/brokers/brokers_rabbitmq.go +++ b/pkg/messaging/brokers/brokers_rabbitmq.go @@ -9,7 +9,7 @@ package brokers import ( "log" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/pkg/messaging/rabbitmq" ) @@ -29,7 +29,7 @@ func NewPublisher(url string) (messaging.Publisher, error) { return pb, nil } -func NewPubSub(url, queue string, logger logger.Logger) (messaging.PubSub, error) { +func NewPubSub(url, queue string, logger mflog.Logger) (messaging.PubSub, error) { pb, err := rabbitmq.NewPubSub(url, queue, logger) if err != nil { return nil, err diff --git a/pkg/messaging/mqtt/pubsub.go b/pkg/messaging/mqtt/pubsub.go index 8b4df41c19..ab4cb946ad 100644 --- a/pkg/messaging/mqtt/pubsub.go +++ b/pkg/messaging/mqtt/pubsub.go @@ -11,7 +11,7 @@ import ( "time" mqtt "github.com/eclipse/paho.mqtt.golang" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "google.golang.org/protobuf/proto" ) @@ -54,7 +54,7 @@ type subscription struct { type pubsub struct { publisher - logger log.Logger + logger mflog.Logger mu sync.RWMutex address string timeout time.Duration @@ -62,7 +62,7 @@ type pubsub struct { } // NewPubSub returns MQTT message publisher/subscriber. 
-func NewPubSub(url, queue string, timeout time.Duration, logger log.Logger) (messaging.PubSub, error) { +func NewPubSub(url, queue string, timeout time.Duration, logger mflog.Logger) (messaging.PubSub, error) { client, err := newClient(url, "mqtt-publisher", timeout) if err != nil { return nil, err diff --git a/pkg/messaging/mqtt/setup_test.go b/pkg/messaging/mqtt/setup_test.go index f175767020..6683172267 100644 --- a/pkg/messaging/mqtt/setup_test.go +++ b/pkg/messaging/mqtt/setup_test.go @@ -13,7 +13,7 @@ import ( "time" mqtt "github.com/eclipse/paho.mqtt.golang" - mainflux_log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" mqtt_pubsub "github.com/mainflux/mainflux/pkg/messaging/mqtt" "github.com/ory/dockertest/v3" @@ -21,7 +21,7 @@ import ( var ( pubsub messaging.PubSub - logger mainflux_log.Logger + logger mflog.Logger address string ) @@ -51,7 +51,7 @@ func TestMain(m *testing.M) { address = fmt.Sprintf("%s:%s", "localhost", container.GetPort(port)) pool.MaxWait = poolMaxWait - logger, err = mainflux_log.New(os.Stdout, mainflux_log.Debug.String()) + logger, err = mflog.New(os.Stdout, mflog.Debug.String()) if err != nil { log.Fatalf(err.Error()) } diff --git a/pkg/messaging/nats/pubsub.go b/pkg/messaging/nats/pubsub.go index ebfce5a7b3..d982ffdd64 100644 --- a/pkg/messaging/nats/pubsub.go +++ b/pkg/messaging/nats/pubsub.go @@ -11,7 +11,7 @@ import ( "google.golang.org/protobuf/proto" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" broker "github.com/nats-io/nats.go" ) @@ -34,7 +34,7 @@ type subscription struct { type pubsub struct { publisher - logger log.Logger + logger mflog.Logger mu sync.Mutex queue string subscriptions map[string]map[string]subscription @@ -47,7 +47,7 @@ type pubsub struct { // from ordinary subscribe. 
For more information, please take a look // here: https://docs.nats.io/developing-with-nats/receiving/queues. // If the queue is empty, Subscribe will be used. -func NewPubSub(url, queue string, logger log.Logger) (messaging.PubSub, error) { +func NewPubSub(url, queue string, logger mflog.Logger) (messaging.PubSub, error) { conn, err := broker.Connect(url, broker.MaxReconnects(maxReconnects)) if err != nil { return nil, err diff --git a/pkg/messaging/nats/setup_test.go b/pkg/messaging/nats/setup_test.go index 9a082be5c4..f8f35b8490 100644 --- a/pkg/messaging/nats/setup_test.go +++ b/pkg/messaging/nats/setup_test.go @@ -11,7 +11,7 @@ import ( "syscall" "testing" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/pkg/messaging/nats" dockertest "github.com/ory/dockertest/v3" @@ -42,7 +42,7 @@ func TestMain(m *testing.M) { log.Fatalf("Could not connect to docker: %s", err) } - logger, err := logger.New(os.Stdout, "error") + logger, err := mflog.New(os.Stdout, "error") if err != nil { log.Fatalf(err.Error()) } diff --git a/pkg/messaging/rabbitmq/pubsub.go b/pkg/messaging/rabbitmq/pubsub.go index 69c8d5e7ec..5c313c38c1 100644 --- a/pkg/messaging/rabbitmq/pubsub.go +++ b/pkg/messaging/rabbitmq/pubsub.go @@ -9,7 +9,7 @@ import ( "fmt" "sync" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" amqp "github.com/rabbitmq/amqp091-go" "google.golang.org/protobuf/proto" @@ -39,13 +39,13 @@ type subscription struct { } type pubsub struct { publisher - logger log.Logger + logger mflog.Logger subscriptions map[string]map[string]subscription mu sync.Mutex } // NewPubSub returns RabbitMQ message publisher/subscriber. 
-func NewPubSub(url, queue string, logger log.Logger) (messaging.PubSub, error) { +func NewPubSub(url, queue string, logger mflog.Logger) (messaging.PubSub, error) { conn, err := amqp.Dial(url) if err != nil { return nil, err diff --git a/pkg/messaging/tracing/publisher.go b/pkg/messaging/tracing/publisher.go index 777a2c9b75..4a66c2894b 100644 --- a/pkg/messaging/tracing/publisher.go +++ b/pkg/messaging/tracing/publisher.go @@ -4,50 +4,51 @@ import ( "context" "github.com/mainflux/mainflux/pkg/messaging" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) -// traced ops. +// Traced operations const publishOP = "publish_op" var _ messaging.Publisher = (*publisherMiddleware)(nil) type publisherMiddleware struct { publisher messaging.Publisher - tracer opentracing.Tracer + tracer trace.Tracer } // New creates new messaging publisher tracing middleware. -func New(tracer opentracing.Tracer, publisher messaging.Publisher) messaging.Publisher { +func New(tracer trace.Tracer, publisher messaging.Publisher) messaging.Publisher { return &publisherMiddleware{ publisher: publisher, tracer: tracer, } } -// Publish traces nats publish operations. +// Publish traces NATS publish operations. func (pm *publisherMiddleware) Publish(ctx context.Context, topic string, msg *messaging.Message) error { - span, ctx := createSpan(ctx, publishOP, topic, msg.Subtopic, msg.Publisher, pm.tracer) - defer span.Finish() + ctx, span := createSpan(ctx, publishOP, topic, msg.Subtopic, msg.Publisher, pm.tracer) + defer span.End() return pm.publisher.Publish(ctx, topic, msg) } -// Close nats trace publisher middleware +// Close NATS trace publisher middleware. 
func (pm *publisherMiddleware) Close() error { return pm.publisher.Close() } -func createSpan(ctx context.Context, operation, topic, subTopic, thingID string, tracer opentracing.Tracer) (opentracing.Span, context.Context) { - span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, operation) +func createSpan(ctx context.Context, operation, topic, subTopic, thingID string, tracer trace.Tracer) (context.Context, trace.Span) { + kvOpts := []attribute.KeyValue{} switch operation { case publishOP: - span.SetTag("publisher", thingID) + kvOpts = append(kvOpts, attribute.String("publisher", thingID)) default: - span.SetTag("subscriber", thingID) + kvOpts = append(kvOpts, attribute.String("subscriber", thingID)) } - span.SetTag("topic", topic) + kvOpts = append(kvOpts, attribute.String("topic", topic)) if subTopic != "" { - span.SetTag("sub-topic", subTopic) + kvOpts = append(kvOpts, attribute.String("subtopic", subTopic)) } - return span, ctx + return tracer.Start(ctx, operation, trace.WithAttributes(kvOpts...)) } diff --git a/pkg/messaging/tracing/pubsub.go b/pkg/messaging/tracing/pubsub.go index eadbd09124..d9996647dd 100644 --- a/pkg/messaging/tracing/pubsub.go +++ b/pkg/messaging/tracing/pubsub.go @@ -4,7 +4,7 @@ import ( "context", "github.com/mainflux/mainflux/pkg/messaging" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) // Constants to define different operations to be traced. @@ -19,25 +19,23 @@ var _ messaging.PubSub = (*pubsubMiddleware)(nil) type pubsubMiddleware struct { publisherMiddleware pubsub messaging.PubSub - tracer opentracing.Tracer } // NewPubSub creates a new pubsub middleware that traces pubsub operations. 
-func NewPubSub(tracer opentracing.Tracer, pubsub messaging.PubSub) messaging.PubSub { +func NewPubSub(tracer trace.Tracer, pubsub messaging.PubSub) messaging.PubSub { return &pubsubMiddleware{ publisherMiddleware: publisherMiddleware{ publisher: pubsub, tracer: tracer, }, pubsub: pubsub, - tracer: tracer, } } // Subscribe creates a new subscription and traces the operation. func (pm *pubsubMiddleware) Subscribe(ctx context.Context, id string, topic string, handler messaging.MessageHandler) error { - span, ctx := createSpan(ctx, subscribeOP, topic, "", id, pm.tracer) - defer span.Finish() + ctx, span := createSpan(ctx, subscribeOP, topic, "", id, pm.tracer) + defer span.End() h := &traceHandler{ handler: handler, tracer: pm.tracer, @@ -48,27 +46,27 @@ func (pm *pubsubMiddleware) Subscribe(ctx context.Context, id string, topic stri // Unsubscribe removes an existing subscription and traces the operation. func (pm *pubsubMiddleware) Unsubscribe(ctx context.Context, id string, topic string) error { - span, ctx := createSpan(ctx, unsubscribeOp, topic, "", id, pm.tracer) - defer span.Finish() + ctx, span := createSpan(ctx, unsubscribeOp, topic, "", id, pm.tracer) + defer span.End() return pm.pubsub.Unsubscribe(ctx, id, topic) } -// traceHandler is used to trace the message handling operation +// TraceHandler is used to trace the message handling operation. type traceHandler struct { handler messaging.MessageHandler - tracer opentracing.Tracer + tracer trace.Tracer ctx context.Context topic string } -// Handle instruments the message handling operation +// Handle instruments the message handling operation. 
func (h *traceHandler) Handle(msg *messaging.Message) error { - span, _ := createSpan(h.ctx, handleOp, h.topic, msg.Subtopic, msg.Publisher, h.tracer) - defer span.Finish() + _, span := createSpan(h.ctx, handleOp, h.topic, msg.Subtopic, msg.Publisher, h.tracer) + defer span.End() return h.handler.Handle(msg) } -// Cancel cancels the message handling operation +// Cancel cancels the message handling operation. func (h *traceHandler) Cancel() error { return h.handler.Cancel() } diff --git a/pkg/sdk/go/certs_test.go b/pkg/sdk/go/certs_test.go deleted file mode 100644 index 3654299801..0000000000 --- a/pkg/sdk/go/certs_test.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package sdk_test - -import ( - "fmt" - "net/http" - "net/http/httptest" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/mainflux/mainflux" - bsmocks "github.com/mainflux/mainflux/bootstrap/mocks" - "github.com/mainflux/mainflux/certs" - httpapi "github.com/mainflux/mainflux/certs/api" - "github.com/mainflux/mainflux/certs/mocks" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/things" - thmocks "github.com/mainflux/mainflux/things/mocks" -) - -var ( - thingsNum = 1 - thingKey = "thingKey" - thingID = "1" - caPath = "../../../docker/ssl/certs/ca.crt" - caKeyPath = "../../../docker/ssl/certs/ca.key" - cfgAuthTimeout = "1s" - cfgSignHoursValid = "24h" -) - -func newCertsThingsService(auth mainflux.AuthServiceClient) things.Service { - ths := make(map[string]things.Thing, thingsNum) - for i := 0; i < thingsNum; i++ { - id := strconv.Itoa(i + 1) - ths[id] = things.Thing{ - ID: id, - Key: thingKey, - Owner: email, - } - } - - return bsmocks.NewThingsService(ths, map[string]things.Channel{}, auth) -} - -func 
newCertService() (certs.Service, error) { - ac := bsmocks.NewAuthClient(map[string]string{token: email}) - server := newThingsServer(newCertsThingsService(ac)) - - policies := []thmocks.MockSubjectSet{{Object: "users", Relation: "member"}} - auth := thmocks.NewAuthService(map[string]string{token: email}, map[string][]thmocks.MockSubjectSet{email: policies}) - config := sdk.Config{ - ThingsURL: server.URL, - } - - mfsdk := sdk.NewSDK(config) - repo := mocks.NewCertsRepository() - - tlsCert, caCert, err := certs.LoadCertificates(caPath, caKeyPath) - if err != nil { - return nil, err - } - - authTimeout, err := time.ParseDuration(cfgAuthTimeout) - if err != nil { - return nil, err - } - - pki := mocks.NewPkiAgent(tlsCert, caCert, cfgSignHoursValid, authTimeout) - - return certs.New(auth, repo, mfsdk, pki), nil -} - -func newCertServer(svc certs.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(svc, logger) - return httptest.NewServer(mux) -} - -func TestIssueCert(t *testing.T) { - svc, err := newCertService() - require.Nil(t, err, fmt.Sprintf("unexpected error during creating service: %s", err)) - ts := newCertServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - CertsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - cases := []struct { - desc string - thingID string - duration string - token string - err errors.SDKError - }{ - { - desc: "create new cert with thing id and duration", - thingID: thingID, - duration: "10h", - token: token, - err: nil, - }, - { - desc: "create new cert with empty thing id and duration", - thingID: "", - duration: "10h", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "create new cert with invalid thing id and duration", - thingID: "ah", - duration: "10h", - token: token, - err: errors.NewSDKErrorWithStatus(certs.ErrFailedCertCreation, 
http.StatusInternalServerError), - }, - { - desc: "create new cert with thing id and empty duration", - thingID: thingID, - duration: "", - token: exampleUser1, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingCertData, http.StatusBadRequest), - }, - { - desc: "create new cert with thing id and malformed duration", - thingID: thingID, - duration: "10g", - token: exampleUser1, - err: errors.NewSDKErrorWithStatus(apiutil.ErrInvalidCertData, http.StatusBadRequest), - }, - { - desc: "create new cert with empty token", - thingID: thingID, - duration: "10h", - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - { - desc: "create new cert with invalid token", - thingID: thingID, - duration: "10h", - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "create new empty cert", - thingID: "", - duration: "", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - } - - for _, tc := range cases { - cert, err := mainfluxSDK.IssueCert(tc.thingID, tc.duration, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - if err == nil { - assert.NotEmpty(t, cert, fmt.Sprintf("%s: got empty cert", tc.desc)) - } - } -} - -func TestViewCert(t *testing.T) { - svc, err := newCertService() - require.Nil(t, err, fmt.Sprintf("unexpected error during creating service: %s", err)) - ts := newCertServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - CertsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - cert, err := mainfluxSDK.IssueCert(thingID, "10h", token) - require.Nil(t, err, fmt.Sprintf("unexpected error during creating cert: %s", err)) - - cases := []struct { - desc string - certID string - token string - err errors.SDKError - response sdk.Subscription - }{ - { - desc: "get existing 
cert", - certID: cert.CertSerial, - token: token, - err: nil, - response: sub1, - }, - { - desc: "get non-existent cert", - certID: "43", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusInternalServerError), - response: sdk.Subscription{}, - }, - { - desc: "get cert with invalid token", - certID: cert.CertSerial, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: sdk.Subscription{}, - }, - } - - for _, tc := range cases { - cert, err := mainfluxSDK.ViewCert(tc.certID, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - if err == nil { - assert.NotEmpty(t, cert, fmt.Sprintf("%s: got empty cert", tc.desc)) - } - } -} - -func TestViewCertByThing(t *testing.T) { - svc, err := newCertService() - require.Nil(t, err, fmt.Sprintf("unexpected error during creating service: %s", err)) - ts := newCertServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - CertsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - _, err = mainfluxSDK.IssueCert(thingID, "10h", token) - require.Nil(t, err, fmt.Sprintf("unexpected error during creating cert: %s", err)) - - cases := []struct { - desc string - thingID string - token string - err errors.SDKError - response sdk.Subscription - }{ - { - desc: "get existing cert", - thingID: thingID, - token: token, - err: nil, - response: sub1, - }, - { - desc: "get non-existent cert", - thingID: "43", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusInternalServerError), - response: sdk.Subscription{}, - }, - { - desc: "get cert with invalid token", - thingID: thingID, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: sdk.Subscription{}, - }, - } - - for _, tc := range cases { - cert, err := mainfluxSDK.ViewCertByThing(tc.thingID, tc.token) 
- assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - if err == nil { - assert.NotEmpty(t, cert, fmt.Sprintf("%s: got empty cert", tc.desc)) - } - } -} - -func TestRevokeCert(t *testing.T) { - svc, err := newCertService() - require.Nil(t, err, fmt.Sprintf("unexpected error during creating service: %s", err)) - ts := newCertServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - CertsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - _, err = mainfluxSDK.IssueCert(thingID, "10h", token) - require.Nil(t, err, fmt.Sprintf("unexpected error during creating cert: %s", err)) - - cases := []struct { - desc string - thingID string - token string - err errors.SDKError - }{ - { - desc: "revoke cert with invalid token", - thingID: thingID, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "revoke non-existing cert", - thingID: "2", - token: token, - err: errors.NewSDKErrorWithStatus(certs.ErrFailedCertRevocation, http.StatusInternalServerError), - }, - { - desc: "revoke cert with invalid id", - thingID: "", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "revoke cert with empty token", - thingID: thingID, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - { - desc: "revoke existing cert", - thingID: thingID, - token: token, - err: nil, - }, - { - desc: "revoke deleted cert", - thingID: thingID, - token: token, - err: errors.NewSDKErrorWithStatus(certs.ErrFailedToRemoveCertFromDB, http.StatusInternalServerError), - }, - } - - for _, tc := range cases { - response, err := mainfluxSDK.RevokeCert(tc.thingID, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - if err == nil { - assert.NotEmpty(t, response, 
fmt.Sprintf("%s: got empty revocation time", tc.desc)) - } - } -} diff --git a/pkg/sdk/go/channels.go b/pkg/sdk/go/channels.go index 6b178a3e6f..e51f72bed3 100644 --- a/pkg/sdk/go/channels.go +++ b/pkg/sdk/go/channels.go @@ -7,27 +7,47 @@ import ( "encoding/json" "fmt" "net/http" - "strings" + "time" "github.com/mainflux/mainflux/pkg/errors" ) +// Channel represents mainflux channel. +type Channel struct { + ID string `json:"id"` + OwnerID string `json:"owner_id,omitempty"` + ParentID string `json:"parent_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Level int `json:"level,omitempty"` + Path string `json:"path,omitempty"` + Children []*Channel `json:"children,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + Status string `json:"status,omitempty"` +} + const channelsEndpoint = "channels" -func (sdk mfSDK) CreateChannel(c Channel, token string) (string, errors.SDKError) { +func (sdk mfSDK) CreateChannel(c Channel, token string) (Channel, errors.SDKError) { data, err := json.Marshal(c) if err != nil { - return "", errors.NewSDKError(err) + return Channel{}, errors.NewSDKError(err) } url := fmt.Sprintf("%s/%s", sdk.thingsURL, channelsEndpoint) - headers, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) if sdkerr != nil { - return "", sdkerr + return Channel{}, sdkerr } - id := strings.TrimPrefix(headers.Get("Location"), fmt.Sprintf("/%s/", channelsEndpoint)) - return id, nil + c = Channel{} + if err := json.Unmarshal(body, &c); err != nil { + return Channel{}, errors.NewSDKError(err) + } + + return c, nil } func (sdk mfSDK) CreateChannels(chs []Channel, token string) ([]Channel, errors.SDKError) { @@ -38,7 +58,7 @@ func (sdk mfSDK) 
CreateChannels(chs []Channel, token string) ([]Channel, errors. url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, channelsEndpoint, "bulk") - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusOK) if sdkerr != nil { return []Channel{}, sdkerr } @@ -52,10 +72,8 @@ func (sdk mfSDK) CreateChannels(chs []Channel, token string) ([]Channel, errors. } func (sdk mfSDK) Channels(pm PageMetadata, token string) (ChannelsPage, errors.SDKError) { - var url string - var err error - - if url, err = sdk.withQueryParams(sdk.thingsURL, channelsEndpoint, pm); err != nil { + url, err := sdk.withQueryParams(sdk.thingsURL, channelsEndpoint, pm) + if err != nil { return ChannelsPage{}, errors.NewSDKError(err) } @@ -106,21 +124,48 @@ func (sdk mfSDK) Channel(id, token string) (Channel, errors.SDKError) { return c, nil } -func (sdk mfSDK) UpdateChannel(c Channel, token string) errors.SDKError { +func (sdk mfSDK) UpdateChannel(c Channel, token string) (Channel, errors.SDKError) { data, err := json.Marshal(c) if err != nil { - return errors.NewSDKError(err) + return Channel{}, errors.NewSDKError(err) } url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, channelsEndpoint, c.ID) - _, _, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr + _, body, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return Channel{}, sdkerr + } + + c = Channel{} + if err := json.Unmarshal(body, &c); err != nil { + return Channel{}, errors.NewSDKError(err) + } + + return c, nil } -func (sdk mfSDK) DeleteChannel(id, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, channelsEndpoint, id) +// EnableChannel enables the channel identified with the provided ID. 
+func (sdk mfSDK) EnableChannel(id, token string) (Channel, errors.SDKError) { + return sdk.changeChannelStatus(id, enableEndpoint, token) +} + +// DisableChannel disables the channel identified with the provided ID. +func (sdk mfSDK) DisableChannel(id, token string) (Channel, errors.SDKError) { + return sdk.changeChannelStatus(id, disableEndpoint, token) +} + +func (sdk mfSDK) changeChannelStatus(id, status, token string) (Channel, errors.SDKError) { + url := fmt.Sprintf("%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, id, status) - _, _, err := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) - return err + _, body, err := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusOK) + if err != nil { + return Channel{}, err + } + c := Channel{} + if err := json.Unmarshal(body, &c); err != nil { + return Channel{}, errors.NewSDKError(err) + } + + return c, nil } diff --git a/pkg/sdk/go/channels_test.go b/pkg/sdk/go/channels_test.go deleted file mode 100644 index ce3c0f31aa..0000000000 --- a/pkg/sdk/go/channels_test.go +++ /dev/null @@ -1,572 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package sdk_test - -import ( - "fmt" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/pkg/errors" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" -) - -var ( - ch1 = sdk.Channel{Name: "test1"} - ch2 = sdk.Channel{ID: "fe6b4e92-cc98-425e-b0aa-000000000001", Name: "test1"} - ch3 = sdk.Channel{ID: "fe6b4e92-cc98-425e-b0aa-000000000002", Name: "test2"} - chPrefix = "fe6b4e92-cc98-425e-b0aa-" - emptyChannel = sdk.Channel{} -) - -func TestCreateChannel(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - - chWrongExtID := sdk.Channel{ID: "b0aa-000000000001", Name: "1", Metadata: metadata} - - sdkConf := sdk.Config{ - 
ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - cases := []struct { - desc string - channel sdk.Channel - token string - err errors.SDKError - empty bool - }{ - { - desc: "create new channel", - channel: ch1, - token: token, - err: nil, - empty: false, - }, - { - desc: "create new channel with empty token", - channel: ch1, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - empty: true, - }, - { - desc: "create new channel with invalid token", - channel: ch1, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - empty: true, - }, - { - desc: "create new empty channel", - channel: emptyChannel, - token: token, - err: nil, - empty: false, - }, - { - desc: "create a new channel with external UUID", - channel: ch2, - token: token, - err: nil, - empty: false, - }, - { - desc: "create a new channel with wrong external UUID", - channel: chWrongExtID, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrInvalidIDFormat, http.StatusBadRequest), - empty: true, - }, - } - - for _, tc := range cases { - loc, err := mainfluxSDK.CreateChannel(tc.channel, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.empty, loc == "", fmt.Sprintf("%s: expected empty result location, got: %s", tc.desc, loc)) - } -} - -func TestCreateChannels(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - channels := []sdk.Channel{ - ch2, - ch3, - } - - cases := []struct { - desc string - channels []sdk.Channel - token string - err errors.SDKError - res []sdk.Channel - }{ - { - desc: "create new channels", - channels: 
channels, - token: token, - err: nil, - res: channels, - }, - { - desc: "create new channels with empty channels", - channels: []sdk.Channel{}, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrEmptyList, http.StatusBadRequest), - res: []sdk.Channel{}, - }, - { - desc: "create new channels with empty token", - channels: channels, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - res: []sdk.Channel{}, - }, - { - desc: "create new channels with invalid token", - channels: channels, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - res: []sdk.Channel{}, - }, - } - for _, tc := range cases { - res, err := mainfluxSDK.CreateChannels(tc.channels, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - - for idx := range tc.res { - assert.Equal(t, tc.res[idx].ID, res[idx].ID, fmt.Sprintf("%s: expected response ID %s got %s", tc.desc, tc.res[idx].ID, res[idx].ID)) - } - } -} - -func TestChannel(t *testing.T) { - svc := newThingsService(map[string]string{token: adminEmail}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - id, err := mainfluxSDK.CreateChannel(ch2, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - chanID string - token string - err errors.SDKError - response sdk.Channel - }{ - { - desc: "get existing channel", - chanID: id, - token: token, - err: nil, - response: ch2, - }, - { - desc: "get non-existent channel", - chanID: "43", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - response: sdk.Channel{}, - }, - { - desc: "get channel with invalid token", - chanID: id, - token: "", - err: 
errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: sdk.Channel{}, - }, - } - - for _, tc := range cases { - respCh, err := mainfluxSDK.Channel(tc.chanID, tc.token) - - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, respCh, fmt.Sprintf("%s: expected response channel %s, got %s", tc.desc, tc.response, respCh)) - } -} - -func TestChannels(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - var channels []sdk.Channel - mainfluxSDK := sdk.NewSDK(sdkConf) - for i := 1; i < 101; i++ { - id := fmt.Sprintf("%s%012d", chPrefix, i) - name := fmt.Sprintf("test-%d", i) - ch := sdk.Channel{ID: id, Name: name} - _, err := mainfluxSDK.CreateChannel(ch, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - channels = append(channels, ch) - } - - cases := []struct { - desc string - token string - offset uint64 - limit uint64 - name string - err errors.SDKError - response []sdk.Channel - metadata map[string]interface{} - }{ - { - desc: "get a list of channels", - token: token, - offset: offset, - limit: limit, - err: nil, - response: channels[0:limit], - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of channels with invalid token", - token: wrongValue, - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of channels with empty token", - token: "", - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of channels without limit, default 10", - 
token: token, - offset: offset, - limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of channels with limit greater than max", - token: token, - offset: offset, - limit: 110, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of channels with offset greater than max", - token: token, - offset: 110, - limit: limit, - err: nil, - response: []sdk.Channel{}, - metadata: make(map[string]interface{}), - }, - } - for _, tc := range cases { - filter := sdk.PageMetadata{ - Name: tc.name, - Total: total, - Offset: uint64(tc.offset), - Limit: uint64(tc.limit), - Metadata: tc.metadata, - } - - page, err := mainfluxSDK.Channels(filter, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, page.Channels, fmt.Sprintf("%s: got incorrect channels list from from Channels()", tc.desc)) - } -} - -func TestChannelsByThing(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - mainfluxSDK := sdk.NewSDK(sdkConf) - - th := sdk.Thing{Name: "test_device"} - tid, err := mainfluxSDK.CreateThing(th, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - var n = 100 - var chsDiscoNum = 1 - var channels []sdk.Channel - for i := 1; i < n+1; i++ { - id := fmt.Sprintf("%s%012d", chPrefix, i) - name := fmt.Sprintf("test-%d", i) - ch := sdk.Channel{ID: id, Name: name} - cid, err := mainfluxSDK.CreateChannel(ch, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - channels = append(channels, ch) - - // Don't connect last Channel - if i == n+1-chsDiscoNum { - 
break - } - - conIDs := sdk.ConnectionIDs{ - ChannelIDs: []string{cid}, - ThingIDs: []string{tid}, - } - err = mainfluxSDK.Connect(conIDs, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - } - - cases := []struct { - desc string - thing string - token string - offset uint64 - limit uint64 - disconnected bool - err errors.SDKError - response []sdk.Channel - }{ - { - desc: "get a list of channels by thing", - thing: tid, - token: token, - offset: offset, - limit: limit, - err: nil, - response: channels[0:limit], - }, - { - desc: "get a list of channels by thing with invalid token", - thing: tid, - token: wrongValue, - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - response: nil, - }, - { - desc: "get a list of channels by thing with empty token", - thing: tid, - token: "", - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: nil, - }, - { - desc: "get a list of channels by thing with zero limit", - thing: tid, - token: token, - offset: offset, - limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - }, - { - desc: "get a list of channels by thing with limit greater than max", - thing: tid, - token: token, - offset: offset, - limit: 110, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - }, - { - desc: "get a list of channels by thing with offset greater than max", - thing: tid, - token: token, - offset: 110, - limit: limit, - err: nil, - response: []sdk.Channel{}, - }, - { - desc: "get a list of channels by thing with invalid args (zero limit) and invalid token", - thing: tid, - token: wrongValue, - offset: offset, - limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - }, - { - desc: "get a list of not connected channels by thing", - 
thing: tid, - token: token, - offset: offset, - limit: 100, - disconnected: true, - err: nil, - response: []sdk.Channel{channels[n-chsDiscoNum]}, - }, - } - - for _, tc := range cases { - pm := sdk.PageMetadata{ - Offset: tc.offset, - Limit: tc.limit, - Disconnected: tc.disconnected, - } - page, err := mainfluxSDK.ChannelsByThing(tc.thing, pm, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, page.Channels, fmt.Sprintf("%s: got incorrect channels list from from ChannelsByThing()", tc.desc)) - } -} - -func TestUpdateChannel(t *testing.T) { - svc := newThingsService(map[string]string{token: adminEmail}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - id, err := mainfluxSDK.CreateChannel(ch2, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error %s", err)) - - cases := []struct { - desc string - channel sdk.Channel - token string - err errors.SDKError - }{ - { - desc: "update existing channel", - channel: sdk.Channel{ID: id, Name: "test2"}, - token: token, - err: nil, - }, - { - desc: "update non-existing channel", - channel: sdk.Channel{ID: "0", Name: "test2"}, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - { - desc: "update channel with invalid id", - channel: sdk.Channel{ID: "", Name: "test2"}, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "update channel with invalid token", - channel: sdk.Channel{ID: id, Name: "test2"}, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "update channel with empty token", - channel: sdk.Channel{ID: id, Name: "test2"}, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, 
http.StatusUnauthorized), - }, - } - - for _, tc := range cases { - err := mainfluxSDK.UpdateChannel(tc.channel, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - } -} - -func TestDeleteChannel(t *testing.T) { - svc := newThingsService(map[string]string{token: adminEmail}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - id, err := mainfluxSDK.CreateChannel(ch2, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - chanID string - token string - err errors.SDKError - }{ - { - desc: "delete channel with invalid token", - chanID: id, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "delete non-existing channel", - chanID: "2", - token: token, - err: nil, - }, - { - desc: "delete channel with invalid id", - chanID: "", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "delete channel with empty token", - chanID: id, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - { - desc: "delete existing channel", - chanID: id, - token: token, - err: nil, - }, - { - desc: "delete deleted channel", - chanID: id, - token: token, - err: nil, - }, - } - - for _, tc := range cases { - err := mainfluxSDK.DeleteChannel(tc.chanID, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - } -} diff --git a/pkg/sdk/go/consumers_test.go b/pkg/sdk/go/consumers_test.go index 940cf3ff52..5d0c256b8c 100644 --- a/pkg/sdk/go/consumers_test.go +++ b/pkg/sdk/go/consumers_test.go @@ -9,7 +9,6 @@ import ( "net/http/httptest" "testing" - "github.com/opentracing/opentracing-go/mocktracer" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +22,8 @@ import ( "github.com/mainflux/mainflux/pkg/uuid" ) +const wrongValue = "wrong_value" + var ( sub1 = sdk.Subscription{ Topic: "topic", @@ -45,7 +46,7 @@ func newSubscriptionService() notifiers.Service { func newSubscriptionServer(svc notifiers.Service) *httptest.Server { logger := logger.NewMock() - mux := httpapi.MakeHandler(svc, mocktracer.New(), logger) + mux := httpapi.MakeHandler(svc, logger) return httptest.NewServer(mux) } diff --git a/pkg/sdk/go/groups.go b/pkg/sdk/go/groups.go index 5ee22758b3..f009a1b49d 100644 --- a/pkg/sdk/go/groups.go +++ b/pkg/sdk/go/groups.go @@ -7,7 +7,7 @@ import ( "encoding/json" "fmt" "net/http" - "strings" + "time" "github.com/mainflux/mainflux/pkg/errors" ) @@ -18,83 +18,65 @@ const ( MinLevel = uint64(1) ) -func (sdk mfSDK) CreateGroup(g Group, token string) (string, errors.SDKError) { +// Group represents the group of Clients. +// Indicates a level in tree hierarchy. Root node is level 1. +// Path in a tree consisting of group IDs +// Paths are unique per owner. 
+type Group struct { + ID string `json:"id"` + OwnerID string `json:"owner_id,omitempty"` + ParentID string `json:"parent_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Level int `json:"level,omitempty"` + Path string `json:"path,omitempty"` + Children []*Group `json:"children,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + Status string `json:"status,omitempty"` +} + +func (sdk mfSDK) CreateGroup(g Group, token string) (Group, errors.SDKError) { data, err := json.Marshal(g) if err != nil { - return "", errors.NewSDKError(err) + return Group{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.authURL, groupsEndpoint) + url := fmt.Sprintf("%s/%s", sdk.usersURL, groupsEndpoint) - headers, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) if sdkerr != nil { - return "", sdkerr - } - - id := strings.TrimPrefix(headers.Get("Location"), fmt.Sprintf("/%s/", groupsEndpoint)) - return id, nil -} - -func (sdk mfSDK) DeleteGroup(id, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s", sdk.authURL, groupsEndpoint, id) - _, _, err := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) - return err -} - -func (sdk mfSDK) Assign(memberIDs []string, memberType, groupID, token string) errors.SDKError { - var ids []string - url := fmt.Sprintf("%s/%s/%s/members", sdk.authURL, groupsEndpoint, groupID) - ids = append(ids, memberIDs...) 
- assignReq := assignRequest{ - Type: memberType, - Members: ids, + return Group{}, sdkerr } - data, err := json.Marshal(assignReq) - if err != nil { - return errors.NewSDKError(err) + g = Group{} + if err := json.Unmarshal(body, &g); err != nil { + return Group{}, errors.NewSDKError(err) } - - _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr + return g, nil } -func (sdk mfSDK) Unassign(groupID string, memberIDs []string, token string) errors.SDKError { - var ids []string - url := fmt.Sprintf("%s/%s/%s/members", sdk.authURL, groupsEndpoint, groupID) - ids = append(ids, memberIDs...) - assignReq := assignRequest{ - Members: ids, - } - - data, err := json.Marshal(assignReq) +func (sdk mfSDK) Memberships(clientID string, pm PageMetadata, token string) (MembershipsPage, errors.SDKError) { + url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, clientID), "memberships", pm) if err != nil { - return errors.NewSDKError(err) + return MembershipsPage{}, errors.NewSDKError(err) } - _, _, sdkerr := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), data, http.StatusNoContent) - return sdkerr -} - -func (sdk mfSDK) Members(groupID string, pm PageMetadata, token string) (MembersPage, errors.SDKError) { - url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.authURL, groupsEndpoint, groupID), "members", pm) - if err != nil { - return MembersPage{}, errors.NewSDKError(err) - } _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) if sdkerr != nil { - return MembersPage{}, sdkerr + return MembershipsPage{}, sdkerr } - var tp MembersPage + var tp MembershipsPage if err := json.Unmarshal(body, &tp); err != nil { - return MembersPage{}, errors.NewSDKError(err) + return MembershipsPage{}, errors.NewSDKError(err) } return tp, nil } func (sdk mfSDK) Groups(pm PageMetadata, token string) (GroupsPage, errors.SDKError) { - 
url, err := sdk.withQueryParams(sdk.authURL, groupsEndpoint, pm) + url, err := sdk.withQueryParams(sdk.usersURL, groupsEndpoint, pm) if err != nil { return GroupsPage{}, errors.NewSDKError(err) } @@ -103,7 +85,7 @@ func (sdk mfSDK) Groups(pm PageMetadata, token string) (GroupsPage, errors.SDKEr func (sdk mfSDK) Parents(id string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { pm.Level = MaxLevel - url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.authURL, groupsEndpoint, id), "parents", pm) + url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, id), "parents", pm) if err != nil { return GroupsPage{}, errors.NewSDKError(err) } @@ -112,7 +94,7 @@ func (sdk mfSDK) Parents(id string, pm PageMetadata, token string) (GroupsPage, func (sdk mfSDK) Children(id string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { pm.Level = MaxLevel - url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.authURL, groupsEndpoint, id), "children", pm) + url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, id), "children", pm) if err != nil { return GroupsPage{}, errors.NewSDKError(err) } @@ -133,7 +115,7 @@ func (sdk mfSDK) getGroups(url, token string) (GroupsPage, errors.SDKError) { } func (sdk mfSDK) Group(id, token string) (Group, errors.SDKError) { - url := fmt.Sprintf("%s/%s/%s", sdk.authURL, groupsEndpoint, id) + url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, id) _, body, err := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) if err != nil { return Group{}, err @@ -147,32 +129,46 @@ func (sdk mfSDK) Group(id, token string) (Group, errors.SDKError) { return t, nil } -func (sdk mfSDK) UpdateGroup(t Group, token string) errors.SDKError { - data, err := json.Marshal(t) +func (sdk mfSDK) UpdateGroup(g Group, token string) (Group, errors.SDKError) { + data, err := json.Marshal(g) if err != nil { - return errors.NewSDKError(err) + 
 return Group{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s/%s", sdk.authURL, groupsEndpoint, t.ID) - _, _, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) + url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, groupsEndpoint, g.ID) + _, body, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return Group{}, sdkerr + } + + g = Group{} + if err := json.Unmarshal(body, &g); err != nil { + return Group{}, errors.NewSDKError(err) + } - return sdkerr + return g, nil } -func (sdk mfSDK) Memberships(memberID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) { - url, err := sdk.withQueryParams(fmt.Sprintf("%s/%s/%s", sdk.authURL, membersEndpoint, memberID), groupsEndpoint, pm) +// EnableGroup enables the group identified with the provided ID. +func (sdk mfSDK) EnableGroup(id, token string) (Group, errors.SDKError) { + return sdk.changeGroupStatus(id, enableEndpoint, token) +} + +// DisableGroup disables the group identified with the provided ID. 
+func (sdk mfSDK) DisableGroup(id, token string) (Group, errors.SDKError) { + return sdk.changeGroupStatus(id, disableEndpoint, token) +} + +func (sdk mfSDK) changeGroupStatus(id, status, token string) (Group, errors.SDKError) { + url := fmt.Sprintf("%s/%s/%s/%s", sdk.usersURL, groupsEndpoint, id, status) + _, body, err := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusOK) if err != nil { - return GroupsPage{}, errors.NewSDKError(err) - } - _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) - if sdkerr != nil { - return GroupsPage{}, sdkerr + return Group{}, err } - - var tp GroupsPage - if err := json.Unmarshal(body, &tp); err != nil { - return GroupsPage{}, errors.NewSDKError(err) + g := Group{} + if err := json.Unmarshal(body, &g); err != nil { + return Group{}, errors.NewSDKError(err) } - return tp, nil + return g, nil } diff --git a/pkg/sdk/go/groups_test.go b/pkg/sdk/go/groups_test.go new file mode 100644 index 0000000000..02cc136cfc --- /dev/null +++ b/pkg/sdk/go/groups_test.go @@ -0,0 +1,764 @@ +package sdk_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/testsutil" + "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + sdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/mainflux/mainflux/users/clients" + cmocks "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/groups" + "github.com/mainflux/mainflux/users/groups/api" + gmocks "github.com/mainflux/mainflux/users/groups/mocks" + "github.com/mainflux/mainflux/users/jwt" + pmocks "github.com/mainflux/mainflux/users/policies/mocks" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" +) + +func newGroupsServer(svc groups.Service) *httptest.Server { + logger := logger.NewMock() + mux := bone.New() + api.MakeGroupsHandler(svc, mux, logger) + return httptest.NewServer(mux) +} + +func TestCreateGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + group := sdk.Group{ + Name: "groupName", + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + } + + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + cases := []struct { + desc string + group sdk.Group + token string + err errors.SDKError + }{ + { + desc: "create group successfully", + group: group, + token: token, + err: nil, + }, + { + desc: "create group with existing name", + group: group, + err: nil, + }, + { + desc: "create group with parent", + group: sdk.Group{ + Name: gName, + ParentID: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.EnabledStatus.String(), + }, + err: nil, + }, + { + desc: "create group with invalid parent", + group: sdk.Group{ + Name: gName, + ParentID: gmocks.WrongID, + Status: mfclients.EnabledStatus.String(), + }, + err: errors.NewSDKErrorWithStatus(errors.ErrCreateEntity, http.StatusInternalServerError), + }, + { + desc: "create group with invalid owner", + group: sdk.Group{ + Name: gName, + OwnerID: gmocks.WrongID, + Status: mfclients.EnabledStatus.String(), + }, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedCreation, http.StatusInternalServerError), + }, + { + desc: "create group with missing name", + group: sdk.Group{ + Status: mfclients.EnabledStatus.String(), + }, + err: 
errors.NewSDKErrorWithStatus(apiutil.ErrNameSize, http.StatusBadRequest), + }, + { + desc: "create a group with every field defined", + group: sdk.Group{ + ID: generateUUID(t), + OwnerID: "owner", + ParentID: "parent", + Name: "name", + Description: description, + Metadata: validMetadata, + Level: 1, + Children: []*sdk.Group{&group}, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Status: mfclients.EnabledStatus.String(), + }, + token: token, + err: nil, + }, + } + for _, tc := range cases { + repoCall := gRepo.On("Save", mock.Anything, mock.Anything).Return(convertGroup(sdk.Group{}), tc.err) + rGroup, err := groupSDK.CreateGroup(tc.group, generateValidToken(t, csvc, cRepo)) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) + if err == nil { + assert.NotEmpty(t, rGroup, fmt.Sprintf("%s: expected not nil on client ID", tc.desc)) + ok := repoCall.Parent.AssertCalled(t, "Save", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + } + repoCall.Unset() + } +} + +func TestListGroups(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + + var grps []sdk.Group + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + + for i := 10; i < 100; i++ { + gr := sdk.Group{ + ID: generateUUID(t), + Name: fmt.Sprintf("group_%d", i), + Metadata: sdk.Metadata{"name": fmt.Sprintf("user_%d", i)}, + Status: mfclients.EnabledStatus.String(), + } + grps = append(grps, gr) + } + + cases := []struct { + desc string + token string + status mfclients.Status + total uint64 + offset uint64 + limit uint64 + level int + 
name string + ownerID string + metadata sdk.Metadata + err errors.SDKError + response []sdk.Group + }{ + { + desc: "get a list of groups", + token: token, + limit: limit, + offset: offset, + total: total, + err: nil, + response: grps[offset:limit], + }, + { + desc: "get a list of groups with invalid token", + token: invalidToken, + offset: offset, + limit: limit, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), + response: nil, + }, + { + desc: "get a list of groups with empty token", + token: "", + offset: offset, + limit: limit, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), + response: nil, + }, + { + desc: "get a list of groups with zero limit", + token: token, + offset: offset, + limit: 0, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), + response: nil, + }, + { + desc: "get a list of groups with limit greater than max", + token: token, + offset: offset, + limit: 110, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), + response: []sdk.Group(nil), + }, + { + desc: "get a list of groups with given name", + token: token, + offset: 0, + limit: 1, + err: nil, + metadata: sdk.Metadata{}, + response: []sdk.Group{grps[89]}, + }, + { + desc: "get a list of groups with level", + token: token, + offset: 0, + limit: 1, + level: 1, + err: nil, + response: []sdk.Group{grps[0]}, + }, + { + desc: "get a list of groups with metadata", + token: token, + offset: 0, + limit: 1, + err: nil, + metadata: sdk.Metadata{}, + response: []sdk.Group{grps[89]}, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveAll", mock.Anything, mock.Anything).Return(mfgroups.GroupsPage{Groups: convertGroups(tc.response)}, tc.err) + pm := sdk.PageMetadata{} + page, err := groupSDK.Groups(pm, generateValidToken(t, csvc, cRepo)) + assert.Equal(t, 
tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, len(tc.response), len(page.Groups), fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "RetrieveAll", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("RetrieveAll was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestViewGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + + group := sdk.Group{ + Name: "groupName", + Description: description, + Metadata: validMetadata, + Children: []*sdk.Group{}, + Status: mfclients.EnabledStatus.String(), + } + + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + group.ID = generateUUID(t) + + cases := []struct { + desc string + token string + groupID string + response sdk.Group + err errors.SDKError + }{ + { + + desc: "view group", + token: generateValidToken(t, csvc, cRepo), + groupID: group.ID, + response: group, + err: nil, + }, + { + desc: "view group with invalid token", + token: "wrongtoken", + groupID: group.ID, + response: sdk.Group{Children: []*sdk.Group{}}, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "view group for wrong id", + token: generateValidToken(t, csvc, cRepo), + groupID: gmocks.WrongID, + response: sdk.Group{Children: []*sdk.Group{}}, + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, 
mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveByID", mock.Anything, tc.groupID).Return(convertGroup(tc.response), tc.err) + grp, err := groupSDK.Group(tc.groupID, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + if len(tc.response.Children) == 0 { + tc.response.Children = nil + } + if len(grp.Children) == 0 { + grp.Children = nil + } + assert.Equal(t, tc.response, grp, fmt.Sprintf("%s: expected metadata %v got %v\n", tc.desc, tc.response, grp)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, tc.groupID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + + group := sdk.Group{ + ID: generateUUID(t), + Name: "groupName", + Description: description, + Metadata: validMetadata, + } + + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + + group.ID = generateUUID(t) + + cases := []struct { + desc string + token string + group sdk.Group + response sdk.Group + err errors.SDKError + }{ + { + desc: "update group name", + group: sdk.Group{ + ID: group.ID, + Name: "NewName", + }, + response: sdk.Group{ + ID: group.ID, + Name: "NewName", + }, + token: generateValidToken(t, csvc, cRepo), + err: nil, + }, + { + desc: "update group description", + group: 
sdk.Group{ + ID: group.ID, + Description: "NewDescription", + }, + response: sdk.Group{ + ID: group.ID, + Description: "NewDescription", + }, + token: generateValidToken(t, csvc, cRepo), + err: nil, + }, + { + desc: "update group metadata", + group: sdk.Group{ + ID: group.ID, + Metadata: sdk.Metadata{ + "field": "value2", + }, + }, + response: sdk.Group{ + ID: group.ID, + Metadata: sdk.Metadata{ + "field": "value2", + }, + }, + token: generateValidToken(t, csvc, cRepo), + err: nil, + }, + { + desc: "update group name with invalid group id", + group: sdk.Group{ + ID: gmocks.WrongID, + Name: "NewName", + }, + response: sdk.Group{}, + token: generateValidToken(t, csvc, cRepo), + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, + { + desc: "update group description with invalid group id", + group: sdk.Group{ + ID: gmocks.WrongID, + Description: "NewDescription", + }, + response: sdk.Group{}, + token: generateValidToken(t, csvc, cRepo), + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, + { + desc: "update group metadata with invalid group id", + group: sdk.Group{ + ID: gmocks.WrongID, + Metadata: sdk.Metadata{ + "field": "value2", + }, + }, + response: sdk.Group{}, + token: generateValidToken(t, csvc, cRepo), + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, + { + desc: "update group name with invalid token", + group: sdk.Group{ + ID: group.ID, + Name: "NewName", + }, + response: sdk.Group{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update group description with invalid token", + group: sdk.Group{ + ID: group.ID, + Description: "NewDescription", + }, + response: sdk.Group{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update group metadata with invalid token", + group: sdk.Group{ + ID: group.ID, + 
Metadata: sdk.Metadata{ + "field": "value2", + }, + }, + response: sdk.Group{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := gRepo.On("Update", mock.Anything, mock.Anything).Return(convertGroup(tc.response), tc.err) + _, err := groupSDK.UpdateGroup(tc.group, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Update", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestListMemberships(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + + var nGroups = uint64(100) + var aGroups = []sdk.Group{} + + for i := uint64(1); i < nGroups; i++ { + group := sdk.Group{ + Name: fmt.Sprintf("membership_%d@example.com", i), + Metadata: sdk.Metadata{"role": "group"}, + Status: mfclients.EnabledStatus.String(), + } + aGroups = append(aGroups, group) + } + + cases := []struct { + desc string + token string + clientID string + page sdk.PageMetadata + response []sdk.Group + err errors.SDKError + }{ + { + desc: "list clients with authorized token", + token: 
generateValidToken(t, csvc, cRepo), + clientID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{}, + response: aGroups, + err: nil, + }, + { + desc: "list clients with offset and limit", + token: generateValidToken(t, csvc, cRepo), + clientID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Offset: 6, + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus.String(), + }, + response: aGroups[6 : nGroups-1], + err: nil, + }, + { + desc: "list clients with given name", + token: generateValidToken(t, csvc, cRepo), + clientID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Name: gName, + Offset: 6, + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus.String(), + }, + response: aGroups[6 : nGroups-1], + err: nil, + }, + { + desc: "list clients with given level", + token: generateValidToken(t, csvc, cRepo), + clientID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Level: 1, + Offset: 6, + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus.String(), + }, + response: aGroups[6 : nGroups-1], + err: nil, + }, + { + desc: "list clients with metadata", + token: generateValidToken(t, csvc, cRepo), + clientID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Metadata: validMetadata, + Offset: 6, + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus.String(), + }, + response: aGroups[6 : nGroups-1], + err: nil, + }, + { + desc: "list clients with an invalid token", + token: invalidToken, + clientID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{}, + response: []sdk.Group(nil), + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "list clients with an invalid id", + token: generateValidToken(t, csvc, cRepo), + clientID: gmocks.WrongID, + page: sdk.PageMetadata{}, + response: []sdk.Group(nil), + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, 
+ } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := gRepo.On("Memberships", mock.Anything, tc.clientID, mock.Anything).Return(convertMembershipsPage(sdk.MembershipsPage{Memberships: tc.response}), tc.err) + page, err := groupSDK.Memberships(tc.clientID, tc.page, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page.Memberships, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page.Memberships)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Memberships", mock.Anything, tc.clientID, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Memberships was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestEnableGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + + creationTime := time.Now().UTC() + group := sdk.Group{ + ID: generateUUID(t), + Name: gName, + OwnerID: generateUUID(t), + CreatedAt: creationTime, + UpdatedAt: creationTime, + Status: mfclients.Disabled, + } + + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveByID", mock.Anything, mock.Anything).Return(nil) + repoCall2 := gRepo.On("ChangeStatus", mock.Anything, mock.Anything).Return(sdk.ErrFailedRemoval) + _, 
err := groupSDK.EnableGroup("wrongID", generateValidToken(t, csvc, cRepo)) + assert.Equal(t, err, errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), fmt.Sprintf("Enable group with wrong id: expected %v got %v", errors.ErrNotFound, err)) + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, "CheckAdmin was not called on enabling group") + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, "wrongID") + assert.True(t, ok, "RetrieveByID was not called on enabling group") + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + + g := mfgroups.Group{ + ID: group.ID, + Name: group.Name, + Owner: group.OwnerID, + CreatedAt: creationTime, + UpdatedAt: creationTime, + Status: mfclients.DisabledStatus, + } + + repoCall = pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 = gRepo.On("RetrieveByID", mock.Anything, mock.Anything).Return(g, nil) + repoCall2 = gRepo.On("ChangeStatus", mock.Anything, mock.Anything).Return(g, nil) + res, err := groupSDK.EnableGroup(group.ID, generateValidToken(t, csvc, cRepo)) + assert.Nil(t, err, fmt.Sprintf("Enable group with correct id: expected %v got %v", nil, err)) + assert.Equal(t, group, res, fmt.Sprintf("Enable group with correct id: expected %v got %v", group, res)) + ok = repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, "CheckAdmin was not called on enabling group") + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, group.ID) + assert.True(t, ok, "RetrieveByID was not called on enabling group") + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", mock.Anything, mock.Anything) + assert.True(t, ok, "ChangeStatus was not called on enabling group") + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() +} + +func TestDisableGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(gmocks.GroupRepository) + pRepo := 
new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + ts := newGroupsServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + groupSDK := sdk.NewSDK(conf) + + creationTime := time.Now().UTC() + group := sdk.Group{ + ID: generateUUID(t), + Name: gName, + OwnerID: generateUUID(t), + CreatedAt: creationTime, + UpdatedAt: creationTime, + Status: mfclients.Enabled, + } + + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := gRepo.On("ChangeStatus", mock.Anything, mock.Anything).Return(sdk.ErrFailedRemoval) + repoCall2 := gRepo.On("RetrieveByID", mock.Anything, mock.Anything).Return(nil) + _, err := groupSDK.DisableGroup("wrongID", generateValidToken(t, csvc, cRepo)) + assert.Equal(t, err, errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), fmt.Sprintf("Disable group with wrong id: expected %v got %v", errors.ErrNotFound, err)) + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, "CheckAdmin was not called on disabling group with wrong id") + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, "wrongID") + assert.True(t, ok, "Memberships was not called on disabling group with wrong id") + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + + g := mfgroups.Group{ + ID: group.ID, + Name: group.Name, + Owner: group.OwnerID, + CreatedAt: creationTime, + UpdatedAt: creationTime, + Status: mfclients.EnabledStatus, + } + + repoCall = pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 = gRepo.On("ChangeStatus", mock.Anything, mock.Anything).Return(g, nil) + repoCall2 = gRepo.On("RetrieveByID", mock.Anything, mock.Anything).Return(g, nil) + res, err := groupSDK.DisableGroup(group.ID, 
generateValidToken(t, csvc, cRepo)) + assert.Nil(t, err, fmt.Sprintf("Disable group with correct id: expected %v got %v", nil, err)) + assert.Equal(t, group, res, fmt.Sprintf("Disable group with correct id: expected %v got %v", group, res)) + ok = repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, "CheckAdmin was not called on disabling group with correct id") + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, group.ID) + assert.True(t, ok, "RetrieveByID was not called on disabling group with correct id") + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", mock.Anything, mock.Anything) + assert.True(t, ok, "ChangeStatus was not called on disabling group with correct id") + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() +} diff --git a/pkg/sdk/go/health_test.go b/pkg/sdk/go/health_test.go deleted file mode 100644 index 57a11a1773..0000000000 --- a/pkg/sdk/go/health_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package sdk_test - -import ( - "fmt" - "testing" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/stretchr/testify/assert" -) - -const ( - thingsDescription = "things service" - thingsStatus = "pass" -) - -func TestHealth(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - cases := map[string]struct { - empty bool - err errors.SDKError - }{ - "get things service health check": { - empty: false, - err: nil, - }, - } - for desc, tc := range cases { - h, err := mainfluxSDK.Health() - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", desc, tc.err, err)) - assert.Equal(t, thingsStatus, 
h.Status, fmt.Sprintf("%s: expected %s status, got %s", desc, thingsStatus, h.Status)) - assert.Equal(t, tc.empty, h.Version == "", fmt.Sprintf("%s: expected non-empty version", desc)) - assert.Equal(t, mainflux.Commit, h.Commit, fmt.Sprintf("%s: expected non-empty commit", desc)) - assert.Equal(t, thingsDescription, h.Description, fmt.Sprintf("%s: expected proper description, got %s", desc, h.Description)) - assert.Equal(t, mainflux.BuildTime, h.BuildTime, fmt.Sprintf("%s: expected default epoch date, got %s", desc, h.BuildTime)) - } -} diff --git a/pkg/sdk/go/keys.go b/pkg/sdk/go/keys.go deleted file mode 100644 index a768709e57..0000000000 --- a/pkg/sdk/go/keys.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package sdk - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/mainflux/mainflux/pkg/errors" -) - -type keyReq struct { - Type uint32 `json:"type,omitempty"` - Duration time.Duration `json:"duration,omitempty"` -} - -const keysEndpoint = "keys" - -const ( - // LoginKey is temporary User key received on successful login. - LoginKey uint32 = iota - // RecoveryKey represents a key for resseting password. - RecoveryKey - // APIKey enables the one to act on behalf of the user. 
- APIKey -) - -func (sdk mfSDK) Issue(d time.Duration, token string) (KeyRes, errors.SDKError) { - datareq := keyReq{Type: APIKey, Duration: d} - data, err := json.Marshal(datareq) - if err != nil { - return KeyRes{}, errors.NewSDKError(err) - } - - url := fmt.Sprintf("%s/%s", sdk.authURL, keysEndpoint) - - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) - if sdkerr != nil { - return KeyRes{}, sdkerr - } - - var key KeyRes - if err := json.Unmarshal(body, &key); err != nil { - return KeyRes{}, errors.NewSDKError(err) - } - - return key, nil -} - -func (sdk mfSDK) Revoke(id, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s", sdk.authURL, keysEndpoint, id) - _, _, err := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) - return err -} - -func (sdk mfSDK) RetrieveKey(id, token string) (retrieveKeyRes, errors.SDKError) { - url := fmt.Sprintf("%s/%s/%s", sdk.authURL, keysEndpoint, id) - _, body, err := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) - if err != nil { - return retrieveKeyRes{}, err - } - - var key retrieveKeyRes - if err := json.Unmarshal(body, &key); err != nil { - return retrieveKeyRes{}, errors.NewSDKError(err) - } - - return key, nil -} diff --git a/pkg/sdk/go/message_test.go b/pkg/sdk/go/message_test.go index 45f02c06f5..793f2cc988 100644 --- a/pkg/sdk/go/message_test.go +++ b/pkg/sdk/go/message_test.go @@ -9,28 +9,25 @@ import ( "net/http/httptest" "testing" - "github.com/mainflux/mainflux" adapter "github.com/mainflux/mainflux/http" "github.com/mainflux/mainflux/http/api" "github.com/mainflux/mainflux/http/mocks" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" sdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/opentracing/opentracing-go/mocktracer" + "github.com/mainflux/mainflux/things/policies" 
"github.com/stretchr/testify/assert" ) const eof = "EOF" -func newMessageService(cc mainflux.ThingsServiceClient) adapter.Service { +func newMessageService(cc policies.ThingsServiceClient) adapter.Service { pub := mocks.NewPublisher() return adapter.New(pub, cc) } func newMessageServer(svc adapter.Service) *httptest.Server { - logger := logger.NewMock() - mux := api.MakeHandler(svc, mocktracer.New(), logger) + mux := api.MakeHandler(svc) return httptest.NewServer(mux) } @@ -96,7 +93,11 @@ func TestSendMessage(t *testing.T) { } for desc, tc := range cases { err := mainfluxSDK.SendMessage(tc.chanID, tc.msg, tc.auth) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", desc, tc.err, err)) + if tc.err == nil { + assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error: %s", desc, err)) + } else { + assert.Equal(t, tc.err.Error(), err.Error(), fmt.Sprintf("%s: expected error %s, got %s", desc, err, tc.err)) + } } } diff --git a/pkg/sdk/go/metadata.go b/pkg/sdk/go/metadata.go new file mode 100644 index 0000000000..26206d1429 --- /dev/null +++ b/pkg/sdk/go/metadata.go @@ -0,0 +1,3 @@ +package sdk + +type Metadata map[string]interface{} diff --git a/pkg/sdk/go/policies.go b/pkg/sdk/go/policies.go index 499b0cfa7d..bb58da7f65 100644 --- a/pkg/sdk/go/policies.go +++ b/pkg/sdk/go/policies.go @@ -4,38 +4,153 @@ import ( "encoding/json" "fmt" "net/http" + "time" "github.com/mainflux/mainflux/pkg/errors" ) -const policiesEndpoint = "policies" +const ( + policiesEndpoint = "policies" +) +// Policy represents an argument struct for making a policy related function calls. 
type Policy struct { - Object string `json:"object,omitempty"` - Subject []string `json:"subjects,omitempty"` - Policies []string `json:"policies,omitempty"` + OwnerID string `json:"owner_id"` + Subject string `json:"subject"` + Object string `json:"object"` + Actions []string `json:"actions"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } -func (sdk mfSDK) CreatePolicy(policy Policy, token string) errors.SDKError { - data, err := json.Marshal(policy) +// CreatePolicy creates a policy for the given subject, so that, after +// CreatePolicy, `subject` has a `relation` on `object`. Returns a non-nil +// error in case of failures. +func (sdk mfSDK) CreatePolicy(p Policy, token string) errors.SDKError { + data, err := json.Marshal(p) + if err != nil { + return errors.NewSDKError(err) + } + + url := fmt.Sprintf("%s/%s", sdk.usersURL, policiesEndpoint) + _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + if sdkerr != nil { + return sdkerr + } + + return nil +} + +// UpdatePolicy updates policies based on the given policy structure. +func (sdk mfSDK) UpdatePolicy(p Policy, token string) errors.SDKError { + data, err := json.Marshal(p) if err != nil { return errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.authURL, policiesEndpoint) + url := fmt.Sprintf("%s/%s", sdk.usersURL, policiesEndpoint) + + _, _, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusNoContent) + if sdkerr != nil { + return sdkerr + } + + return nil +} + +// ListPolicies lists policies based on the given policy structure. 
+func (sdk mfSDK) ListPolicies(pm PageMetadata, token string) (PolicyPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.usersURL, policiesEndpoint, pm) + if err != nil { + return PolicyPage{}, errors.NewSDKError(err) + } + + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return PolicyPage{}, sdkerr + } + + var pp PolicyPage + if err := json.Unmarshal(body, &pp); err != nil { + return PolicyPage{}, errors.NewSDKError(err) + } + + return pp, nil +} + +// DeletePolicy removes a policy. +func (sdk mfSDK) DeletePolicy(p Policy, token string) errors.SDKError { + url := fmt.Sprintf("%s/%s/%s/%s", sdk.usersURL, policiesEndpoint, p.Subject, p.Object) + + _, _, sdkerr := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) + return sdkerr +} +func (sdk mfSDK) Assign(memberType []string, memberID, groupID, token string) errors.SDKError { + var policy = Policy{ + Subject: memberID, + Object: groupID, + Actions: memberType, + } + data, err := json.Marshal(policy) + if err != nil { + return errors.NewSDKError(err) + } + url := fmt.Sprintf("%s/%s", sdk.usersURL, policiesEndpoint) _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) return sdkerr } -func (sdk mfSDK) DeletePolicy(policy Policy, token string) errors.SDKError { +func (sdk mfSDK) Unassign(memberType []string, groupID string, memberID string, token string) errors.SDKError { + var policy = Policy{ + Subject: memberID, + Object: groupID, + Actions: memberType, + } data, err := json.Marshal(policy) if err != nil { return errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.authURL, policiesEndpoint) + url := fmt.Sprintf("%s/%s/%s/%s", sdk.usersURL, policiesEndpoint, groupID, memberID) _, _, sdkerr := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), data, http.StatusNoContent) return sdkerr } + +func (sdk mfSDK) 
Connect(connIDs ConnectionIDs, token string) errors.SDKError { + data, err := json.Marshal(connIDs) + if err != nil { + return errors.NewSDKError(err) + } + + url := fmt.Sprintf("%s/%s", sdk.thingsURL, connectEndpoint) + + _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + return sdkerr +} + +func (sdk mfSDK) Disconnect(connIDs ConnectionIDs, token string) errors.SDKError { + data, err := json.Marshal(connIDs) + if err != nil { + return errors.NewSDKError(err) + } + + url := fmt.Sprintf("%s/%s", sdk.thingsURL, disconnectEndpoint) + _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusNoContent) + return sdkerr +} + +func (sdk mfSDK) ConnectThing(thingID, chanID, token string) errors.SDKError { + url := fmt.Sprintf("%s/%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, chanID, thingsEndpoint, thingID) + + _, _, err := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusCreated) + return err +} + +func (sdk mfSDK) DisconnectThing(thingID, chanID, token string) errors.SDKError { + url := fmt.Sprintf("%s/%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, chanID, thingsEndpoint, thingID) + + _, _, err := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) + return err +} diff --git a/pkg/sdk/go/policies_test.go b/pkg/sdk/go/policies_test.go new file mode 100644 index 0000000000..a61e4ecd0d --- /dev/null +++ b/pkg/sdk/go/policies_test.go @@ -0,0 +1,396 @@ +package sdk_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/pkg/errors" + sdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/mainflux/mainflux/users/clients" + cmocks "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/jwt" + 
"github.com/mainflux/mainflux/users/policies" + api "github.com/mainflux/mainflux/users/policies/api/http" + pmocks "github.com/mainflux/mainflux/users/policies/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func newPolicyServer(svc policies.Service) *httptest.Server { + logger := logger.NewMock() + mux := bone.New() + api.MakePolicyHandler(svc, mux, logger) + return httptest.NewServer(mux) +} + +func TestCreatePolicy(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + ts := newPolicyServer(svc) + defer ts.Close() + conf := sdk.Config{ + UsersURL: ts.URL, + } + policySDK := sdk.NewSDK(conf) + + clientPolicy := sdk.Policy{Object: object, Actions: []string{"m_write", "g_add"}, Subject: subject} + + cases := []struct { + desc string + policy sdk.Policy + page sdk.PolicyPage + token string + err errors.SDKError + }{ + { + desc: "add new policy", + policy: sdk.Policy{ + Subject: subject, + Object: object, + Actions: []string{"m_write", "g_add"}, + }, + page: sdk.PolicyPage{}, + token: generateValidToken(t, csvc, cRepo), + err: nil, + }, + { + desc: "add existing policy", + policy: sdk.Policy{ + Subject: subject, + Object: object, + Actions: []string{"m_write", "g_add"}, + }, + page: sdk.PolicyPage{Policies: []sdk.Policy{sdk.Policy(clientPolicy)}}, + token: generateValidToken(t, csvc, cRepo), + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedCreation, http.StatusInternalServerError), + }, + { + desc: "add a new policy with owner", + page: sdk.PolicyPage{}, + policy: sdk.Policy{ + OwnerID: generateUUID(t), + Object: "objwithowner", + Actions: []string{"m_read"}, + Subject: "subwithowner", + }, + err: nil, + token: generateValidToken(t, csvc, cRepo), + }, 
+ { + desc: "add a new policy with more actions", + page: sdk.PolicyPage{}, + policy: sdk.Policy{ + Object: "obj2", + Actions: []string{"c_delete", "c_update", "c_add", "c_list"}, + Subject: "sub2", + }, + err: nil, + token: generateValidToken(t, csvc, cRepo), + }, + { + desc: "add a new policy with wrong action", + page: sdk.PolicyPage{}, + policy: sdk.Policy{ + Object: "obj3", + Actions: []string{"wrong"}, + Subject: "sub3", + }, + err: errors.NewSDKErrorWithStatus(apiutil.ErrMalformedPolicyAct, http.StatusInternalServerError), + token: generateValidToken(t, csvc, cRepo), + }, + { + desc: "add a new policy with empty object", + page: sdk.PolicyPage{}, + policy: sdk.Policy{ + Actions: []string{"c_delete"}, + Subject: "sub4", + }, + err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingPolicyObj, http.StatusInternalServerError), + token: generateValidToken(t, csvc, cRepo), + }, + { + desc: "add a new policy with empty subject", + page: sdk.PolicyPage{}, + policy: sdk.Policy{ + Actions: []string{"c_delete"}, + Object: "obj4", + }, + err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingPolicySub, http.StatusInternalServerError), + token: generateValidToken(t, csvc, cRepo), + }, + { + desc: "add a new policy with empty action", + page: sdk.PolicyPage{}, + policy: sdk.Policy{ + Subject: "sub5", + Object: "obj5", + }, + err: errors.NewSDKErrorWithStatus(apiutil.ErrMalformedPolicyAct, http.StatusInternalServerError), + token: generateValidToken(t, csvc, cRepo), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("Retrieve", mock.Anything, mock.Anything).Return(convertPolicyPage(tc.page), nil) + repoCall1 := pRepo.On("Update", mock.Anything, mock.Anything).Return(tc.err) + repoCall2 := pRepo.On("Save", mock.Anything, mock.Anything).Return(tc.err) + err := policySDK.CreatePolicy(tc.policy, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, 
"Retrieve", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Retrieve was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "Save", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + if tc.desc == "add existing policy" { + ok = repoCall1.Parent.AssertCalled(t, "Update", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } +} + +func TestUpdatePolicy(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + ts := newPolicyServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + policySDK := sdk.NewSDK(conf) + + policy := sdk.Policy{ + Subject: subject, + Object: object, + Actions: []string{"m_write", "g_add"}, + } + + cases := []struct { + desc string + action []string + token string + err errors.SDKError + }{ + { + desc: "update policy actions with valid token", + action: []string{"m_write", "m_read", "g_add"}, + token: generateValidToken(t, csvc, cRepo), + err: nil, + }, + { + desc: "update policy action with invalid token", + action: []string{"m_write"}, + token: "non-existent", + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update policy action with wrong policy action", + action: []string{"wrong"}, + token: generateValidToken(t, csvc, cRepo), + err: errors.NewSDKErrorWithStatus(apiutil.ErrMalformedPolicyAct, http.StatusInternalServerError), + }, + } + + for _, tc := range cases { + policy.Actions = tc.action + policy.CreatedAt = time.Now() + repoCall := pRepo.On("Retrieve", mock.Anything, mock.Anything, 
mock.Anything, mock.Anything).Return(policies.PolicyPage{}, nil) + repoCall1 := pRepo.On("Update", mock.Anything, mock.Anything).Return(tc.err) + err := policySDK.UpdatePolicy(policy, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + ok := repoCall1.Parent.AssertCalled(t, "Update", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestListPolicies(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + ts := newPolicyServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + policySDK := sdk.NewSDK(conf) + id := generateUUID(t) + + var nPolicy = uint64(10) + var aPolicies = []sdk.Policy{} + for i := uint64(0); i < nPolicy; i++ { + pr := sdk.Policy{ + OwnerID: id, + Actions: []string{"m_read"}, + Subject: fmt.Sprintf("thing_%d", i), + Object: fmt.Sprintf("client_%d", i), + } + if i%3 == 0 { + pr.Actions = []string{"m_write"} + } + aPolicies = append(aPolicies, pr) + } + + cases := []struct { + desc string + token string + page sdk.PageMetadata + response []sdk.Policy + err errors.SDKError + }{ + { + desc: "list policies with authorized token", + token: generateValidToken(t, csvc, cRepo), + err: nil, + response: aPolicies, + }, + { + desc: "list policies with invalid token", + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + response: []sdk.Policy(nil), + }, + { + desc: "list policies with offset and limit", + token: generateValidToken(t, csvc, cRepo), + err: nil, + page: sdk.PageMetadata{ + Offset: 6, + Limit: nPolicy, + }, + response: 
aPolicies[6:10], + }, + { + desc: "list policies with given name", + token: generateValidToken(t, csvc, cRepo), + err: nil, + page: sdk.PageMetadata{ + Offset: 6, + Limit: nPolicy, + }, + response: aPolicies[6:10], + }, + { + desc: "list policies with given identifier", + token: generateValidToken(t, csvc, cRepo), + err: nil, + page: sdk.PageMetadata{ + Offset: 6, + Limit: nPolicy, + }, + response: aPolicies[6:10], + }, + { + desc: "list policies with given ownerID", + token: generateValidToken(t, csvc, cRepo), + err: nil, + page: sdk.PageMetadata{ + Offset: 6, + Limit: nPolicy, + }, + response: aPolicies[6:10], + }, + { + desc: "list policies with given subject", + token: generateValidToken(t, csvc, cRepo), + err: nil, + page: sdk.PageMetadata{ + Offset: 6, + Limit: nPolicy, + }, + response: aPolicies[6:10], + }, + { + desc: "list policies with given object", + token: generateValidToken(t, csvc, cRepo), + err: nil, + page: sdk.PageMetadata{ + Offset: 6, + Limit: nPolicy, + }, + response: aPolicies[6:10], + }, + { + desc: "list policies with wrong action", + token: generateValidToken(t, csvc, cRepo), + page: sdk.PageMetadata{ + Action: "wrong", + }, + response: []sdk.Policy(nil), + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := pRepo.On("Retrieve", mock.Anything, mock.Anything).Return(convertPolicyPage(sdk.PolicyPage{Policies: tc.response}), tc.err) + pp, err := policySDK.ListPolicies(tc.page, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, pp.Policies, fmt.Sprintf("%s: expected %v, got %v", tc.desc, tc.response, pp)) + ok := repoCall.Parent.AssertCalled(t, "Retrieve", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Retrieve was not called on %s", tc.desc)) + repoCall.Unset() + 
repoCall1.Unset() + } +} + +func TestDeletePolicy(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + csvc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + ts := newPolicyServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + policySDK := sdk.NewSDK(conf) + + sub := generateUUID(t) + pr := sdk.Policy{Object: authoritiesObj, Actions: []string{"m_read", "g_add", "c_delete"}, Subject: sub} + cpr := sdk.Policy{Object: authoritiesObj, Actions: []string{"m_read", "g_add", "c_delete"}, Subject: sub} + + repoCall := pRepo.On("Retrieve", mock.Anything, mock.Anything).Return(convertPolicyPage(sdk.PolicyPage{Policies: []sdk.Policy{cpr}}), nil) + repoCall1 := pRepo.On("Delete", mock.Anything, mock.Anything).Return(nil) + err := policySDK.DeletePolicy(pr, generateValidToken(t, csvc, cRepo)) + assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + ok := repoCall1.Parent.AssertCalled(t, "Delete", mock.Anything, mock.Anything) + assert.True(t, ok, "Delete was not called on valid policy") + repoCall1.Unset() + repoCall.Unset() + + repoCall = pRepo.On("Retrieve", mock.Anything, mock.Anything).Return(convertPolicyPage(sdk.PolicyPage{Policies: []sdk.Policy{cpr}}), nil) + repoCall1 = pRepo.On("Delete", mock.Anything, mock.Anything).Return(sdk.ErrFailedRemoval) + err = policySDK.DeletePolicy(pr, invalidToken) + assert.Equal(t, err, errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), fmt.Sprintf("expected %s got %s", pr, err)) + ok = repoCall.Parent.AssertCalled(t, "Delete", mock.Anything, mock.Anything) + assert.True(t, ok, "Delete was not called on invalid policy") + repoCall1.Unset() + repoCall.Unset() +} diff --git a/pkg/sdk/go/requests.go b/pkg/sdk/go/requests.go index 1ca437a12f..d1503b7191 
100644 --- a/pkg/sdk/go/requests.go +++ b/pkg/sdk/go/requests.go @@ -3,9 +3,21 @@ package sdk -type assignRequest struct { - Type string `json:"type,omitempty"` - Members []string `json:"members"` +// updateClientSecretReq is used to update the client secret +type updateClientSecretReq struct { + OldSecret string `json:"old_secret,omitempty"` + NewSecret string `json:"new_secret,omitempty"` +} + +type updateThingSecretReq struct { + Secret string `json:"secret,omitempty"` +} + +// updateClientIdentityReq is used to update the client identity +type updateClientIdentityReq struct { + token string + id string + Identity string `json:"identity,omitempty"` } // UserPasswordReq contains old and new passwords @@ -16,6 +28,16 @@ type UserPasswordReq struct { // ConnectionIDs contains ID lists of things and channels to be connected type ConnectionIDs struct { - ChannelIDs []string `json:"channel_ids"` - ThingIDs []string `json:"thing_ids"` + ChannelIDs []string `json:"group_ids"` + ThingIDs []string `json:"client_ids"` + Actions []string `json:"actions,omitempty"` +} + +type tokenReq struct { + Identity string `json:"identity"` + Secret string `json:"secret"` +} + +type identifyThingReq struct { + Token string `json:"token,omitempty"` } diff --git a/pkg/sdk/go/responses.go b/pkg/sdk/go/responses.go index 47e5446fee..755a45fe5c 100644 --- a/pkg/sdk/go/responses.go +++ b/pkg/sdk/go/responses.go @@ -10,10 +10,6 @@ import ( "github.com/mainflux/mainflux/pkg/transformers/senml" ) -type tokenRes struct { - Token string `json:"token,omitempty"` -} - type createThingsRes struct { Things []Thing `json:"things"` } @@ -57,10 +53,23 @@ type UsersPage struct { } type MembersPage struct { - Members []string `json:"members"` + Members []User `json:"members"` + pageRes +} + +// MembershipsPage contains page related metadata as well as list of memberships that +// belong to this page. 
+type MembershipsPage struct { pageRes + Memberships []Group `json:"memberships"` } +// PolicyPage contains page related metadata as well as list +// of Policies that belong to the page. +type PolicyPage struct { + PageMetadata + Policies []Policy +} type KeyRes struct { ID string `json:"id,omitempty"` Value string `json:"value,omitempty"` @@ -80,26 +89,6 @@ func (res KeyRes) Empty() bool { return res.Value == "" } -type retrieveKeyRes struct { - ID string `json:"id,omitempty"` - IssuerID string `json:"issuer_id,omitempty"` - Subject string `json:"subject,omitempty"` - IssuedAt time.Time `json:"issued_at,omitempty"` - ExpiresAt *time.Time `json:"expires_at,omitempty"` -} - -func (res retrieveKeyRes) Code() int { - return http.StatusOK -} - -func (res retrieveKeyRes) Headers() map[string]string { - return map[string]string{} -} - -func (res retrieveKeyRes) Empty() bool { - return false -} - type revokeCertsRes struct { RevocationTime time.Time `json:"revocation_time"` } @@ -119,3 +108,7 @@ type SubscriptionPage struct { Subscriptions []Subscription `json:"subscriptions"` pageRes } + +type identifyThingResp struct { + ID string `json:"id,omitempty"` +} diff --git a/pkg/sdk/go/sdk.go b/pkg/sdk/go/sdk.go index 579dc31d98..b06bf0ccac 100644 --- a/pkg/sdk/go/sdk.go +++ b/pkg/sdk/go/sdk.go @@ -8,7 +8,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -29,6 +29,12 @@ const ( // CTBinary represents binary content type. CTBinary ContentType = "application/octet-stream" + + // EnabledStatus represents enable status for a client + EnabledStatus = "enabled" + + // DisabledStatus represents disabled status for a client + DisabledStatus = "disabled" ) // ContentType represents all possible content types. @@ -36,67 +42,65 @@ type ContentType string var _ SDK = (*mfSDK)(nil) -// User represents mainflux user its credentials. 
-type User struct { - ID string `json:"id,omitempty"` - Email string `json:"email,omitempty"` - Groups []string `json:"groups,omitempty"` - Password string `json:"password,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} -type PageMetadata struct { - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` - Level uint64 `json:"level,omitempty"` - Email string `json:"email,omitempty"` - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - Disconnected bool `json:"disconnected,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - Status string `json:"status,omitempty"` - State string `json:"state,omitempty"` - Topic string `json:"topic,omitempty"` - Contact string `json:"contact,omitempty"` -} +var ( + // ErrFailedCreation indicates that entity creation failed. + ErrFailedCreation = errors.New("failed to create entity in the db") -// Group represents mainflux users group. -type Group struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - ParentID string `json:"parent_id,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} + // ErrFailedList indicates that entities list failed. + ErrFailedList = errors.New("failed to list entities") -// Thing represents mainflux thing. -type Thing struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Key string `json:"key,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} + // ErrFailedUpdate indicates that entity update failed. + ErrFailedUpdate = errors.New("failed to update entity") + + // ErrFailedFetch indicates that fetching of entity data failed. + ErrFailedFetch = errors.New("failed to fetch entity") + + // ErrFailedRemoval indicates that entity removal failed. 
+ ErrFailedRemoval = errors.New("failed to remove entity") + + // ErrFailedEnable indicates that client enable failed. + ErrFailedEnable = errors.New("failed to enable client") -// Channel represents mainflux channel. -type Channel struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` + // ErrFailedDisable indicates that client disable failed. + ErrFailedDisable = errors.New("failed to disable client") +) + +type PageMetadata struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` + Level uint64 `json:"level,omitempty"` + Email string `json:"email,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Disconnected bool `json:"disconnected,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + Status string `json:"status,omitempty"` + Action string `json:"action,omitempty"` + Subject string `json:"subject,omitempty"` + Object string `json:"object,omitempty"` + Tag string `json:"tag,omitempty"` + Owner string `json:"owner,omitempty"` + SharedBy string `json:"shared_by,omitempty"` + Visibility string `json:"visibility,omitempty"` + OwnerID string `json:"owner_id,omitempty"` + Topic string `json:"topic,omitempty"` + Contact string `json:"contact,omitempty"` + State string `json:"state,omitempty"` } -type Key struct { - ID string - Type uint32 - IssuerID string - Subject string - IssuedAt time.Time - ExpiresAt time.Time +// Credentials represent client credentials: it contains +// "identity" which can be a username, email, generated name; +// and "secret" which can be a password or access token. +type Credentials struct { + Identity string `json:"identity,omitempty"` // username or generated login ID + Secret string `json:"secret,omitempty"` // password or token } // SDK contains Mainflux API. type SDK interface { // CreateUser registers mainflux user. 
- CreateUser(user User, token string) (string, errors.SDKError) + CreateUser(user User, token string) (User, errors.SDKError) // User returns user object by id. User(id, token string) (User, errors.SDKError) @@ -104,28 +108,46 @@ type SDK interface { // Users returns list of users. Users(pm PageMetadata, token string) (UsersPage, errors.SDKError) - // CreateToken receives credentials and returns user token. - CreateToken(user User) (string, errors.SDKError) + // Members retrieves everything that is assigned to a group identified by groupID. + Members(groupID string, meta PageMetadata, token string) (MembersPage, errors.SDKError) + + // UserProfile returns user logged in. + UserProfile(token string) (User, errors.SDKError) // UpdateUser updates existing user. - UpdateUser(user User, token string) errors.SDKError + UpdateUser(user User, token string) (User, errors.SDKError) + + // UpdateUserTags updates the user's tags. + UpdateUserTags(user User, token string) (User, errors.SDKError) + + // UpdateUserIdentity updates the user's identity + UpdateUserIdentity(user User, token string) (User, errors.SDKError) + + // UpdateUserOwner updates the user's owner. + UpdateUserOwner(user User, token string) (User, errors.SDKError) // UpdatePassword updates user password. - UpdatePassword(oldPass, newPass, token string) errors.SDKError + UpdatePassword(oldPass, newPass, token string) (User, errors.SDKError) // EnableUser changes the status of the user to enabled. - EnableUser(id, token string) errors.SDKError + EnableUser(id, token string) (User, errors.SDKError) // DisableUser changes the status of the user to disabled. - DisableUser(id, token string) errors.SDKError + DisableUser(id, token string) (User, errors.SDKError) + + // CreateToken receives credentials and returns user token. + CreateToken(user User) (Token, errors.SDKError) + + // RefreshToken receives credentials and returns user token. 
+ RefreshToken(token string) (Token, errors.SDKError) // CreateThing registers new thing and returns its id. - CreateThing(thing Thing, token string) (string, errors.SDKError) + CreateThing(thing Thing, token string) (Thing, errors.SDKError) // CreateThings registers new things and returns their ids. CreateThings(things []Thing, token string) ([]Thing, errors.SDKError) - // Things returns page of things. + // Filters things and returns a page result. Things(pm PageMetadata, token string) (ThingsPage, errors.SDKError) // ThingsByChannel returns page of things that are connected or not connected @@ -136,34 +158,31 @@ type SDK interface { Thing(id, token string) (Thing, errors.SDKError) // UpdateThing updates existing thing. - UpdateThing(thing Thing, token string) errors.SDKError - - // DeleteThing removes existing thing. - DeleteThing(id, token string) errors.SDKError + UpdateThing(thing Thing, token string) (Thing, errors.SDKError) - // IdentifyThing validates thing's key and returns its ID - IdentifyThing(key string) (string, errors.SDKError) + // UpdateThingTags updates the client's tags. + UpdateThingTags(thing Thing, token string) (Thing, errors.SDKError) - // ShareThing shares a thing with user identified by request body. - ShareThing(thingID, userID string, policies []string, token string) errors.SDKError + // UpdateThingSecret updates the client's secret + UpdateThingSecret(id, secret, token string) (Thing, errors.SDKError) - // UpdateThingKey updates thing key - UpdateThingKey(id, key, token string) errors.SDKError + // UpdateThingOwner updates the client's owner. + UpdateThingOwner(thing Thing, token string) (Thing, errors.SDKError) - // AccessByThingKey checks if thing has access to a channel. - AccessByThingKey(channelID, key string) (string, errors.SDKError) + // EnableThing changes client status to enabled. + EnableThing(id, token string) (Thing, errors.SDKError) - // AccessByThingID checks if thing has access to a channel. 
- AccessByThingID(channelID, id string) errors.SDKError + // DisableThing changes client status to disabled - soft delete. + DisableThing(id, token string) (Thing, errors.SDKError) - // SearchThing search and retrieves things - SearchThing(t Thing, pm PageMetadata, token string) (ThingsPage, errors.SDKError) + // IdentifyThing validates thing's key and returns its ID + IdentifyThing(key string) (string, errors.SDKError) // CreateGroup creates new group and returns its id. - CreateGroup(group Group, token string) (string, errors.SDKError) + CreateGroup(group Group, token string) (Group, errors.SDKError) - // DeleteGroup deletes users group. - DeleteGroup(id, token string) errors.SDKError + // Memberships + Memberships(clientID string, pm PageMetadata, token string) (MembershipsPage, errors.SDKError) // Groups returns page of groups. Groups(pm PageMetadata, token string) (GroupsPage, errors.SDKError) @@ -177,35 +196,17 @@ type SDK interface { // Group returns users group object by id. Group(id, token string) (Group, errors.SDKError) - // Assign assigns member of member type (thing or user) to a group. - Assign(memberIDs []string, memberType, groupID, token string) errors.SDKError - - // Unassign removes member from a group. - Unassign(groupID string, memberIDs []string, token string) errors.SDKError - - // Members lists members of a group. - Members(groupID string, pm PageMetadata, token string) (MembersPage, errors.SDKError) - - // Memberships lists groups for user. - Memberships(userID string, pm PageMetadata, token string) (GroupsPage, errors.SDKError) - // UpdateGroup updates existing group. - UpdateGroup(group Group, token string) errors.SDKError - - // Connect bulk connects things to channels specified by id. - Connect(conns ConnectionIDs, token string) errors.SDKError - - // Disconnect bulk disconnects things to channels specified by id. 
- Disconnect(connIDs ConnectionIDs, token string) errors.SDKError + UpdateGroup(group Group, token string) (Group, errors.SDKError) - // ConnectThing connect thing from specified channel by id. - ConnectThing(thingID, chanID, token string) errors.SDKError + // EnableGroup changes group status to enabled. + EnableGroup(id, token string) (Group, errors.SDKError) - // DisconnectThing disconnect thing from specified channel by id. - DisconnectThing(thingID, chanID, token string) errors.SDKError + // DisableGroup changes group status to disabled - soft delete. + DisableGroup(id, token string) (Group, errors.SDKError) // CreateChannel creates new channel and returns its id. - CreateChannel(channel Channel, token string) (string, errors.SDKError) + CreateChannel(channel Channel, token string) (Channel, errors.SDKError) // CreateChannels registers new channels and returns their ids. CreateChannels(channels []Channel, token string) ([]Channel, errors.SDKError) @@ -221,10 +222,45 @@ type SDK interface { Channel(id, token string) (Channel, errors.SDKError) // UpdateChannel updates existing channel. - UpdateChannel(channel Channel, token string) errors.SDKError + UpdateChannel(channel Channel, token string) (Channel, errors.SDKError) + + // EnableChannel changes channel status to enabled. + EnableChannel(id, token string) (Channel, errors.SDKError) + + // DisableChannel changes channel status to disabled - soft delete. + DisableChannel(id, token string) (Channel, errors.SDKError) + + // CreatePolicy creates a policy for the given subject, so that, after + // CreatePolicy, `subject` has a `relation` on `object`. Returns a non-nil + // error in case of failures. + CreatePolicy(policy Policy, token string) errors.SDKError + + // DeletePolicy deletes policies. + DeletePolicy(policy Policy, token string) errors.SDKError - // DeleteChannel removes existing channel. 
- DeleteChannel(id, token string) errors.SDKError + // UpdatePolicy updates policies based on the given policy structure. + UpdatePolicy(p Policy, token string) errors.SDKError + + // ListPolicies lists policies based on the given policy structure. + ListPolicies(pm PageMetadata, token string) (PolicyPage, errors.SDKError) + + // Assign assigns member of member type (thing or user) to a group. + Assign(memberType []string, memberID, groupID, token string) errors.SDKError + + // Unassign removes member from a group. + Unassign(memberType []string, groupID string, memberID string, token string) errors.SDKError + + // Connect bulk connects things to channels specified by id. + Connect(conns ConnectionIDs, token string) errors.SDKError + + // Disconnect + Disconnect(connIDs ConnectionIDs, token string) errors.SDKError + + // ConnectThing + ConnectThing(thingID, chanID, token string) errors.SDKError + + // DisconnectThing disconnect thing from specified channel by id. + DisconnectThing(thingID, chanID, token string) errors.SDKError // SendMessage send message to specified channel. SendMessage(chanID, msg, key string) errors.SDKError @@ -247,7 +283,7 @@ type SDK interface { // Update updates editable fields of the provided Config. UpdateBootstrap(cfg BootstrapConfig, token string) errors.SDKError - // Update boostrap config certificates + // Update bootstrap config certificates. UpdateBootstrapCerts(id string, clientCert, clientKey, ca string, token string) errors.SDKError // UpdateBootstrapConnection updates connections performs update of the channel list corresponding Thing is connected to. @@ -268,7 +304,7 @@ type SDK interface { // Whitelist updates Thing state Config with given ID belonging to the user identified by the given token. Whitelist(cfg BootstrapConfig, token string) errors.SDKError - // IssueCert issues a certificate for a thing required for mtls. + // IssueCert issues a certificate for a thing required for mTLS. 
IssueCert(thingID, valid, token string) (Cert, errors.SDKError) // ViewCert returns a certificate given certificate ID @@ -280,15 +316,6 @@ type SDK interface { // RevokeCert revokes certificate for thing with thingID RevokeCert(thingID, token string) (time.Time, errors.SDKError) - // Issue issues a new key, returning its token value alongside. - Issue(duration time.Duration, token string) (KeyRes, errors.SDKError) - - // Revoke removes the key with the provided ID that is issued by the user identified by the provided key. - Revoke(id, token string) errors.SDKError - - // RetrieveKey retrieves data for the key identified by the provided ID, that is issued by the user identified by the provided key. - RetrieveKey(id, token string) (retrieveKeyRes, errors.SDKError) - // CreateSubscription creates a new subscription CreateSubscription(topic, contact, token string) (string, errors.SDKError) @@ -300,16 +327,9 @@ type SDK interface { // DeleteSubscription removes a subscription with the provided id. DeleteSubscription(id, token string) errors.SDKError - - // CreatePolicy creates new policies. - CreatePolicy(policy Policy, token string) errors.SDKError - - // DeletePolicy deletes policies. - DeletePolicy(policy Policy, token string) errors.SDKError } type mfSDK struct { - authURL string bootstrapURL string certsURL string httpAdapterURL string @@ -323,7 +343,6 @@ type mfSDK struct { // Config contains sdk configuration parameters. type Config struct { - AuthURL string BootstrapURL string CertsURL string HTTPAdapterURL string @@ -338,7 +357,6 @@ type Config struct { // NewSDK returns new mainflux SDK instance. 
func NewSDK(conf Config) SDK { return &mfSDK{ - authURL: conf.AuthURL, bootstrapURL: conf.BootstrapURL, certsURL: conf.CertsURL, httpAdapterURL: conf.HTTPAdapterURL, @@ -386,7 +404,7 @@ func (sdk mfSDK) processRequest(method, url, token, contentType string, data []b return make(http.Header), []byte{}, sdkerr } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return make(http.Header), []byte{}, errors.NewSDKError(err) } @@ -404,10 +422,15 @@ func (sdk mfSDK) withQueryParams(baseURL, endpoint string, pm PageMetadata) (str func (pm PageMetadata) query() (string, error) { q := url.Values{} - q.Add("total", strconv.FormatUint(pm.Total, 10)) - q.Add("offset", strconv.FormatUint(pm.Offset, 10)) - q.Add("limit", strconv.FormatUint(pm.Limit, 10)) - q.Add("disconnected", strconv.FormatBool(pm.Disconnected)) + if pm.Offset != 0 { + q.Add("offset", strconv.FormatUint(pm.Offset, 10)) + } + if pm.Limit != 0 { + q.Add("limit", strconv.FormatUint(pm.Limit, 10)) + } + if pm.Total != 0 { + q.Add("total", strconv.FormatUint(pm.Total, 10)) + } if pm.Level != 0 { q.Add("level", strconv.FormatUint(pm.Level, 10)) } @@ -420,6 +443,9 @@ func (pm PageMetadata) query() (string, error) { if pm.Type != "" { q.Add("type", pm.Type) } + if pm.Visibility != "" { + q.Add("visibility", pm.Visibility) + } if pm.Status != "" { q.Add("status", pm.Status) } diff --git a/pkg/sdk/go/setup_test.go b/pkg/sdk/go/setup_test.go new file mode 100644 index 0000000000..0795e9ad59 --- /dev/null +++ b/pkg/sdk/go/setup_test.go @@ -0,0 +1,248 @@ +package sdk_test + +import ( + "context" + "fmt" + "os" + "regexp" + "testing" + "time" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + sdk "github.com/mainflux/mainflux/pkg/sdk/go" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/users/clients" + umocks 
"github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/hasher" + "github.com/mainflux/mainflux/users/policies" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +const ( + invalidIdentity = "invalididentity" + Identity = "identity" + secret = "strongsecret" + token = "token" + invalidToken = "invalidtoken" + contentType = "application/senml+json" +) + +var ( + idProvider = uuid.New() + phasher = hasher.New() + validMetadata = sdk.Metadata{"role": "client"} + user = sdk.User{ + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: sdk.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + } + description = "shortdescription" + gName = "groupname" + + limit uint64 = 5 + offset uint64 = 0 + total uint64 = 200 + + authoritiesObj = "authorities" + subject = generateUUID(&testing.T{}) + object = generateUUID(&testing.T{}) + emailer = umocks.NewEmailer() + passRegex = regexp.MustCompile("^.{8,}$") + accessDuration = time.Minute * 1 + refreshDuration = time.Minute * 10 +) + +func generateValidToken(t *testing.T, svc clients.Service, cRepo *umocks.ClientRepository) string { + client := mfclients.Client{ + ID: generateUUID(t), + Name: "validtoken", + Credentials: mfclients.Credentials{ + Identity: "validtoken", + Secret: secret, + }, + Role: mfclients.AdminRole, + Status: mfclients.EnabledStatus, + } + rclient := client + rclient.Credentials.Secret, _ = phasher.Hash(client.Credentials.Secret) + + repoCall := cRepo.On("RetrieveByIdentity", context.Background(), mock.Anything).Return(rclient, nil) + token, err := svc.IssueToken(context.Background(), client.Credentials.Identity, client.Credentials.Secret) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("Create token expected nil got %s\n", err)) + repoCall.Unset() + return token.AccessToken +} + +func generateUUID(t *testing.T) string { + ulid, err := idProvider.ID() 
+ assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + return ulid +} + +func convertClientsPage(cp sdk.UsersPage) mfclients.ClientsPage { + return mfclients.ClientsPage{ + Clients: convertClients(cp.Users), + } +} + +func convertClients(cs []sdk.User) []mfclients.Client { + ccs := []mfclients.Client{} + + for _, c := range cs { + ccs = append(ccs, convertClient(c)) + } + + return ccs +} + +func convertGroups(cs []sdk.Group) []mfgroups.Group { + cgs := []mfgroups.Group{} + + for _, c := range cs { + cgs = append(cgs, convertGroup(c)) + } + + return cgs +} + +func convertPolicies(cs []sdk.Policy) []policies.Policy { + ccs := []policies.Policy{} + + for _, c := range cs { + ccs = append(ccs, convertPolicy(c)) + } + + return ccs +} + +func convertPolicy(sp sdk.Policy) policies.Policy { + return policies.Policy{ + OwnerID: sp.OwnerID, + Subject: sp.Subject, + Object: sp.Object, + Actions: sp.Actions, + CreatedAt: sp.CreatedAt, + UpdatedAt: sp.UpdatedAt, + } +} + +func convertMembershipsPage(m sdk.MembershipsPage) mfgroups.MembershipsPage { + return mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Limit: m.Limit, + Total: m.Total, + Offset: m.Offset, + }, + Memberships: convertMemberships(m.Memberships), + } +} + +func convertClientPage(p sdk.PageMetadata) mfclients.Page { + if p.Status == "" { + p.Status = mfclients.EnabledStatus.String() + } + status, err := mfclients.ToStatus(p.Status) + if err != nil { + return mfclients.Page{} + } + return mfclients.Page{ + Status: status, + Total: p.Total, + Offset: p.Offset, + Limit: p.Limit, + Name: p.Name, + Action: p.Action, + Tag: p.Tag, + Metadata: mfclients.Metadata(p.Metadata), + } +} + +func convertMemberships(gs []sdk.Group) []mfgroups.Group { + cg := []mfgroups.Group{} + for _, g := range gs { + cg = append(cg, convertGroup(g)) + } + + return cg +} + +func convertGroup(g sdk.Group) mfgroups.Group { + if g.Status == "" { + g.Status = mfclients.EnabledStatus.String() + } + status, err := 
mfclients.ToStatus(g.Status) + if err != nil { + return mfgroups.Group{} + } + return mfgroups.Group{ + ID: g.ID, + Owner: g.OwnerID, + Parent: g.ParentID, + Name: g.Name, + Description: g.Description, + Metadata: mfclients.Metadata(g.Metadata), + Level: g.Level, + Path: g.Path, + Children: convertChildren(g.Children), + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, + Status: status, + } +} + +func convertChildren(gs []*sdk.Group) []*mfgroups.Group { + cg := []*mfgroups.Group{} + + if len(gs) == 0 { + return cg + } + + for _, g := range gs { + insert := convertGroup(*g) + cg = append(cg, &insert) + } + + return cg +} + +func convertClient(c sdk.User) mfclients.Client { + if c.Status == "" { + c.Status = mfclients.EnabledStatus.String() + } + status, err := mfclients.ToStatus(c.Status) + if err != nil { + return mfclients.Client{} + } + return mfclients.Client{ + ID: c.ID, + Name: c.Name, + Tags: c.Tags, + Owner: c.Owner, + Credentials: mfclients.Credentials(c.Credentials), + Metadata: mfclients.Metadata(c.Metadata), + CreatedAt: c.CreatedAt, + UpdatedAt: c.UpdatedAt, + Status: status, + } +} + +func convertPolicyPage(pp sdk.PolicyPage) policies.PolicyPage { + return policies.PolicyPage{ + Page: policies.Page{ + Limit: pp.Limit, + Total: pp.Total, + Offset: pp.Offset, + }, + Policies: convertPolicies(pp.Policies), + } +} + +func TestMain(m *testing.M) { + exitCode := m.Run() + os.Exit(exitCode) +} diff --git a/pkg/sdk/go/things.go b/pkg/sdk/go/things.go index d41a13ea6f..4ab7e1f39c 100644 --- a/pkg/sdk/go/things.go +++ b/pkg/sdk/go/things.go @@ -1,45 +1,54 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - package sdk import ( "encoding/json" "fmt" "net/http" - "strings" + "time" "github.com/mainflux/mainflux/pkg/errors" ) const ( - thingsEndpoint = "things" - connectEndpoint = "connect" - identifyEndpoint = "identify" + thingsEndpoint = "things" + connectEndpoint = "connect" + disconnectEndpoint = "disconnect" + identifyEndpoint = 
"identify" ) -type identifyThingReq struct { - Token string `json:"token,omitempty"` -} - -type identifyThingResp struct { - ID string `json:"id,omitempty"` +// Thing represents mainflux thing. +type Thing struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Credentials Credentials `json:"credentials"` + Tags []string `json:"tags,omitempty"` + Owner string `json:"owner,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + Status string `json:"status,omitempty"` } -func (sdk mfSDK) CreateThing(t Thing, token string) (string, errors.SDKError) { - data, err := json.Marshal(t) +// CreateThing creates a new client returning its id. +func (sdk mfSDK) CreateThing(thing Thing, token string) (Thing, errors.SDKError) { + data, err := json.Marshal(thing) if err != nil { - return "", errors.NewSDKError(err) + return Thing{}, errors.NewSDKError(err) } + url := fmt.Sprintf("%s/%s", sdk.thingsURL, thingsEndpoint) - headers, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) if sdkerr != nil { - return "", sdkerr + return Thing{}, sdkerr + } + + thing = Thing{} + if err := json.Unmarshal(body, &thing); err != nil { + return Thing{}, errors.NewSDKError(err) } - id := strings.TrimPrefix(headers.Get("Location"), fmt.Sprintf("/%s/", thingsEndpoint)) - return id, nil + return thing, nil } func (sdk mfSDK) CreateThings(things []Thing, token string) ([]Thing, errors.SDKError) { @@ -50,7 +59,7 @@ func (sdk mfSDK) CreateThings(things []Thing, token string) ([]Thing, errors.SDK url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, thingsEndpoint, "bulk") - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := 
sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusOK) if sdkerr != nil { return []Thing{}, sdkerr } @@ -63,6 +72,7 @@ func (sdk mfSDK) CreateThings(things []Thing, token string) ([]Thing, errors.SDK return ctr.Things, nil } +// Things returns page of clients. func (sdk mfSDK) Things(pm PageMetadata, token string) (ThingsPage, errors.SDKError) { url, err := sdk.withQueryParams(sdk.thingsURL, thingsEndpoint, pm) if err != nil { @@ -74,19 +84,21 @@ func (sdk mfSDK) Things(pm PageMetadata, token string) (ThingsPage, errors.SDKEr return ThingsPage{}, sdkerr } - var tp ThingsPage - if err := json.Unmarshal(body, &tp); err != nil { + var cp ThingsPage + if err := json.Unmarshal(body, &cp); err != nil { return ThingsPage{}, errors.NewSDKError(err) } - return tp, nil + return cp, nil } +// ThingsByChannel retrieves everything that is assigned to a group identified by groupID. func (sdk mfSDK) ThingsByChannel(chanID string, pm PageMetadata, token string) (ThingsPage, errors.SDKError) { - url, err := sdk.withQueryParams(fmt.Sprintf("%s/channels/%s", sdk.thingsURL, chanID), thingsEndpoint, pm) + url, err := sdk.withQueryParams(sdk.thingsURL, fmt.Sprintf("channels/%s/%s", chanID, thingsEndpoint), pm) if err != nil { return ThingsPage{}, errors.NewSDKError(err) } + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) if sdkerr != nil { return ThingsPage{}, sdkerr @@ -100,12 +112,13 @@ func (sdk mfSDK) ThingsByChannel(chanID string, pm PageMetadata, token string) ( return tp, nil } +// Thing returns client object by id. 
func (sdk mfSDK) Thing(id, token string) (Thing, errors.SDKError) { url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, thingsEndpoint, id) - _, body, err := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) - if err != nil { - return Thing{}, err + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return Thing{}, sdkerr } var t Thing @@ -116,186 +129,138 @@ func (sdk mfSDK) Thing(id, token string) (Thing, errors.SDKError) { return t, nil } -func (sdk mfSDK) UpdateThing(t Thing, token string) errors.SDKError { +// UpdateThing updates existing client. +func (sdk mfSDK) UpdateThing(t Thing, token string) (Thing, errors.SDKError) { data, err := json.Marshal(t) if err != nil { - return errors.NewSDKError(err) + return Thing{}, errors.NewSDKError(err) } url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, thingsEndpoint, t.ID) - _, _, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr -} - -func (sdk mfSDK) DeleteThing(id, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s", sdk.thingsURL, thingsEndpoint, id) - - _, _, err := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) - return err -} - -func (sdk mfSDK) ShareThing(thingID, userID string, policies []string, token string) errors.SDKError { - sReq := shareThingReq{ - UserID: userID, - Policies: policies, - } - data, err := json.Marshal(sReq) - if err != nil { - return errors.NewSDKError(err) + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return Thing{}, sdkerr } - url := fmt.Sprintf("%s/%s/%s/share", sdk.thingsURL, thingsEndpoint, thingID) - - _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr -} - -func (sdk mfSDK) UpdateThingKey(id, key, token string) 
errors.SDKError { - req := map[string]string{ - "key": key, - } - data, err := json.Marshal(req) - if err != nil { - return errors.NewSDKError(err) + t = Thing{} + if err := json.Unmarshal(body, &t); err != nil { + return Thing{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s/%s/key", sdk.thingsURL, thingsEndpoint, id) - - _, _, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr + return t, nil } -func (sdk mfSDK) IdentifyThing(key string) (string, errors.SDKError) { - idReq := identifyThingReq{Token: key} - data, err := json.Marshal(idReq) +// UpdateThingTags updates the client's tags. +func (sdk mfSDK) UpdateThingTags(t Thing, token string) (Thing, errors.SDKError) { + data, err := json.Marshal(t) if err != nil { - return "", errors.NewSDKError(err) + return Thing{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.thingsURL, identifyEndpoint) + url := fmt.Sprintf("%s/%s/%s/tags", sdk.thingsURL, thingsEndpoint, t.ID) - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, "", string(CTJSON), data, http.StatusOK) + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) if sdkerr != nil { - return "", sdkerr + return Thing{}, sdkerr } - var i identifyThingResp - if err := json.Unmarshal(body, &i); err != nil { - return "", errors.NewSDKError(err) + t = Thing{} + if err := json.Unmarshal(body, &t); err != nil { + return Thing{}, errors.NewSDKError(err) } - return i.ID, nil + return t, nil } -func (sdk mfSDK) AccessByThingKey(channelID, key string) (string, errors.SDKError) { - idReq := identifyThingReq{Token: key} - data, err := json.Marshal(idReq) +// UpdateThingSecret updates the client's secret +func (sdk mfSDK) UpdateThingSecret(id, secret, token string) (Thing, errors.SDKError) { + var ucsr = updateThingSecretReq{Secret: secret} + + data, err := json.Marshal(ucsr) if err != nil { - return "", errors.NewSDKError(err) + return 
Thing{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s/%s/%s/access-by-key", sdk.thingsURL, identifyEndpoint, channelsEndpoint, channelID) + url := fmt.Sprintf("%s/%s/%s/secret", sdk.thingsURL, thingsEndpoint, id) - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, "", string(CTJSON), data, http.StatusOK) + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) if sdkerr != nil { - return "", sdkerr + return Thing{}, sdkerr } - var i identifyThingResp - if err := json.Unmarshal(body, &i); err != nil { - return "", errors.NewSDKError(err) + var t Thing + if err = json.Unmarshal(body, &t); err != nil { + return Thing{}, errors.NewSDKError(err) } - return i.ID, nil + return t, nil } -func (sdk mfSDK) AccessByThingID(channelID, id string) errors.SDKError { - req := map[string]string{ - "thing_id": id, - } - data, err := json.Marshal(req) +// UpdateThingOwner updates the client's owner. +func (sdk mfSDK) UpdateThingOwner(t Thing, token string) (Thing, errors.SDKError) { + data, err := json.Marshal(t) if err != nil { - return errors.NewSDKError(err) + return Thing{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s/%s/%s/access-by-id", sdk.thingsURL, identifyEndpoint, channelsEndpoint, channelID) + url := fmt.Sprintf("%s/%s/%s/owner", sdk.thingsURL, thingsEndpoint, t.ID) - _, _, sdkerr := sdk.processRequest(http.MethodPost, url, "", string(CTJSON), data, http.StatusOK) - return sdkerr -} - -func (sdk mfSDK) Connect(connIDs ConnectionIDs, token string) errors.SDKError { - data, err := json.Marshal(connIDs) - if err != nil { - return errors.NewSDKError(err) + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return Thing{}, sdkerr } - url := fmt.Sprintf("%s/%s", sdk.thingsURL, connectEndpoint) - - _, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr -} - -func 
(sdk mfSDK) Disconnect(connIDs ConnectionIDs, token string) errors.SDKError { - data, err := json.Marshal(connIDs) - if err != nil { - return errors.NewSDKError(err) + t = Thing{} + if err = json.Unmarshal(body, &t); err != nil { + return Thing{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.thingsURL, connectEndpoint) - - _, _, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr + return t, nil } -func (sdk mfSDK) ConnectThing(thingID, chanID, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, chanID, thingsEndpoint, thingID) +// EnableThing changes client status to enabled. +func (sdk mfSDK) EnableThing(id, token string) (Thing, errors.SDKError) { + return sdk.changeThingStatus(id, enableEndpoint, token) +} - _, _, err := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), nil, http.StatusOK) - return err +// DisableThing changes client status to disabled - soft delete. 
+func (sdk mfSDK) DisableThing(id, token string) (Thing, errors.SDKError) { + return sdk.changeThingStatus(id, disableEndpoint, token) } -func (sdk mfSDK) DisconnectThing(thingID, chanID, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s/%s/%s", sdk.thingsURL, channelsEndpoint, chanID, thingsEndpoint, thingID) +func (sdk mfSDK) changeThingStatus(id, status, token string) (Thing, errors.SDKError) { + url := fmt.Sprintf("%s/%s/%s/%s", sdk.thingsURL, thingsEndpoint, id, status) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return Thing{}, sdkerr + } - _, _, err := sdk.processRequest(http.MethodDelete, url, token, string(CTJSON), nil, http.StatusNoContent) - return err + t := Thing{} + if err := json.Unmarshal(body, &t); err != nil { + return Thing{}, errors.NewSDKError(err) + } + + return t, nil } -func (sdk mfSDK) SearchThing(t Thing, pm PageMetadata, token string) (ThingsPage, errors.SDKError) { - sReq := searchThingReq{ - Thing: t, Total: pm.Total, Offset: pm.Offset, Limit: pm.Limit, - } - data, err := json.Marshal(sReq) +func (sdk mfSDK) IdentifyThing(key string) (string, errors.SDKError) { + idReq := identifyThingReq{Token: key} + data, err := json.Marshal(idReq) if err != nil { - return ThingsPage{}, errors.NewSDKError(err) + return "", errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s/search", sdk.thingsURL, thingsEndpoint) - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusOK) + url := fmt.Sprintf("%s/%s", sdk.thingsURL, identifyEndpoint) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, "", string(CTJSON), data, http.StatusOK) if sdkerr != nil { - return ThingsPage{}, sdkerr + return "", sdkerr } - var tp ThingsPage - if err := json.Unmarshal(body, &tp); err != nil { - return ThingsPage{}, errors.NewSDKError(err) + var i identifyThingResp + if err := json.Unmarshal(body, &i); err != nil { + return 
"", errors.NewSDKError(err) } - return tp, nil -} - -type searchThingReq struct { - Total uint64 `json:"total,omitempty"` - Offset uint64 `json:"offset,omitempty"` - Limit uint64 `json:"limit,omitempty"` - Order string `json:"order,omitempty"` - Dir string `json:"dir,omitempty"` - Thing -} - -type shareThingReq struct { - UserID string `json:"user_id,omitempty"` - Policies []string `json:"policies,omitempty"` + return i.ID, nil } diff --git a/pkg/sdk/go/things_test.go b/pkg/sdk/go/things_test.go deleted file mode 100644 index 1ef6fa046b..0000000000 --- a/pkg/sdk/go/things_test.go +++ /dev/null @@ -1,914 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package sdk_test - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - sdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - authapi "github.com/mainflux/mainflux/things/api/auth/http" - httpapi "github.com/mainflux/mainflux/things/api/things/http" - "github.com/mainflux/mainflux/things/mocks" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" -) - -const ( - contentType = "application/senml+json" - email = "user@example.com" - adminEmail = "admin@example.com" - otherEmail = "other_user@example.com" - token = "token" - otherToken = "other_token" - wrongValue = "wrong_value" - badKey = "999" -) - -var ( - metadata = map[string]interface{}{"meta": "data"} - metadata2 = map[string]interface{}{"meta": "data2"} - th1 = sdk.Thing{ID: "fe6b4e92-cc98-425e-b0aa-000000000001", Name: "test1", Metadata: metadata} - th2 = sdk.Thing{ID: "fe6b4e92-cc98-425e-b0aa-000000000002", Name: "test2", Metadata: metadata} - emptyThing = sdk.Thing{} -) - -func newThingsService(tokens map[string]string) things.Service { - userPolicy := 
mocks.MockSubjectSet{Object: "users", Relation: "member"} - adminPolicy := mocks.MockSubjectSet{Object: "authorities", Relation: "member"} - auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{ - adminEmail: {userPolicy, adminPolicy}, email: {userPolicy}}) - conns := make(chan mocks.Connection) - thingsRepo := mocks.NewThingRepository(conns) - channelsRepo := mocks.NewChannelRepository(thingsRepo, conns) - chanCache := mocks.NewChannelCache() - thingCache := mocks.NewThingCache() - idProvider := uuid.NewMock() - - return things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) -} - -func newThingsServer(svc things.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(mocktracer.New(), svc, logger) - return httptest.NewServer(mux) -} - -func newAuthServer(svc things.Service) *httptest.Server { - logger := logger.NewMock() - mux := authapi.MakeHandler(mocktracer.New(), svc, logger) - return httptest.NewServer(mux) -} - -func TestCreateThing(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - cases := []struct { - desc string - thing sdk.Thing - token string - err error - thID string - }{ - { - desc: "create new thing", - thing: th1, - token: token, - err: nil, - thID: th1.ID, - }, - { - desc: "create new empty thing", - thing: emptyThing, - token: token, - err: nil, - thID: fmt.Sprintf("%s%012d", uuid.Prefix, 2), - }, - { - desc: "create new thing with empty token", - thing: th1, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - thID: "", - }, - { - desc: "create new thing with invalid token", - thing: th1, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - thID: "", - 
}, - } - for _, tc := range cases { - id, err := mainfluxSDK.CreateThing(tc.thing, tc.token) - - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.thID, id, fmt.Sprintf("%s: expected location %s got %s", tc.desc, tc.thID, id)) - } -} - -func TestCreateThings(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - things := []sdk.Thing{ - th1, - th2, - } - thsExtID := []sdk.Thing{ - {ID: th1.ID, Name: "1", Key: "1", Metadata: metadata}, - {ID: th2.ID, Name: "2", Key: "2", Metadata: metadata}, - } - thsWrongExtID := []sdk.Thing{ - {ID: "b0aa-000000000001", Name: "1", Key: "1", Metadata: metadata}, - {ID: "b0aa-000000000002", Name: "2", Key: "2", Metadata: metadata2}, - } - - cases := []struct { - desc string - things []sdk.Thing - token string - err error - res []sdk.Thing - }{ - { - desc: "create new things", - things: things, - token: token, - err: nil, - res: things, - }, - { - desc: "create new things with empty things", - things: []sdk.Thing{}, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrEmptyList, http.StatusBadRequest), - res: []sdk.Thing{}, - }, - { - desc: "create new thing with empty token", - things: things, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - res: []sdk.Thing{}, - }, - { - desc: "create new thing with invalid token", - things: things, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - res: []sdk.Thing{}, - }, - { - desc: "create new things with external UUID", - things: thsExtID, - token: token, - err: nil, - res: things, - }, - { - desc: "create new things with wrong external UUID", - things: thsWrongExtID, - token: token, - err: 
errors.NewSDKErrorWithStatus(apiutil.ErrInvalidIDFormat, http.StatusBadRequest), - res: []sdk.Thing{}, - }, - } - for _, tc := range cases { - res, err := mainfluxSDK.CreateThings(tc.things, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - - for idx := range tc.res { - assert.Equal(t, tc.res[idx].ID, res[idx].ID, fmt.Sprintf("%s: expected response ID %s got %s", tc.desc, tc.res[idx].ID, res[idx].ID)) - } - } -} - -func TestThing(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - id, err := mainfluxSDK.CreateThing(th1, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - th1.Key = fmt.Sprintf("%s%012d", uuid.Prefix, 1) - - cases := []struct { - desc string - thID string - token string - err error - response sdk.Thing - }{ - { - desc: "get existing thing", - thID: id, - token: token, - err: nil, - response: th1, - }, - { - desc: "get non-existent thing", - thID: "43", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - response: sdk.Thing{}, - }, - { - desc: "get thing with invalid token", - thID: id, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - response: sdk.Thing{}, - }, - } - - for _, tc := range cases { - respTh, err := mainfluxSDK.Thing(tc.thID, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, respTh, fmt.Sprintf("%s: expected response thing %s, got %s", tc.desc, tc.response, respTh)) - } -} - -func TestThings(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: 
ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - var things []sdk.Thing - - mainfluxSDK := sdk.NewSDK(sdkConf) - for i := 1; i < 101; i++ { - id := fmt.Sprintf("%s%012d", chPrefix, i) - name := fmt.Sprintf("test-%d", i) - th := sdk.Thing{ID: id, Name: name, Metadata: metadata} - _, err := mainfluxSDK.CreateThing(th, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - th.Key = fmt.Sprintf("%s%012d", uuid.Prefix, i) - things = append(things, th) - } - - cases := []struct { - desc string - token string - offset uint64 - limit uint64 - err error - response []sdk.Thing - name string - metadata map[string]interface{} - }{ - { - desc: "get a list of things", - token: token, - offset: offset, - limit: limit, - err: nil, - response: things[0:limit], - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of things with invalid token", - token: wrongValue, - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of things with empty token", - token: "", - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of things with zero limit", - token: token, - offset: 0, - limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of things with limit greater than max", - token: token, - offset: offset, - limit: 110, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - metadata: make(map[string]interface{}), - }, - { - desc: "get a list of things with offset greater than max", - token: token, - offset: 110, - limit: limit, - err: nil, - response: []sdk.Thing{}, 
- metadata: make(map[string]interface{}), - }, - } - for _, tc := range cases { - filter := sdk.PageMetadata{ - Name: tc.name, - Total: total, - Offset: uint64(tc.offset), - Limit: uint64(tc.limit), - Metadata: tc.metadata, - } - page, err := mainfluxSDK.Things(filter, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, page.Things, fmt.Sprintf("%s: got incorrect list of things from Things()", tc.desc)) - } -} - -func TestThingsByChannel(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - ch := sdk.Channel{Name: "test_channel"} - cid, err := mainfluxSDK.CreateChannel(ch, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - var n = 10 - var thsDiscoNum = 1 - var things []sdk.Thing - for i := 1; i < n+1; i++ { - id := fmt.Sprintf("%s%012d", chPrefix, i) - name := fmt.Sprintf("test-%d", i) - th := sdk.Thing{ - ID: id, - Name: name, - Metadata: metadata, - Key: fmt.Sprintf("%s%012d", uuid.Prefix, 2*i+1), - } - tid, err := mainfluxSDK.CreateThing(th, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - things = append(things, th) - - // Don't connect last Thing - if i == n+1-thsDiscoNum { - break - } - - // Don't connect last 2 Channels - conIDs := sdk.ConnectionIDs{ - ChannelIDs: []string{cid}, - ThingIDs: []string{tid}, - } - err = mainfluxSDK.Connect(conIDs, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - } - - cases := []struct { - desc string - channel string - token string - offset uint64 - limit uint64 - disconnected bool - err error - response []sdk.Thing - }{ - { - desc: "get a list of things by channel", - channel: cid, - token: token, - offset: offset, - limit: limit, - err: nil, - response: 
things[0:limit], - }, - { - desc: "get a list of things by channel with invalid token", - channel: cid, - token: wrongValue, - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - response: nil, - }, - { - desc: "get a list of things by channel with empty token", - channel: cid, - token: "", - offset: offset, - limit: limit, - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - response: nil, - }, - { - desc: "get a list of things by channel with zero limit", - channel: cid, - token: token, - offset: offset, - limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - }, - { - desc: "get a list of things by channel with limit greater than max", - channel: cid, - token: token, - offset: offset, - limit: 110, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - }, - { - desc: "get a list of things by channel with offset greater than max", - channel: cid, - token: token, - offset: 110, - limit: limit, - err: nil, - response: []sdk.Thing{}, - }, - { - desc: "get a list of things by channel with invalid args (zero limit) and invalid token", - channel: cid, - token: wrongValue, - offset: offset, - limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - response: nil, - }, - { - desc: "get a list of not connected things by channel", - channel: cid, - token: token, - offset: offset, - limit: 100, - disconnected: true, - err: nil, - response: []sdk.Thing{things[n-thsDiscoNum]}, - }, - } - for _, tc := range cases { - pm := sdk.PageMetadata{ - Offset: tc.offset, - Limit: tc.limit, - Disconnected: tc.disconnected, - } - page, err := mainfluxSDK.ThingsByChannel(tc.channel, pm, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, page.Things, 
fmt.Sprintf("%s: got incorrect list of things from ThingsByChannel()", tc.desc)) - } -} - -func TestUpdateThing(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - id, err := mainfluxSDK.CreateThing(th1, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - th1.Name = "test2" - - cases := []struct { - desc string - thing sdk.Thing - token string - err error - }{ - { - desc: "update existing thing", - thing: sdk.Thing{ - ID: id, - Name: "test_app", - Metadata: metadata2, - }, - token: token, - err: nil, - }, - { - desc: "update non-existing thing", - thing: sdk.Thing{ - ID: "0", - Name: "test_device", - Metadata: metadata, - }, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthorization, http.StatusForbidden), - }, - { - desc: "update channel with an empty id", - thing: sdk.Thing{ - ID: "", - Name: "test_device", - Metadata: metadata, - }, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "update channel with invalid token", - thing: sdk.Thing{ - ID: id, - Name: "test_app", - Metadata: metadata2, - }, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "update channel with empty token", - thing: sdk.Thing{ - ID: id, - Name: "test_app", - Metadata: metadata2, - }, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - } - - for _, tc := range cases { - err := mainfluxSDK.UpdateThing(tc.thing, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - } -} - -func TestDeleteThing(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) 
- defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - id, err := mainfluxSDK.CreateThing(th1, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - thingID string - token string - err error - }{ - { - desc: "delete thing with invalid token", - thingID: id, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "delete non-existing thing", - thingID: "2", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - { - desc: "delete thing with invalid id", - thingID: "", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "delete thing with empty token", - thingID: id, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - { - desc: "delete existing thing", - thingID: id, - token: token, - err: nil, - }, - { - desc: "delete deleted thing", - thingID: id, - token: token, - err: nil, - }, - } - - for _, tc := range cases { - err := mainfluxSDK.DeleteThing(tc.thingID, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - } -} - -func TestIdentifyThing(t *testing.T) { - svc := newThingsService(map[string]string{token: email}) - ts := newThingsServer(svc) - as := newAuthServer(svc) - defer ts.Close() - defer as.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - authSdkConf := sdk.Config{ - ThingsURL: as.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - mainfluxAuthSDK := sdk.NewSDK(authSdkConf) - th := sdk.Thing{ID: "fe6b4e92-cc98-425e-b0aa-000000007891", Name: "identify"} - id, err := 
mainfluxSDK.CreateThing(th, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - thing, err := mainfluxSDK.Thing(th.ID, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - thingKey string - err error - response string - }{ - { - desc: "identify thing with a valid key", - thingKey: thing.Key, - err: nil, - response: id, - }, - { - desc: "identify thing with an invalid key", - thingKey: badKey, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - response: "", - }, - { - desc: "identify thing with an empty key", - thingKey: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerKey, http.StatusUnauthorized), - response: "", - }, - } - - for _, tc := range cases { - thingID, err := mainfluxAuthSDK.IdentifyThing(tc.thingKey) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, thingID, fmt.Sprintf("%s: expected response id %s, got %s", tc.desc, tc.response, thingID)) - } -} - -func TestConnectThing(t *testing.T) { - svc := newThingsService(map[string]string{ - token: email, - otherToken: otherEmail, - }) - - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - thingID, err := mainfluxSDK.CreateThing(th1, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - chanID1, err := mainfluxSDK.CreateChannel(ch2, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - chanID2, err := mainfluxSDK.CreateChannel(ch3, otherToken) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - thingID string - chanID string - token string - err error - }{ - { - desc: "connect existing thing to existing channel", - thingID: thingID, - chanID: chanID1, - token: token, - err: nil, - }, - - { - 
desc: "connect existing thing to non-existing channel", - thingID: thingID, - chanID: "9", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - { - desc: "connect non-existing thing to existing channel", - thingID: "9", - chanID: chanID1, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - { - desc: "connect existing thing to channel with invalid ID", - thingID: thingID, - chanID: "", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "connect thing with missing ID to existing channel", - thingID: "", - chanID: chanID1, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - - { - desc: "connect existing thing to existing channel with invalid token", - thingID: thingID, - chanID: chanID1, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "connect existing thing to existing channel with empty token", - thingID: thingID, - chanID: chanID1, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - { - desc: "connect thing from owner to channel of other user", - thingID: thingID, - chanID: chanID2, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - } - - for _, tc := range cases { - conIDs := sdk.ConnectionIDs{ - ChannelIDs: []string{tc.chanID}, - ThingIDs: []string{tc.thingID}, - } - err := mainfluxSDK.Connect(conIDs, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - } -} - -func TestDisconnectThing(t *testing.T) { - svc := newThingsService(map[string]string{ - token: email, - otherToken: otherEmail, - }) - - ts := newThingsServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - ThingsURL: ts.URL, - MsgContentType: contentType, - 
TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - - thingID, err := mainfluxSDK.CreateThing(th1, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - chanID1, err := mainfluxSDK.CreateChannel(ch2, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - conIDs := sdk.ConnectionIDs{ - ChannelIDs: []string{chanID1}, - ThingIDs: []string{thingID}, - } - err = mainfluxSDK.Connect(conIDs, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - chanID2, err := mainfluxSDK.CreateChannel(ch2, otherToken) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - thingID string - chanID string - token string - err error - }{ - { - desc: "disconnect connected thing from channel", - thingID: thingID, - chanID: chanID1, - token: token, - err: nil, - }, - { - desc: "disconnect existing thing from non-existing channel", - thingID: thingID, - chanID: "9", - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - { - desc: "disconnect non-existing thing from existing channel", - thingID: "9", - chanID: chanID1, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - { - desc: "disconnect existing thing from channel with invalid ID", - thingID: thingID, - chanID: "", - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "disconnect thing with invalid ID from existing channel", - thingID: "", - chanID: chanID1, - token: token, - err: errors.NewSDKErrorWithStatus(apiutil.ErrMissingID, http.StatusBadRequest), - }, - { - desc: "disconnect existing thing from existing channel with invalid token", - thingID: thingID, - chanID: chanID1, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - }, - { - desc: "disconnect existing thing from existing channel with empty 
token", - thingID: thingID, - chanID: chanID1, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - }, - { - desc: "disconnect owner's thing from someone elses channel", - thingID: thingID, - chanID: chanID2, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), - }, - } - - for _, tc := range cases { - err := mainfluxSDK.DisconnectThing(tc.thingID, tc.chanID, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - } -} diff --git a/pkg/sdk/go/tokens.go b/pkg/sdk/go/tokens.go new file mode 100644 index 0000000000..be021d9012 --- /dev/null +++ b/pkg/sdk/go/tokens.go @@ -0,0 +1,59 @@ +package sdk + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/mainflux/mainflux/pkg/errors" +) + +// Token is used for authentication purposes. +// It contains AccessToken, RefreshToken and AccessExpiry. +type Token struct { + AccessToken string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + AccessType string `json:"access_type,omitempty"` +} + +// CreateToken receives credentials and returns user token. +func (sdk mfSDK) CreateToken(user User) (Token, errors.SDKError) { + var treq = tokenReq{ + Identity: user.Credentials.Identity, + Secret: user.Credentials.Secret, + } + data, err := json.Marshal(treq) + if err != nil { + return Token{}, errors.NewSDKError(err) + } + + url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, issueTokenEndpoint) + + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, "", string(CTJSON), data, http.StatusCreated) + if sdkerr != nil { + return Token{}, sdkerr + } + var token Token + if err := json.Unmarshal(body, &token); err != nil { + return Token{}, errors.NewSDKError(err) + } + + return token, nil +} + +// RefreshToken refreshes expired access tokens. 
+func (sdk mfSDK) RefreshToken(token string) (Token, errors.SDKError) { + url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, refreshTokenEndpoint) + + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), []byte{}, http.StatusCreated) + if sdkerr != nil { + return Token{}, sdkerr + } + + var t = Token{} + if err := json.Unmarshal(body, &t); err != nil { + return Token{}, errors.NewSDKError(err) + } + + return t, nil +} diff --git a/pkg/sdk/go/users.go b/pkg/sdk/go/users.go index 474c7fff31..8c4d86da90 100644 --- a/pkg/sdk/go/users.go +++ b/pkg/sdk/go/users.go @@ -1,134 +1,265 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - package sdk import ( "encoding/json" "fmt" "net/http" - "strings" + "time" "github.com/mainflux/mainflux/pkg/errors" ) const ( - usersEndpoint = "users" - tokensEndpoint = "tokens" - passwordEndpoint = "password" - membersEndpoint = "members" + usersEndpoint = "users" + enableEndpoint = "enable" + disableEndpoint = "disable" + issueTokenEndpoint = "tokens/issue" + refreshTokenEndpoint = "tokens/refresh" + membersEndpoint = "members" ) -func (sdk mfSDK) CreateUser(u User, token string) (string, errors.SDKError) { - data, err := json.Marshal(u) +// User represents mainflux user its credentials. +type User struct { + ID string `json:"id"` + Name string `json:"name,omitempty"` + Credentials Credentials `json:"credentials"` + Tags []string `json:"tags,omitempty"` + Owner string `json:"owner,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + Status string `json:"status,omitempty"` + Role string `json:"role,omitempty"` +} + +// CreateUser creates a new client returning its id. 
+func (sdk mfSDK) CreateUser(user User, token string) (User, errors.SDKError) { + data, err := json.Marshal(user) if err != nil { - return "", errors.NewSDKError(err) + return User{}, errors.NewSDKError(err) } + url := fmt.Sprintf("%s/%s", sdk.usersURL, usersEndpoint) - headers, _, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), data, http.StatusCreated) if sdkerr != nil { - return "", sdkerr + return User{}, sdkerr } - id := strings.TrimPrefix(headers.Get("Location"), fmt.Sprintf("/%s/", usersEndpoint)) - return id, nil + user = User{} + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) + } + return user, nil } -func (sdk mfSDK) User(userID, token string) (User, errors.SDKError) { - url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, userID) +// Users returns page of users. +func (sdk mfSDK) Users(pm PageMetadata, token string) (UsersPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.usersURL, usersEndpoint, pm) + if err != nil { + return UsersPage{}, errors.NewSDKError(err) + } - _, body, err := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return UsersPage{}, sdkerr + } + + var cp UsersPage + if err := json.Unmarshal(body, &cp); err != nil { + return UsersPage{}, errors.NewSDKError(err) + } + + return cp, nil +} + +// Members retrieves everything that is assigned to a group identified by groupID. 
+func (sdk mfSDK) Members(groupID string, meta PageMetadata, token string) (MembersPage, errors.SDKError) { + url, err := sdk.withQueryParams(sdk.usersURL, fmt.Sprintf("%s/%s/%s", groupsEndpoint, groupID, membersEndpoint), meta) if err != nil { - return User{}, err + return MembersPage{}, errors.NewSDKError(err) } - var u User - if err := json.Unmarshal(body, &u); err != nil { - return User{}, errors.NewSDKError(err) + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return MembersPage{}, sdkerr + } + + var mp MembersPage + if err := json.Unmarshal(body, &mp); err != nil { + return MembersPage{}, errors.NewSDKError(err) } - return u, nil + return mp, nil } -func (sdk mfSDK) Users(pm PageMetadata, token string) (UsersPage, errors.SDKError) { - var url string - var err error +// User returns user object by id. +func (sdk mfSDK) User(id, token string) (User, errors.SDKError) { + url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, id) - if url, err = sdk.withQueryParams(sdk.usersURL, usersEndpoint, pm); err != nil { - return UsersPage{}, errors.NewSDKError(err) + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return User{}, sdkerr + } + + var user User + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) } + return user, nil +} + +// User returns user object by id. 
+func (sdk mfSDK) UserProfile(token string) (User, errors.SDKError) { + url := fmt.Sprintf("%s/%s/profile", sdk.usersURL, usersEndpoint) + _, body, sdkerr := sdk.processRequest(http.MethodGet, url, token, string(CTJSON), nil, http.StatusOK) if sdkerr != nil { - return UsersPage{}, sdkerr + return User{}, sdkerr } - var up UsersPage - if err := json.Unmarshal(body, &up); err != nil { - return UsersPage{}, errors.NewSDKError(err) + var user User + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) } - return up, nil + return user, nil } -func (sdk mfSDK) CreateToken(user User) (string, errors.SDKError) { +// UpdateUser updates existing user. +func (sdk mfSDK) UpdateUser(user User, token string) (User, errors.SDKError) { data, err := json.Marshal(user) if err != nil { - return "", errors.NewSDKError(err) + return User{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.usersURL, tokensEndpoint) + url := fmt.Sprintf("%s/%s/%s", sdk.usersURL, usersEndpoint, user.ID) - _, body, sdkerr := sdk.processRequest(http.MethodPost, url, "", string(CTJSON), data, http.StatusCreated) + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) if sdkerr != nil { - return "", sdkerr + return User{}, sdkerr + } + + user = User{} + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) + } + return user, nil +} + +// UpdateUserTags updates the user's tags. 
+func (sdk mfSDK) UpdateUserTags(user User, token string) (User, errors.SDKError) { + data, err := json.Marshal(user) + if err != nil { + return User{}, errors.NewSDKError(err) } - var tr tokenRes - if err := json.Unmarshal(body, &tr); err != nil { - return "", errors.NewSDKError(err) + url := fmt.Sprintf("%s/%s/%s/tags", sdk.usersURL, usersEndpoint, user.ID) + + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return User{}, sdkerr } - return tr.Token, nil + user = User{} + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) + } + return user, nil } -func (sdk mfSDK) UpdateUser(u User, token string) errors.SDKError { - data, err := json.Marshal(u) +// UpdateUserIdentity updates the user's identity +func (sdk mfSDK) UpdateUserIdentity(user User, token string) (User, errors.SDKError) { + ucir := updateClientIdentityReq{token: token, id: user.ID, Identity: user.Credentials.Identity} + + data, err := json.Marshal(ucir) if err != nil { - return errors.NewSDKError(err) + return User{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.usersURL, usersEndpoint) + url := fmt.Sprintf("%s/%s/%s/identity", sdk.usersURL, usersEndpoint, user.ID) - _, _, sdkerr := sdk.processRequest(http.MethodPut, url, token, string(CTJSON), data, http.StatusOK) - return sdkerr + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return User{}, sdkerr + } + + user = User{} + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) + } + + return user, nil } -func (sdk mfSDK) UpdatePassword(oldPass, newPass, token string) errors.SDKError { - ur := UserPasswordReq{ - OldPassword: oldPass, - Password: newPass, +// UpdatePassword updates user password. 
+func (sdk mfSDK) UpdatePassword(oldPass, newPass, token string) (User, errors.SDKError) { + var ucsr = updateClientSecretReq{OldSecret: oldPass, NewSecret: newPass} + + data, err := json.Marshal(ucsr) + if err != nil { + return User{}, errors.NewSDKError(err) + } + + url := fmt.Sprintf("%s/%s/secret", sdk.usersURL, usersEndpoint) + + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return User{}, sdkerr + } + + var user User + if err = json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) } - data, err := json.Marshal(ur) + + return user, nil +} + +// UpdateUserOwner updates the user's owner. +func (sdk mfSDK) UpdateUserOwner(user User, token string) (User, errors.SDKError) { + data, err := json.Marshal(user) if err != nil { - return errors.NewSDKError(err) + return User{}, errors.NewSDKError(err) } - url := fmt.Sprintf("%s/%s", sdk.usersURL, passwordEndpoint) + url := fmt.Sprintf("%s/%s/%s/owner", sdk.usersURL, usersEndpoint, user.ID) + + _, body, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusOK) + if sdkerr != nil { + return User{}, sdkerr + } - _, _, sdkerr := sdk.processRequest(http.MethodPatch, url, token, string(CTJSON), data, http.StatusCreated) - return sdkerr + user = User{} + if err = json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) + } + + return user, nil } -func (sdk mfSDK) EnableUser(id, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s/enable", sdk.usersURL, usersEndpoint, id) - _, _, err := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusNoContent) - return err +// EnableUser changes the status of the user to enabled. 
+func (sdk mfSDK) EnableUser(id, token string) (User, errors.SDKError) { + return sdk.changeClientStatus(token, id, enableEndpoint) } -func (sdk mfSDK) DisableUser(id, token string) errors.SDKError { - url := fmt.Sprintf("%s/%s/%s/disable", sdk.usersURL, usersEndpoint, id) - _, _, err := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusNoContent) - return err +// DisableUser changes the status of the user to disabled. +func (sdk mfSDK) DisableUser(id, token string) (User, errors.SDKError) { + return sdk.changeClientStatus(token, id, disableEndpoint) +} + +func (sdk mfSDK) changeClientStatus(token, id, status string) (User, errors.SDKError) { + url := fmt.Sprintf("%s/%s/%s/%s", sdk.usersURL, usersEndpoint, id, status) + _, body, sdkerr := sdk.processRequest(http.MethodPost, url, token, string(CTJSON), nil, http.StatusOK) + if sdkerr != nil { + return User{}, sdkerr + } + + user := User{} + if err := json.Unmarshal(body, &user); err != nil { + return User{}, errors.NewSDKError(err) + } + + return user, nil } diff --git a/pkg/sdk/go/users_test.go b/pkg/sdk/go/users_test.go index ac3a185e40..bb6a1876b2 100644 --- a/pkg/sdk/go/users_test.go +++ b/pkg/sdk/go/users_test.go @@ -1,6 +1,3 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - package sdk_test import ( @@ -8,254 +5,247 @@ import ( "fmt" "net/http" "net/http/httptest" - "regexp" "testing" + "time" - "github.com/mainflux/mainflux" - mfauth "github.com/mainflux/mainflux/auth" + "github.com/go-zoo/bone" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/internal/testsutil" + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" sdk "github.com/mainflux/mainflux/pkg/sdk/go" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/users" - "github.com/mainflux/mainflux/users/api" - 
"github.com/mainflux/mainflux/users/mocks" - "github.com/opentracing/opentracing-go/mocktracer" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/clients/api" + "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/jwt" + pmocks "github.com/mainflux/mainflux/users/policies/mocks" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) -const ( - invalidEmail = "userexample.com" -) - -var ( - passRegex = regexp.MustCompile("^.{8,}$") - limit uint64 = 5 - offset uint64 = 0 - total uint64 = 200 -) - -func newUserService() users.Service { - usersRepo := mocks.NewUserRepository() - hasher := mocks.NewHasher() - userEmail := "user@example.com" - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[userEmail] = append(mockAuthzDB[userEmail], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := mocks.NewAuthService(map[string]string{userEmail: userEmail}, mockAuthzDB) - - emailer := mocks.NewEmailer() - idProvider := uuid.New() +var id = generateUUID(&testing.T{}) - return users.New(usersRepo, hasher, auth, emailer, idProvider, passRegex) -} - -func newUserServer(svc users.Service) *httptest.Server { - logger := logger.NewMock() - mux := api.MakeHandler(svc, mocktracer.New(), logger) +func newClientServer(svc clients.Service) *httptest.Server { + logger := mflog.NewMock() + mux := bone.New() + api.MakeClientsHandler(svc, mux, logger) return httptest.NewServer(mux) } -func TestCreateUser(t *testing.T) { - svc := newUserService() - ts := newUserServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - UsersURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - user := sdk.User{Email: "user@example.com", Password: "password"} +func TestCreateClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) - mockAuthzDB := 
map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: mfauth.APIKey}) - token := tkn.GetValue() + user := sdk.User{ + Credentials: sdk.Credentials{Identity: "admin@example.com", Secret: "secret"}, + Status: mfclients.EnabledStatus.String(), + } + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) - mainfluxSDK := sdk.NewSDK(sdkConf) cases := []struct { - desc string - user sdk.User - token string - err errors.SDKError + desc string + client sdk.User + response sdk.User + token string + err errors.SDKError }{ { - desc: "register new user", - user: user, - token: token, - err: nil, + desc: "register new user", + client: user, + response: user, + token: token, + err: nil, }, { - desc: "register existing user", - user: user, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrConflict, http.StatusConflict), + desc: "register existing user", + client: user, + response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedCreation, http.StatusInternalServerError), }, { - desc: "register user with invalid email address", - user: sdk.User{Email: invalidEmail, Password: "password"}, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), + desc: "register user with invalid identity", + client: sdk.User{ + Credentials: sdk.Credentials{ + Identity: invalidIdentity, + Secret: "password", + }, + }, + response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), }, { - desc: "register user 
with empty password", - user: sdk.User{Email: "user2@example.com", Password: ""}, - token: token, - err: errors.NewSDKErrorWithStatus(users.ErrPasswordFormat, http.StatusBadRequest), + desc: "register user with empty secret", + client: sdk.User{ + Credentials: sdk.Credentials{ + Identity: Identity + "2", + Secret: "", + }, + }, + response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), }, { - desc: "register user without password", - user: sdk.User{Email: "user2@example.com"}, - token: token, - err: errors.NewSDKErrorWithStatus(users.ErrPasswordFormat, http.StatusBadRequest), + desc: "register user with no secret", + client: sdk.User{ + Credentials: sdk.Credentials{ + Identity: Identity + "2", + }, + }, + response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), }, { - desc: "register user without email", - user: sdk.User{Password: "password"}, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), + desc: "register user with empty identity", + client: sdk.User{ + Credentials: sdk.Credentials{ + Identity: "", + Secret: secret, + }, + }, + response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), }, { - desc: "register empty user", - user: sdk.User{}, - token: token, - err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), - }, - } - - for _, tc := range cases { - _, err := mainfluxSDK.CreateUser(tc.user, tc.token) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - } -} - -func TestUser(t *testing.T) { - svc := newUserService() - ts := newUserServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - UsersURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - user := sdk.User{Email: 
"user@example.com", Password: "password"} - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: mfauth.APIKey}) - token := tkn.GetValue() - userID, err := mainfluxSDK.CreateUser(user, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - usertoken, err := mainfluxSDK.CreateToken(user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - user.ID = userID - user.Password = "" - - cases := []struct { - desc string - userID string - token string - err errors.SDKError - response sdk.User - }{ - { - desc: "get existing user", - userID: userID, - token: usertoken, - err: nil, - response: user, + desc: "register user with no identity", + client: sdk.User{ + Credentials: sdk.Credentials{ + Secret: secret, + }, + }, + response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), }, { - desc: "get non-existent user", - userID: "43", - token: usertoken, - err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + desc: "register empty user", + client: sdk.User{}, response: sdk.User{}, + token: token, + err: errors.NewSDKErrorWithStatus(apiutil.ErrMalformedEntity, http.StatusBadRequest), }, - { - desc: "get user with invalid token", - userID: userID, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - response: sdk.User{}, + desc: "register user with every field defined", + client: sdk.User{ + ID: id, + Name: "name", + Tags: []string{"tag1", "tag2"}, + Owner: id, + Credentials: user.Credentials, + Metadata: validMetadata, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Status: 
mfclients.EnabledStatus.String(), + }, + response: sdk.User{ + ID: id, + Name: "name", + Tags: []string{"tag1", "tag2"}, + Owner: id, + Credentials: user.Credentials, + Metadata: validMetadata, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Status: mfclients.EnabledStatus.String(), + }, + token: token, + err: nil, }, } for _, tc := range cases { - respUs, err := mainfluxSDK.User(tc.userID, tc.token) + repoCall := cRepo.On("Save", mock.Anything, mock.Anything).Return(tc.response, tc.err) + rClient, err := clientSDK.CreateUser(tc.client, tc.token) + tc.response.ID = rClient.ID + tc.response.CreatedAt = rClient.CreatedAt + tc.response.UpdatedAt = rClient.UpdatedAt + rClient.Credentials.Secret = tc.response.Credentials.Secret assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, respUs, fmt.Sprintf("%s: expected response user %s, got %s", tc.desc, tc.response, respUs)) + assert.Equal(t, tc.response, rClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, rClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "Save", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + } + repoCall.Unset() } } -func TestUsers(t *testing.T) { - svc := newUserService() - ts := newUserServer(svc) - defer ts.Close() - sdkConf := sdk.Config{ - UsersURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, - } - - mainfluxSDK := sdk.NewSDK(sdkConf) - user := sdk.User{Email: "user@example.com", Password: "password"} +func TestListClients(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := 
mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: mfauth.APIKey}) - token := tkn.GetValue() + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() - var users []sdk.User + var cls []sdk.User + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) for i := 10; i < 100; i++ { - email := fmt.Sprintf("user%d@example.com", i) - password := fmt.Sprintf("password%d", i) - metadata := map[string]interface{}{"name": fmt.Sprintf("user%d", i)} - us := sdk.User{Email: email, Password: password, Metadata: metadata} - userID, err := mainfluxSDK.CreateUser(us, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - us.ID = userID - us.Password = "" - users = append(users, us) + cl := sdk.User{ + ID: generateUUID(t), + Name: fmt.Sprintf("client_%d", i), + Credentials: sdk.Credentials{ + Identity: fmt.Sprintf("identity_%d", i), + Secret: fmt.Sprintf("password_%d", i), + }, + Metadata: sdk.Metadata{"name": fmt.Sprintf("client_%d", i)}, + Status: mfclients.EnabledStatus.String(), + } + if i == 50 { + cl.Owner = "clientowner" + cl.Status = mfclients.DisabledStatus.String() + cl.Tags = []string{"tag1", "tag2"} + } + cls = append(cls, cl) } cases := []struct { - desc string - token string - offset uint64 - limit uint64 - err errors.SDKError - response []sdk.User - email string - metadata map[string]interface{} + desc string + token string + status string + total uint64 + offset uint64 + limit uint64 + name string + identifier string + ownerID string + tag string + metadata sdk.Metadata + err errors.SDKError + response []sdk.User }{ { desc: "get a list of users", token: token, - offset: offset, limit: limit, + offset: offset, + total: total, err: nil, - email: "", - response: users[offset:limit], + response: cls[offset:limit], }, { 
desc: "get a list of users with invalid token", - token: wrongValue, + token: invalidToken, offset: offset, limit: limit, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), - email: "", + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), response: nil, }, { @@ -263,8 +253,7 @@ func TestUsers(t *testing.T) { token: "", offset: offset, limit: limit, - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), - email: "", + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedList, http.StatusInternalServerError), response: nil, }, { @@ -272,8 +261,7 @@ func TestUsers(t *testing.T) { token: token, offset: offset, limit: 0, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - email: "", + err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusInternalServerError), response: nil, }, { @@ -281,217 +269,951 @@ func TestUsers(t *testing.T) { token: token, offset: offset, limit: 110, - err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusBadRequest), - email: "", + err: errors.NewSDKErrorWithStatus(apiutil.ErrLimitSize, http.StatusInternalServerError), response: []sdk.User(nil), }, { - desc: "get a list of users with same email address", - token: token, - offset: 0, - limit: 1, - err: nil, - email: "user99@example.com", - metadata: make(map[string]interface{}), - response: []sdk.User{users[89]}, + desc: "get a list of users with same identity", + token: token, + offset: 0, + limit: 1, + err: nil, + identifier: Identity, + metadata: sdk.Metadata{}, + response: []sdk.User{cls[89]}, + }, + { + desc: "get a list of users with same identity and metadata", + token: token, + offset: 0, + limit: 1, + err: nil, + identifier: Identity, + metadata: sdk.Metadata{ + "name": "client99", + }, + response: []sdk.User{cls[89]}, }, { - desc: "get a list of users with same email address and metadata", - token: token, + desc: "list users with 
given metadata", + token: generateValidToken(t, svc, cRepo), offset: 0, limit: 1, - err: nil, - email: "user99@example.com", - metadata: map[string]interface{}{ - "name": "user99", + metadata: sdk.Metadata{ + "name": "client99", }, - response: []sdk.User{users[89]}, + response: []sdk.User{cls[89]}, + err: nil, + }, + { + desc: "list users with given name", + token: generateValidToken(t, svc, cRepo), + offset: 0, + limit: 1, + name: "client10", + response: []sdk.User{cls[0]}, + err: nil, + }, + { + desc: "list users with given owner", + token: generateValidToken(t, svc, cRepo), + offset: 0, + limit: 1, + ownerID: "clientowner", + response: []sdk.User{cls[50]}, + err: nil, + }, + { + desc: "list users with given status", + token: generateValidToken(t, svc, cRepo), + offset: 0, + limit: 1, + status: mfclients.DisabledStatus.String(), + response: []sdk.User{cls[50]}, + err: nil, + }, + { + desc: "list users with given tag", + token: generateValidToken(t, svc, cRepo), + offset: 0, + limit: 1, + tag: "tag1", + response: []sdk.User{cls[50]}, + err: nil, }, } + for _, tc := range cases { - filter := sdk.PageMetadata{ - Email: tc.email, + pm := sdk.PageMetadata{ + Status: tc.status, Total: total, Offset: uint64(tc.offset), Limit: uint64(tc.limit), + Name: tc.name, + OwnerID: tc.ownerID, Metadata: tc.metadata, + Tag: tc.tag, } - page, err := mainfluxSDK.Users(filter, tc.token) + + repoCall := pRepo.On("Evaluate", mock.Anything, mock.Anything, mock.Anything).Return(errors.ErrAuthorization) + repoCall1 := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(errors.ErrAuthorization) + repoCall2 := cRepo.On("RetrieveAll", mock.Anything, mock.Anything).Return(mfclients.ClientsPage{Page: convertClientPage(pm), Clients: convertClients(tc.response)}, tc.err) + page, err := clientSDK.Users(pm, generateValidToken(t, svc, cRepo)) assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, page.Users, 
fmt.Sprintf("%s: expected response user %s, got %s", tc.desc, tc.response, page.Users)) + assert.Equal(t, tc.response, page.Users, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() } } -func TestCreateToken(t *testing.T) { - svc := newUserService() - ts := newUserServer(svc) +func TestListMembers(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) defer ts.Close() - sdkConf := sdk.Config{ - UsersURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + var nClients = uint64(10) + var aClients = []sdk.User{} + + for i := uint64(1); i < nClients; i++ { + client := sdk.User{ + Name: fmt.Sprintf("member_%d@example.com", i), + Credentials: sdk.Credentials{ + Identity: fmt.Sprintf("member_%d@example.com", i), + Secret: "password", + }, + Tags: []string{"tag1", "tag2"}, + Metadata: sdk.Metadata{"role": "client"}, + Status: mfclients.EnabledStatus.String(), + } + aClients = append(aClients, client) + } + + cases := []struct { + desc string + token string + groupID string + page sdk.PageMetadata + response []sdk.User + err errors.SDKError + }{ + { + desc: "list clients with authorized token", + token: generateValidToken(t, svc, cRepo), + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{}, + response: aClients, + err: nil, + }, + { + desc: "list clients with offset and limit", + token: generateValidToken(t, svc, cRepo), + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Offset: 4, + Limit: nClients, + }, + response: aClients[4:], + err: nil, + }, + { + desc: "list clients with given name", + token: 
generateValidToken(t, svc, cRepo), + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Name: Identity, + Offset: 6, + Limit: nClients, + }, + response: aClients[6:], + err: nil, + }, + + { + desc: "list clients with given ownerID", + token: generateValidToken(t, svc, cRepo), + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + OwnerID: user.Owner, + Offset: 6, + Limit: nClients, + }, + response: aClients[6:], + err: nil, + }, + { + desc: "list clients with given subject", + token: generateValidToken(t, svc, cRepo), + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Subject: subject, + Offset: 6, + Limit: nClients, + }, + response: aClients[6:], + err: nil, + }, + { + desc: "list clients with given object", + token: generateValidToken(t, svc, cRepo), + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{ + Object: object, + Offset: 6, + Limit: nClients, + }, + response: aClients[6:], + err: nil, + }, + { + desc: "list clients with an invalid token", + token: invalidToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: sdk.PageMetadata{}, + response: []sdk.User(nil), + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "list clients with an invalid id", + token: generateValidToken(t, svc, cRepo), + groupID: mocks.WrongID, + page: sdk.PageMetadata{}, + response: []sdk.User(nil), + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, } - mainfluxSDK := sdk.NewSDK(sdkConf) - user := sdk.User{Email: "user@example.com", Password: "password"} + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("Members", mock.Anything, tc.groupID, mock.Anything).Return(mfclients.MembersPage{Members: convertClients(tc.response)}, tc.err) + membersPage, err := clientSDK.Members(tc.groupID, tc.page, tc.token) + assert.Equal(t, 
tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, membersPage.Members, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, membersPage.Members)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Members", mock.Anything, tc.groupID, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Members was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) +func TestClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: mfauth.APIKey}) - token := tkn.GetValue() - _, err := mainfluxSDK.CreateUser(user, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() + + user = sdk.User{ + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: sdk.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + } + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) cases := []struct { - desc string - user sdk.User - token string - err errors.SDKError + desc string + token string + clientID string + response sdk.User + err errors.SDKError }{ { - desc: "create token for user", - user: user, 
- token: token, - err: nil, + desc: "view client successfully", + response: user, + token: generateValidToken(t, svc, cRepo), + clientID: generateUUID(t), + err: nil, }, { - desc: "create token for non existing user", - user: sdk.User{Email: "user2@example.com", Password: "password"}, - token: "", - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + desc: "view client with an invalid token", + response: sdk.User{}, + token: invalidToken, + clientID: generateUUID(t), + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), }, { - desc: "create user with empty email", - user: sdk.User{Email: "", Password: "password"}, - token: "", - err: errors.NewSDKErrorWithStatus(errors.ErrMalformedEntity, http.StatusBadRequest), + desc: "view client with valid token and invalid client id", + response: sdk.User{}, + token: generateValidToken(t, svc, cRepo), + clientID: mocks.WrongID, + err: errors.NewSDKErrorWithStatus(errors.ErrNotFound, http.StatusNotFound), + }, + { + desc: "view client with an invalid token and invalid client id", + response: sdk.User{}, + token: invalidToken, + clientID: mocks.WrongID, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), }, } + for _, tc := range cases { - token, err := mainfluxSDK.CreateToken(tc.user) + repoCall := pRepo.On("Evaluate", mock.Anything, mock.Anything, mock.Anything).Return(nil) + repoCall1 := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall2 := cRepo.On("RetrieveByID", mock.Anything, tc.clientID).Return(convertClient(tc.response), tc.err) + rClient, err := clientSDK.User(tc.clientID, tc.token) assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) - assert.Equal(t, tc.token, token, fmt.Sprintf("%s: expected response: %s, got: %s", tc.desc, token, tc.token)) + assert.Equal(t, tc.response, rClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, 
rClient)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, tc.clientID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + repoCall2.Unset() + repoCall1.Unset() + repoCall.Unset() } } -func TestUpdateUser(t *testing.T) { - svc := newUserService() - ts := newUserServer(svc) +func TestUpdateClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) defer ts.Close() - sdkConf := sdk.Config{ - UsersURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, + + conf := sdk.Config{ + UsersURL: ts.URL, } + clientSDK := sdk.NewSDK(conf) - mainfluxSDK := sdk.NewSDK(sdkConf) - user := sdk.User{Email: "user@example.com", Password: "password"} + user = sdk.User{ + ID: generateUUID(t), + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: sdk.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + } - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) + client1 := user + client1.Name = "Updated client" - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: mfauth.APIKey}) - token := tkn.GetValue() - userID, err := mainfluxSDK.CreateUser(user, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - usertoken, err := 
mainfluxSDK.CreateToken(user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client2 := user + client2.Metadata = sdk.Metadata{"role": "test"} + client2.ID = invalidIdentity cases := []struct { - desc string - user sdk.User - token string - err errors.SDKError + desc string + client sdk.User + response sdk.User + token string + err errors.SDKError }{ { - desc: "update email for user", - user: sdk.User{ID: userID, Email: "user2@example.com", Password: "password"}, - token: usertoken, - err: nil, + desc: "update client name with valid token", + client: client1, + response: client1, + token: generateValidToken(t, svc, cRepo), + err: nil, }, { - desc: "update email for user with invalid token", - user: sdk.User{ID: userID, Email: "user2@example.com", Password: "password"}, - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + desc: "update client name with invalid token", + client: client1, + response: sdk.User{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), }, { - desc: "update email for user with empty token", - user: sdk.User{ID: userID, Email: "user2@example.com", Password: "password"}, - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), + desc: "update client name with invalid id", + client: client2, + response: sdk.User{}, + token: generateValidToken(t, svc, cRepo), + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedUpdate, http.StatusInternalServerError), }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("Update", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + uClient, err := clientSDK.UpdateUser(tc.client, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, uClient, 
fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, uClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Update", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientTags(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + user = sdk.User{ + ID: generateUUID(t), + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: sdk.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + } + + client1 := user + client1.Tags = []string{"updatedTag1", "updatedTag2"} + + client2 := user + client2.ID = invalidIdentity + + cases := []struct { + desc string + client sdk.User + response sdk.User + token string + err error + }{ { - desc: "update metadata for user", - user: sdk.User{ID: userID, Metadata: metadata, Password: "password"}, - token: usertoken, - err: nil, + desc: "update client name with valid token", + client: user, + response: client1, + token: generateValidToken(t, svc, cRepo), + err: nil, + }, + { + desc: "update client name with invalid token", + client: client1, + response: sdk.User{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update client name with invalid id", + client: client2, + response: sdk.User{}, + token: generateValidToken(t, 
svc, cRepo), + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedUpdate, http.StatusInternalServerError), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("UpdateTags", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + uClient, err := clientSDK.UpdateUserTags(tc.client, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, uClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, uClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "UpdateTags", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateTags was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientIdentity(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + user = sdk.User{ + ID: generateUUID(t), + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: sdk.Credentials{Identity: "updatedclientidentity", Secret: secret}, + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + } + + client2 := user + client2.Metadata = sdk.Metadata{"role": "test"} + client2.ID = invalidIdentity + + cases := []struct { + desc string + client sdk.User + response sdk.User + token string + err errors.SDKError + }{ + { + desc: "update client name with valid token", + client: user, + response: user, + 
token: generateValidToken(t, svc, cRepo), + err: nil, + }, + { + desc: "update client name with invalid token", + client: user, + response: sdk.User{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update client name with invalid id", + client: client2, + response: sdk.User{}, + token: generateValidToken(t, svc, cRepo), + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedUpdate, http.StatusInternalServerError), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("UpdateIdentity", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + uClient, err := clientSDK.UpdateUserIdentity(tc.client, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, uClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, uClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "UpdateIdentity", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateIdentity was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientSecret(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + user.ID = generateUUID(t) + rclient := user + rclient.Credentials.Secret, _ = phasher.Hash(user.Credentials.Secret) + + repoCall := 
cRepo.On("RetrieveByIdentity", context.Background(), user.Credentials.Identity).Return(convertClient(rclient), nil) + token, err := svc.IssueToken(context.Background(), user.Credentials.Identity, user.Credentials.Secret) + assert.Nil(t, err, fmt.Sprintf("Issue token expected nil got %s\n", err)) + repoCall.Unset() + + cases := []struct { + desc string + oldSecret string + newSecret string + token string + response sdk.User + err error + }{ + { + desc: "update client secret with valid token", + oldSecret: user.Credentials.Secret, + newSecret: "newSecret", + token: token.AccessToken, + response: rclient, + err: nil, + }, + { + desc: "update client secret with invalid token", + oldSecret: user.Credentials.Secret, + newSecret: "newPassword", + token: "non-existent", + response: sdk.User{}, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update client secret with wrong old secret", + oldSecret: "oldSecret", + newSecret: "newSecret", + token: token.AccessToken, + response: sdk.User{}, + err: errors.NewSDKErrorWithStatus(apiutil.ErrInvalidSecret, http.StatusBadRequest), + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", mock.Anything, user.ID).Return(convertClient(tc.response), tc.err) + repoCall1 := cRepo.On("RetrieveByIdentity", mock.Anything, user.Credentials.Identity).Return(convertClient(tc.response), tc.err) + repoCall2 := cRepo.On("UpdateSecret", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + uClient, err := clientSDK.UpdatePassword(tc.oldSecret, tc.newSecret, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, uClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, uClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, user.ID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", 
tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByIdentity", mock.Anything, user.Credentials.Identity) + assert.True(t, ok, fmt.Sprintf("RetrieveByIdentity was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "UpdateSecret", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateSecret was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } +} + +func TestUpdateClientOwner(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + user = sdk.User{ + ID: generateUUID(t), + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: sdk.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validMetadata, + Status: mfclients.EnabledStatus.String(), + Owner: "owner", + } + + client2 := user + client2.ID = invalidIdentity + + cases := []struct { + desc string + client sdk.User + response sdk.User + token string + err errors.SDKError + }{ + { + desc: "update client name with valid token", + client: user, + response: user, + token: generateValidToken(t, svc, cRepo), + err: nil, + }, + { + desc: "update client name with invalid token", + client: client2, + response: sdk.User{}, + token: invalidToken, + err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + }, + { + desc: "update client name with invalid id", + client: client2, + response: sdk.User{}, + token: generateValidToken(t, svc, cRepo), + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedUpdate, http.StatusInternalServerError), }, } + for _, tc := range cases { - err := mainfluxSDK.UpdateUser(tc.user, tc.token) + repoCall := 
pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("UpdateOwner", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + uClient, err := clientSDK.UpdateUserOwner(tc.client, tc.token) assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, uClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, uClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "UpdateOwner", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateOwner was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() } } -func TestUpdatePassword(t *testing.T) { - svc := newUserService() - ts := newUserServer(svc) +func TestEnableClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) defer ts.Close() - sdkConf := sdk.Config{ - UsersURL: ts.URL, - MsgContentType: contentType, - TLSVerification: false, + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + enabledClient1 := sdk.User{ID: testsutil.GenerateUUID(t, idProvider), Credentials: sdk.Credentials{Identity: "client1@example.com", Secret: "password"}, Status: mfclients.EnabledStatus.String()} + disabledClient1 := sdk.User{ID: testsutil.GenerateUUID(t, idProvider), Credentials: sdk.Credentials{Identity: "client3@example.com", Secret: "password"}, Status: mfclients.DisabledStatus.String()} + endisabledClient1 := disabledClient1 + endisabledClient1.Status = mfclients.EnabledStatus.String() + endisabledClient1.ID = 
testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + id string + token string + client sdk.User + response sdk.User + err errors.SDKError + }{ + { + desc: "enable disabled client", + id: disabledClient1.ID, + token: generateValidToken(t, svc, cRepo), + client: disabledClient1, + response: endisabledClient1, + err: nil, + }, + { + desc: "enable enabled client", + id: enabledClient1.ID, + token: generateValidToken(t, svc, cRepo), + client: enabledClient1, + response: sdk.User{}, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedEnable, http.StatusInternalServerError), + }, + { + desc: "enable non-existing client", + id: mocks.WrongID, + token: generateValidToken(t, svc, cRepo), + client: sdk.User{}, + response: sdk.User{}, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedEnable, http.StatusNotFound), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveByID", mock.Anything, tc.id).Return(convertClient(tc.client), tc.err) + repoCall2 := cRepo.On("ChangeStatus", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + eClient, err := clientSDK.EnableUser(tc.id, tc.token) + assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, eClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, eClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("ChangeStatus was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + 
repoCall2.Unset() + } + + cases2 := []struct { + desc string + token string + status string + metadata sdk.Metadata + response sdk.UsersPage + size uint64 + }{ + { + desc: "list enabled clients", + status: mfclients.EnabledStatus.String(), + size: 2, + response: sdk.UsersPage{ + Users: []sdk.User{enabledClient1, endisabledClient1}, + }, + }, + { + desc: "list disabled clients", + status: mfclients.DisabledStatus.String(), + size: 1, + response: sdk.UsersPage{ + Users: []sdk.User{disabledClient1}, + }, + }, + { + desc: "list enabled and disabled clients", + status: mfclients.AllStatus.String(), + size: 3, + response: sdk.UsersPage{ + Users: []sdk.User{enabledClient1, disabledClient1, endisabledClient1}, + }, + }, } - mainfluxSDK := sdk.NewSDK(sdkConf) - user := sdk.User{Email: "user@example.com", Password: "password"} + for _, tc := range cases2 { + pm := sdk.PageMetadata{ + Total: 100, + Offset: 0, + Limit: 100, + Status: tc.status, + } - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveAll", mock.Anything, mock.Anything).Return(convertClientsPage(tc.response), nil) + clientsPage, err := clientSDK.Users(pm, generateValidToken(t, svc, cRepo)) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(clientsPage.Users)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall.Unset() + repoCall1.Unset() + } +} - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: mfauth.APIKey}) - token := tkn.GetValue() - _, err := mainfluxSDK.CreateUser(user, token) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - usertoken, 
err := mainfluxSDK.CreateToken(user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) +func TestDisableClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + + svc := clients.NewService(cRepo, pRepo, tokenizer, emailer, phasher, idProvider, passRegex) + ts := newClientServer(svc) + defer ts.Close() + + conf := sdk.Config{ + UsersURL: ts.URL, + } + clientSDK := sdk.NewSDK(conf) + + enabledClient1 := sdk.User{ID: testsutil.GenerateUUID(t, idProvider), Credentials: sdk.Credentials{Identity: "client1@example.com", Secret: "password"}, Status: mfclients.EnabledStatus.String()} + disabledClient1 := sdk.User{ID: testsutil.GenerateUUID(t, idProvider), Credentials: sdk.Credentials{Identity: "client3@example.com", Secret: "password"}, Status: mfclients.DisabledStatus.String()} + disenabledClient1 := enabledClient1 + disenabledClient1.Status = mfclients.DisabledStatus.String() + disenabledClient1.ID = testsutil.GenerateUUID(t, idProvider) cases := []struct { - desc string - oldPass string - newPass string - token string - err errors.SDKError + desc string + id string + token string + client sdk.User + response sdk.User + err errors.SDKError }{ { - desc: "update password for user", - oldPass: "password", - newPass: "password123", - token: usertoken, - err: nil, + desc: "disable enabled client", + id: enabledClient1.ID, + token: generateValidToken(t, svc, cRepo), + client: enabledClient1, + response: disenabledClient1, + err: nil, }, { - desc: "update password for user with invalid token", - oldPass: "password", - newPass: "password123", - token: wrongValue, - err: errors.NewSDKErrorWithStatus(errors.ErrAuthentication, http.StatusUnauthorized), + desc: "disable disabled client", + id: disabledClient1.ID, + token: generateValidToken(t, svc, cRepo), + client: disabledClient1, + response: sdk.User{}, + err: 
errors.NewSDKErrorWithStatus(sdk.ErrFailedDisable, http.StatusInternalServerError), }, { - desc: "update password for user with empty token", - oldPass: "password", - newPass: "password123", - token: "", - err: errors.NewSDKErrorWithStatus(apiutil.ErrBearerToken, http.StatusUnauthorized), + desc: "disable non-existing client", + id: mocks.WrongID, + client: sdk.User{}, + token: generateValidToken(t, svc, cRepo), + response: sdk.User{}, + err: errors.NewSDKErrorWithStatus(sdk.ErrFailedDisable, http.StatusNotFound), }, } + for _, tc := range cases { - err := mainfluxSDK.UpdatePassword(tc.oldPass, tc.newPass, tc.token) + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveByID", mock.Anything, tc.id).Return(convertClient(tc.client), tc.err) + repoCall2 := cRepo.On("ChangeStatus", mock.Anything, mock.Anything).Return(convertClient(tc.response), tc.err) + dClient, err := clientSDK.DisableUser(tc.id, tc.token) assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected error %s, got %s", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, dClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, dClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", mock.Anything, tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", mock.Anything, mock.Anything) + assert.True(t, ok, fmt.Sprintf("ChangeStatus was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } + + cases2 := []struct { + desc string + token string + status string + metadata sdk.Metadata + response sdk.UsersPage + size uint64 + }{ + { + desc: "list enabled clients", + status: mfclients.EnabledStatus.String(), + size: 2, + response: 
sdk.UsersPage{ + Users: []sdk.User{enabledClient1, disenabledClient1}, + }, + }, + { + desc: "list disabled clients", + status: mfclients.DisabledStatus.String(), + size: 1, + response: sdk.UsersPage{ + Users: []sdk.User{disabledClient1}, + }, + }, + { + desc: "list enabled and disabled clients", + status: mfclients.AllStatus.String(), + size: 3, + response: sdk.UsersPage{ + Users: []sdk.User{enabledClient1, disabledClient1, disenabledClient1}, + }, + }, + } + + for _, tc := range cases2 { + pm := sdk.PageMetadata{ + Total: 100, + Offset: 0, + Limit: 100, + Status: tc.status, + } + repoCall := pRepo.On("CheckAdmin", mock.Anything, mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveAll", mock.Anything, mock.Anything).Return(convertClientsPage(tc.response), nil) + page, err := clientSDK.Users(pm, generateValidToken(t, svc, cRepo)) + assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Users)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall.Unset() + repoCall1.Unset() } } diff --git a/provision/README.md b/provision/README.md index 67de81f636..0e4b320b73 100644 --- a/provision/README.md +++ b/provision/README.md @@ -81,13 +81,13 @@ Example of provision layout below ``` ## Authentication -In order to create necessary entities provision service needs to authenticate against Mainflux. To provide authentication credentials to the provision service you can pass it in an environment variable or in a config file as Mainflux user and password or as API token (that can be issued on `/users` or `/keys` endpoint of [auth](../auth/README.md)). +In order to create necessary entities provision service needs to authenticate against Mainflux. To provide authentication credentials to the provision service you can pass it in an environment variable or in a config file as Mainflux user and password or as API token that can be issued on `/users/tokens/issue`. 
Additionally users or API token can be passed in Authorization header, this authentication takes precedence over others. * `username`, `password` - (`MF_PROVISION_USER`, `MF_PROVISION_PASSWORD` in [.env](../.env), `mf_user`, `mf_pass` in [config.toml](../docker/addons/provision/configs/config.toml)) * API Key - (`MF_PROVISION_API_KEY` in [.env](../.env) or [config.toml](../docker/addons/provision/configs/config.toml)) -* `Authorization: Bearer Token|ApiKey` - request authorization header containing either users token or API key. Check [auth](../auth/README.md). +* `Authorization: Bearer Token` - request authorization header containing either users token. ## Running Provision service can be run as a standalone or in docker composition as addon to the core docker composition. diff --git a/provision/api/logging.go b/provision/api/logging.go index 8ebd4339c6..b67b52d3d7 100644 --- a/provision/api/logging.go +++ b/provision/api/logging.go @@ -9,19 +9,19 @@ import ( "fmt" "time" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/provision" ) var _ provision.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc provision.Service } // NewLoggingMiddleware adds logging facilities to the core service. 
-func NewLoggingMiddleware(svc provision.Service, logger log.Logger) provision.Service { +func NewLoggingMiddleware(svc provision.Service, logger mflog.Logger) provision.Service { return &loggingMiddleware{logger, svc} } diff --git a/provision/config.go b/provision/config.go index 36b9ac4542..4f6bd2258d 100644 --- a/provision/config.go +++ b/provision/config.go @@ -5,10 +5,11 @@ package provision import ( "fmt" - "io/ioutil" + "os" + mfclients "github.com/mainflux/mainflux/pkg/clients" "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" + "github.com/mainflux/mainflux/pkg/groups" "github.com/pelletier/go-toml" ) @@ -56,12 +57,12 @@ type Cert struct { // Config struct of Provision type Config struct { - File string `toml:"file"` - Server ServiceConf `toml:"server" mapstructure:"server"` - Bootstrap Bootstrap `toml:"bootstrap" mapstructure:"bootstrap"` - Things []things.Thing `toml:"things" mapstructure:"things"` - Channels []things.Channel `toml:"channels" mapstructure:"channels"` - Cert Cert `toml:"cert" mapstructure:"cert"` + File string `toml:"file"` + Server ServiceConf `toml:"server" mapstructure:"server"` + Bootstrap Bootstrap `toml:"bootstrap" mapstructure:"bootstrap"` + Things []mfclients.Client `toml:"things" mapstructure:"things"` + Channels []groups.Group `toml:"channels" mapstructure:"channels"` + Cert Cert `toml:"cert" mapstructure:"cert"` } // Save - store config in a file @@ -70,7 +71,7 @@ func Save(c Config, file string) error { if err != nil { return errors.New(fmt.Sprintf("Error reading config file: %s", err)) } - if err := ioutil.WriteFile(file, b, 0644); err != nil { + if err := os.WriteFile(file, b, 0644); err != nil { return errors.New(fmt.Sprintf("Error writing toml: %s", err)) } return nil @@ -78,7 +79,7 @@ func Save(c Config, file string) error { // Read - retrieve config from a file func Read(file string) (Config, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) c := Config{} if 
err != nil { return c, errors.New(fmt.Sprintf("Error reading config file: %s", err)) diff --git a/provision/service.go b/provision/service.go index bb50cefe7f..f1714311ac 100644 --- a/provision/service.go +++ b/provision/service.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" SDK "github.com/mainflux/mainflux/pkg/sdk/go" ) @@ -65,7 +65,7 @@ type Service interface { } type provisionService struct { - logger logger.Logger + logger mflog.Logger sdk SDK.SDK conf Config } @@ -82,7 +82,7 @@ type Result struct { } // New returns new provision service. -func New(cfg Config, sdk SDK.SDK, logger logger.Logger) Service { +func New(cfg Config, sdk SDK.SDK, logger mflog.Logger) Service { return &provisionService{ logger: logger, conf: cfg, @@ -137,16 +137,16 @@ func (ps *provisionService) Provision(token, name, externalID, externalKey strin name = thing.Name } th.Name = name - thID, err := ps.sdk.CreateThing(th, token) + th, err := ps.sdk.CreateThing(th, token) if err != nil { res.Error = err.Error() return res, errors.Wrap(ErrFailedThingCreation, err) } // Get newly created thing (in order to get the key). 
- th, err = ps.sdk.Thing(thID, token) + th, err = ps.sdk.Thing(th.ID, token) if err != nil { - e := errors.Wrap(err, fmt.Errorf("thing id: %s", thID)) + e := errors.Wrap(err, fmt.Errorf("thing id: %s", th.ID)) return res, errors.Wrap(ErrFailedThingRetrieval, e) } things = append(things, th) @@ -155,15 +155,15 @@ func (ps *provisionService) Provision(token, name, externalID, externalKey strin for _, channel := range ps.conf.Channels { ch := SDK.Channel{ Name: channel.Name, - Metadata: channel.Metadata, + Metadata: SDK.Metadata(channel.Metadata), } - chCreated, err := ps.sdk.CreateChannel(ch, token) + ch, err := ps.sdk.CreateChannel(ch, token) if err != nil { return res, err } - ch, err = ps.sdk.Channel(chCreated, token) + ch, err = ps.sdk.Channel(ch.ID, token) if err != nil { - e := errors.Wrap(err, fmt.Errorf("channel id: %s", chCreated)) + e := errors.Wrap(err, fmt.Errorf("channel id: %s", ch.ID)) return res, errors.Wrap(ErrFailedChannelRetrieval, e) } channels = append(channels, ch) @@ -283,15 +283,17 @@ func (ps *provisionService) createTokenIfEmpty(token string) (string, error) { } u := SDK.User{ - Email: ps.conf.Server.MfUser, - Password: ps.conf.Server.MfPass, + Credentials: SDK.Credentials{ + Identity: ps.conf.Server.MfUser, + Secret: ps.conf.Server.MfPass, + }, } - token, err := ps.sdk.CreateToken(u) + tkn, err := ps.sdk.CreateToken(u) if err != nil { return token, errors.Wrap(ErrFailedToCreateToken, err) } - return token, nil + return tkn.AccessToken, nil } func (ps *provisionService) updateGateway(token string, bs SDK.BootstrapConfig, channels []SDK.Channel) error { @@ -322,7 +324,7 @@ func (ps *provisionService) updateGateway(token string, bs SDK.BootstrapConfig, if err := json.Unmarshal(b, &th.Metadata); err != nil { return errors.Wrap(ErrGatewayUpdate, err) } - if err := ps.sdk.UpdateThing(th, token); err != nil { + if _, err := ps.sdk.UpdateThing(th, token); err != nil { return errors.Wrap(ErrGatewayUpdate, err) } return nil @@ -336,10 +338,12 @@ func 
(ps *provisionService) errLog(err error) { func clean(ps *provisionService, things []SDK.Thing, channels []SDK.Channel, token string) { for _, t := range things { - ps.errLog(ps.sdk.DeleteThing(t.ID, token)) + _, err := ps.sdk.DisableThing(t.ID, token) + ps.errLog(err) } for _, c := range channels { - ps.errLog(ps.sdk.DeleteChannel(c.ID, token)) + _, err := ps.sdk.DisableChannel(c.ID, token) + ps.errLog(err) } } @@ -351,7 +355,8 @@ func (ps *provisionService) recover(e *error, ths *[]SDK.Thing, chs *[]SDK.Chann if errors.Contains(err, ErrFailedThingRetrieval) || errors.Contains(err, ErrFailedChannelCreation) { for _, th := range things { - ps.errLog(ps.sdk.DeleteThing(th.ID, token)) + _, err := ps.sdk.DisableThing(th.ID, token) + ps.errLog(err) } return } diff --git a/readers/api/endpoint.go b/readers/api/endpoint.go index fb25327789..629bbc27ef 100644 --- a/readers/api/endpoint.go +++ b/readers/api/endpoint.go @@ -7,12 +7,13 @@ import ( "context" "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/readers" + tpolicies "github.com/mainflux/mainflux/things/policies" + upolicies "github.com/mainflux/mainflux/users/policies" ) -func listMessagesEndpoint(svc readers.MessageRepository, tc mainflux.ThingsServiceClient, ac mainflux.AuthServiceClient) endpoint.Endpoint { +func listMessagesEndpoint(svc readers.MessageRepository, tc tpolicies.ThingsServiceClient, ac upolicies.AuthServiceClient) endpoint.Endpoint { return func(ctx context.Context, request interface{}) (interface{}, error) { req := request.(listMessagesReq) diff --git a/readers/api/endpoint_test.go b/readers/api/endpoint_test.go index 7b4faa305e..f166282a7f 100644 --- a/readers/api/endpoint_test.go +++ b/readers/api/endpoint_test.go @@ -11,15 +11,16 @@ import ( "testing" "time" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" 
"github.com/mainflux/mainflux/pkg/transformers/senml" "github.com/mainflux/mainflux/pkg/uuid" "github.com/mainflux/mainflux/readers" "github.com/mainflux/mainflux/readers/api" "github.com/mainflux/mainflux/readers/mocks" - authmocks "github.com/mainflux/mainflux/users/mocks" + "github.com/mainflux/mainflux/things/clients" + tpolicies "github.com/mainflux/mainflux/things/policies" + authmocks "github.com/mainflux/mainflux/users/clients/mocks" + upolicies "github.com/mainflux/mainflux/users/policies" "github.com/stretchr/testify/assert" ) @@ -47,9 +48,8 @@ var ( idProvider = uuid.New() ) -func newServer(repo readers.MessageRepository, tc mainflux.ThingsServiceClient, ac mainflux.AuthServiceClient) *httptest.Server { - logger := logger.NewMock() - mux := api.MakeHandler(repo, tc, ac, svcName, logger) +func newServer(repo readers.MessageRepository, tc tpolicies.ThingsServiceClient, ac upolicies.AuthServiceClient) *httptest.Server { + mux := api.MakeHandler(repo, tc, ac, svcName) return httptest.NewServer(mux) } @@ -131,7 +131,8 @@ func TestReadAll(t *testing.T) { thSvc := mocks.NewThingsService(map[string]string{email: chanID}) mockAuthzDB := map[string][]authmocks.SubjectSet{} - mockAuthzDB[email] = append(mockAuthzDB[email], authmocks.SubjectSet{Object: "authorities", Relation: "member"}) + mockAuthzDB["token"] = append(mockAuthzDB[email], authmocks.SubjectSet{Subject: "token", Relation: clients.AdminRelationKey}) + usrSvc := authmocks.NewAuthService(map[string]string{userToken: email}, mockAuthzDB) repo := mocks.NewMessageRepository(chanID, fromSenml(messages)) diff --git a/readers/api/transport.go b/readers/api/transport.go index 012db8861e..43f5ba93d5 100644 --- a/readers/api/transport.go +++ b/readers/api/transport.go @@ -12,9 +12,10 @@ import ( "github.com/go-zoo/bone" "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" 
"github.com/mainflux/mainflux/readers" + tpolicies "github.com/mainflux/mainflux/things/policies" + upolicies "github.com/mainflux/mainflux/users/policies" "github.com/prometheus/client_golang/prometheus/promhttp" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -44,14 +45,10 @@ const ( var ( errThingAccess = errors.New("thing has no permission") errUserAccess = errors.New("user has no permission") - thingsAuth mainflux.ThingsServiceClient - usersAuth mainflux.AuthServiceClient ) // MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(svc readers.MessageRepository, tc mainflux.ThingsServiceClient, ac mainflux.AuthServiceClient, svcName string, logger logger.Logger) http.Handler { - thingsAuth = tc - usersAuth = ac +func MakeHandler(svc readers.MessageRepository, tc tpolicies.ThingsServiceClient, ac upolicies.AuthServiceClient, svcName string) http.Handler { opts := []kithttp.ServerOption{ kithttp.ServerErrorEncoder(encodeError), @@ -71,7 +68,7 @@ func MakeHandler(svc readers.MessageRepository, tc mainflux.ThingsServiceClient, return mux } -func decodeList(ctx context.Context, r *http.Request) (interface{}, error) { +func decodeList(_ context.Context, r *http.Request) (interface{}, error) { offset, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) if err != nil { return nil, err @@ -215,10 +212,10 @@ func encodeError(_ context.Context, err error, w http.ResponseWriter) { } } -func authorize(ctx context.Context, req listMessagesReq, tc mainflux.ThingsServiceClient, ac mainflux.AuthServiceClient) (err error) { +func authorize(ctx context.Context, req listMessagesReq, tc tpolicies.ThingsServiceClient, ac upolicies.AuthServiceClient) (err error) { switch { case req.token != "": - user, err := usersAuth.Identify(ctx, &mainflux.Token{Value: req.token}) + user, err := ac.Identify(ctx, &upolicies.Token{Value: req.token}) if err != nil { e, ok := status.FromError(err) if ok && e.Code() == codes.PermissionDenied { @@ -226,7 +223,7 @@ 
func authorize(ctx context.Context, req listMessagesReq, tc mainflux.ThingsServi } return err } - if _, err = thingsAuth.IsChannelOwner(ctx, &mainflux.ChannelOwnerReq{Owner: user.Email, ChanID: req.chanID}); err != nil { + if _, err = tc.Authorize(ctx, &tpolicies.AuthorizeReq{Sub: user.GetId(), Obj: req.chanID, Act: tpolicies.ReadAction, EntityType: tpolicies.GroupEntityType}); err != nil { e, ok := status.FromError(err) if ok && e.Code() == codes.PermissionDenied { return errors.Wrap(errUserAccess, err) @@ -235,7 +232,7 @@ func authorize(ctx context.Context, req listMessagesReq, tc mainflux.ThingsServi } return nil default: - if _, err := thingsAuth.CanAccessByKey(ctx, &mainflux.AccessByKeyReq{Token: req.key, ChanID: req.chanID}); err != nil { + if _, err := tc.Authorize(ctx, &tpolicies.AuthorizeReq{Sub: req.key, Obj: req.chanID, Act: tpolicies.ReadAction, EntityType: tpolicies.GroupEntityType}); err != nil { return errors.Wrap(errThingAccess, err) } return nil diff --git a/readers/cassandra/README.md b/readers/cassandra/README.md index 341bf8bb87..3bf568ec46 100644 --- a/readers/cassandra/README.md +++ b/readers/cassandra/README.md @@ -24,8 +24,8 @@ default values. 
| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | | MF_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 | | MF_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC request timeout in seconds | 1 | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | ## Deployment diff --git a/readers/cassandra/setup_test.go b/readers/cassandra/setup_test.go index 77643a397b..3ff87f1c92 100644 --- a/readers/cassandra/setup_test.go +++ b/readers/cassandra/setup_test.go @@ -10,11 +10,11 @@ import ( "github.com/gocql/gocql" casClient "github.com/mainflux/mainflux/internal/clients/cassandra" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" dockertest "github.com/ory/dockertest/v3" ) -var logger, _ = log.New(os.Stdout, log.Info.String()) +var logger, _ = mflog.New(os.Stdout, mflog.Info.String()) func TestMain(m *testing.M) { pool, err := dockertest.NewPool("") diff --git a/readers/influxdb/README.md b/readers/influxdb/README.md index f1e69a45c4..ef37aae937 100644 --- a/readers/influxdb/README.md +++ b/readers/influxdb/README.md @@ -32,8 +32,8 @@ default values. 
| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | | MF_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 | | MF_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC request timeout in seconds | 1s | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | ## Deployment diff --git a/readers/influxdb/setup_test.go b/readers/influxdb/setup_test.go index 5b67707d94..d30897ba5d 100644 --- a/readers/influxdb/setup_test.go +++ b/readers/influxdb/setup_test.go @@ -11,12 +11,12 @@ import ( "time" influxdata "github.com/influxdata/influxdb-client-go/v2" - mainflux_log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" dockertest "github.com/ory/dockertest/v3" ) var ( - testLog, _ = mainflux_log.New(os.Stdout, mainflux_log.Info.String()) + testLog, _ = mflog.New(os.Stdout, mflog.Info.String()) address string ) diff --git a/readers/mocks/things.go b/readers/mocks/things.go index 918dcca639..de24ea4616 100644 --- a/readers/mocks/things.go +++ b/readers/mocks/things.go @@ -6,54 +6,35 @@ package mocks import ( "context" - "github.com/golang/protobuf/ptypes/empty" - - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" "google.golang.org/grpc" ) -var _ mainflux.ThingsServiceClient = (*thingsServiceMock)(nil) +var _ policies.ThingsServiceClient = (*thingsServiceMock)(nil) type thingsServiceMock struct { channels map[string]string } // NewThingsService returns mock implementation of things service -func NewThingsService(channels map[string]string) mainflux.ThingsServiceClient { +func NewThingsService(channels map[string]string) policies.ThingsServiceClient { return &thingsServiceMock{channels} } -func (svc thingsServiceMock) 
CanAccessByKey(ctx context.Context, in *mainflux.AccessByKeyReq, opts ...grpc.CallOption) (*mainflux.ThingID, error) { - token := in.GetToken() - if token == "invalid" { +func (svc thingsServiceMock) AuthorizeByKey(ctx context.Context, in *policies.AuthorizeReq, opts ...grpc.CallOption) (*policies.ClientID, error) { + token := in.GetSub() + if token == "invalid" || token == "" { return nil, errors.ErrAuthentication } - if token == "" { - return nil, errors.ErrAuthentication - } - - if token == "token" { - return nil, errors.ErrAuthorization - } - - return &mainflux.ThingID{Value: token}, nil + return &policies.ClientID{Value: token}, nil } -func (svc thingsServiceMock) CanAccessByID(context.Context, *mainflux.AccessByIDReq, ...grpc.CallOption) (*empty.Empty, error) { - panic("not implemented") -} - -func (svc thingsServiceMock) IsChannelOwner(ctx context.Context, in *mainflux.ChannelOwnerReq, opts ...grpc.CallOption) (*empty.Empty, error) { - if id, ok := svc.channels[in.GetOwner()]; ok { - if id == in.ChanID { - return nil, nil - } - } - return nil, errors.ErrAuthorization +func (svc thingsServiceMock) Authorize(context.Context, *policies.AuthorizeReq, ...grpc.CallOption) (*policies.AuthorizeRes, error) { + return &policies.AuthorizeRes{Authorized: true}, nil } -func (svc thingsServiceMock) Identify(context.Context, *mainflux.Token, ...grpc.CallOption) (*mainflux.ThingID, error) { +func (svc thingsServiceMock) Identify(context.Context, *policies.Key, ...grpc.CallOption) (*policies.ClientID, error) { panic("not implemented") } diff --git a/readers/mongodb/README.md b/readers/mongodb/README.md index 7b5de026b5..f6e7f18d39 100644 --- a/readers/mongodb/README.md +++ b/readers/mongodb/README.md @@ -22,8 +22,8 @@ default values. 
| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | | MF_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 | | MF_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC request timeout in seconds | 1s | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | ## Deployment diff --git a/readers/mongodb/setup_test.go b/readers/mongodb/setup_test.go index 83350f24fa..d246f17342 100644 --- a/readers/mongodb/setup_test.go +++ b/readers/mongodb/setup_test.go @@ -9,13 +9,13 @@ import ( "os" "testing" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" dockertest "github.com/ory/dockertest/v3" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" ) -var testLog, _ = log.New(os.Stdout, log.Info.String()) +var testLog, _ = mflog.New(os.Stdout, mflog.Info.String()) func TestMain(m *testing.M) { pool, err := dockertest.NewPool("") diff --git a/readers/postgres/README.md b/readers/postgres/README.md index 20c1df3f39..52d2333e4b 100644 --- a/readers/postgres/README.md +++ b/readers/postgres/README.md @@ -8,26 +8,26 @@ The service is configured using the environment variables presented in the following table. Note that any unset variables will be replaced with their default values. 
-| Variable | Description | Default | -|-------------------------------------|----------------------------------------------|----------------| -| MF_POSTGRES_READER_LOG_LEVEL | Service log level | info | -| MF_POSTGRES_READER_PORT | Service HTTP port | 9009 | -| MF_POSTGRES_READER_CLIENT_TLS | TLS mode flag | false | -| MF_POSTGRES_READER_CA_CERTS | Path to trusted CAs in PEM format | | -| MF_POSTGRES_READER_DB_HOST | Postgres DB host | postgres | -| MF_POSTGRES_READER_DB_PORT | Postgres DB port | 5432 | -| MF_POSTGRES_READER_DB_USER | Postgres user | mainflux | -| MF_POSTGRES_READER_DB_PASS | Postgres password | mainflux | -| MF_POSTGRES_READER_DB | Postgres database name | messages | -| MF_POSTGRES_READER_DB_SSL_MODE | Postgres SSL mode | disabled | -| MF_POSTGRES_READER_DB_SSL_CERT | Postgres SSL certificate path | "" | -| MF_POSTGRES_READER_DB_SSL_KEY | Postgres SSL key | "" | -| MF_POSTGRES_READER_DB_SSL_ROOT_CERT | Postgres SSL root certificate path | "" | -| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | -| MF_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 | -| MF_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC timeout in seconds | 1s | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| Variable | Description | Default | +|-------------------------------------|-----------------------------------------------|----------------| +| MF_POSTGRES_READER_LOG_LEVEL | Service log level | info | +| MF_POSTGRES_READER_PORT | Service HTTP port | 9009 | +| MF_POSTGRES_READER_CLIENT_TLS | TLS mode flag | false | +| MF_POSTGRES_READER_CA_CERTS | Path to trusted CAs in PEM format | | +| MF_POSTGRES_READER_DB_HOST | Postgres DB host | postgres | +| MF_POSTGRES_READER_DB_PORT | Postgres DB port | 5432 | +| MF_POSTGRES_READER_DB_USER | Postgres user | mainflux | +| MF_POSTGRES_READER_DB_PASS | Postgres password | mainflux | +| MF_POSTGRES_READER_DB | 
Postgres database name | messages | +| MF_POSTGRES_READER_DB_SSL_MODE | Postgres SSL mode | disabled | +| MF_POSTGRES_READER_DB_SSL_CERT | Postgres SSL certificate path | "" | +| MF_POSTGRES_READER_DB_SSL_KEY | Postgres SSL key | "" | +| MF_POSTGRES_READER_DB_SSL_ROOT_CERT | Postgres SSL root certificate path | "" | +| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | +| MF_THINGS_AUTH_GRPC_URL | Things service Auth gRPC URL | localhost:7000 | +| MF_THINGS_AUTH_GRPC_TIMEOUT | Things service Auth gRPC timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | ## Deployment diff --git a/scripts/provision-dev.sh b/scripts/provision-dev.sh index f692e2143d..26658e0a2a 100755 --- a/scripts/provision-dev.sh +++ b/scripts/provision-dev.sh @@ -25,24 +25,26 @@ CHANNEL=$4 #provision user: printf "Provisoning user with email $EMAIL and password $PASSWORD \n" -curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" https://localhost/users -d '{"email":"'"$EMAIL"'", "password":"'"$PASSWORD"'"}' +curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" https://localhost/users -d '{"credentials": {"identity": "'"$EMAIL"'","secret": "'"$PASSWORD"'"}, "status": "enabled", "role": "admin" }' #get jwt token -JWTTOKEN=$(curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" https://localhost/tokens -d '{"email":"'"$EMAIL"'", "password":"'"$PASSWORD"'"}' | grep -Po "token\":\"\K(.*)(?=\")") +JWTTOKEN=$(curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" https://localhost/users/tokens/issue -d '{"identity":"'"$EMAIL"'", "secret":"'"$PASSWORD"'"}' | grep -oP '"access_token":"\K[^"]+' ) printf "JWT TOKEN for user is $JWTTOKEN \n" #provision thing printf 
"Provisioning thing with name $DEVICE \n" -curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $JWTTOKEN" https://localhost/things -d '{"name":"'"$DEVICE"'"}' +DEVICEID=$(curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $JWTTOKEN" https://localhost/things -d '{"name":"'"$DEVICE"'", "status": "enabled"}' | grep -oP '"id":"\K[^"]+' ) +curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $JWTTOKEN" https://localhost/things/$DEVICEID #get thing token -DEVICETOKEN=$(curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -H "Authorization: Bearer $JWTTOKEN" https://localhost/things/1 | grep -Po "key\":\"\K(.*)(?=\")") +DEVICETOKEN=$(curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -H "Authorization: Bearer $JWTTOKEN" https://localhost/things/$DEVICEID | grep -oP '"secret":"\K[^"]+' ) printf "Device token is $DEVICETOKEN \n" #provision channel printf "Provisioning channel with name $CHANNEL \n" -curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $JWTTOKEN" https://localhost/channels -d '{"name":"'"$CHANNEL"'"}' +CHANNELID=$(curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X POST -H "Content-Type: application/json" -H "Authorization: Bearer $JWTTOKEN" https://localhost/channels -d '{"name":"'"$CHANNEL"'", "status": "enabled"}' | grep -oP '"id":"\K[^"]+' ) +curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $JWTTOKEN" https://localhost/channels/$CHANNELID #connect thing to channel -printf "Connecting thing to channel \n" -curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X PUT -H 
"Authorization: Bearer $JWTTOKEN" https://localhost/channels/1/things/1 +printf "Connecting thing of id $DEVICEID to channel of id $CHANNELID \n" +curl -s -S --cacert docker/ssl/certs/mainflux-server.crt --insecure -X PUT -H "Authorization: Bearer $JWTTOKEN" https://localhost/channels/$CHANNELID/things/$DEVICEID diff --git a/scripts/run.sh b/scripts/run.sh index 6db4620674..1632d4ec08 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -38,7 +38,7 @@ done ### # Users ### -MF_USERS_LOG_LEVEL=info MF_USERS_ADMIN_EMAIL=admin@mainflux.com MF_USERS_ADMIN_PASSWORD=12345678 MF_EMAIL_TEMPLATE=../docker/templates/users.tmpl $BUILD_DIR/mainflux-users & +MF_USERS_LOG_LEVEL=info MF_USERS_HTTP_PORT=9002 MF_USERS_GRPC_PORT=7001 MF_USERS_ADMIN_EMAIL=admin@mainflux.com MF_USERS_ADMIN_PASSWORD=12345678 MF_EMAIL_TEMPLATE=../docker/templates/users.tmpl $BUILD_DIR/mainflux-users & ### # Things @@ -65,11 +65,6 @@ MF_MQTT_ADAPTER_LOG_LEVEL=info MF_THINGS_AUTH_GRPC_URL=localhost:7000 $BUILD_DIR ### MF_COAP_ADAPTER_LOG_LEVEL=info MF_COAP_ADAPTER_PORT=5683 MF_THINGS_AUTH_GRPC_URL=localhost:7000 $BUILD_DIR/mainflux-coap & -### -# AUTH -### -MF_AUTH_LOG_LEVEL=debug MF_AUTH_HTTP_PORT=9020 MF_AUTH_GRPC_PORT=7001 MF_AUTH_DB_PORT=5432 MF_AUTH_DB_USER=mainflux MF_AUTH_DB_PASS=mainflux MF_AUTH_DB=auth MF_AUTH_SECRET=secret MF_AUTH_LOGIN_TOKEN_DURATION=10h $BUILD_DIR/mainflux-auth & - trap cleanup EXIT while : ; do sleep 1 ; done diff --git a/things/README.md b/things/README.md index 1a31d826e8..e11c67be50 100644 --- a/things/README.md +++ b/things/README.md @@ -1,4 +1,4 @@ -# Things +# Clients Things service provides an HTTP API for managing platform resources: things and channels. Through this API clients are able to do the following actions: @@ -41,11 +41,11 @@ default values. 
| MF_THINGS_AUTH_GRPC_PORT | Things service Auth gRPC port | 7000 | | MF_THINGS_SERVER_CERT | Path to server certificate in pem format | | | MF_THINGS_SERVER_KEY | Path to server key in pem format | | -| MF_THINGS_STANDALONE_EMAIL | User email for standalone mode (no gRPC communication with users) | | +| MF_THINGS_STANDALONE_ID | User ID for standalone mode (no gRPC communication with users) | | | MF_THINGS_STANDALONE_TOKEN | User token for standalone mode that should be passed in auth header | | | MF_JAEGER_URL | Jaeger server URL | localhost:6831 | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | **Note** that if you want `things` service to have only one user locally, you should use `MF_THINGS_STANDALONE` env vars. By specifying these, you don't need `auth` service in your deployment for users' authorization. 
@@ -94,8 +94,8 @@ MF_THINGS_SERVER_KEY=[Path to server key] \ MF_THINGS_STANDALONE_EMAIL=[User email for standalone mode (no gRPC communication with auth)] \ MF_THINGS_STANDALONE_TOKEN=[User token for standalone mode that should be passed in auth header] \ MF_JAEGER_URL=[Jaeger server URL] \ -MF_AUTH_GRPC_URL=[Auth service gRPC URL] \ -MF_AUTH_GRPC_TIMEOUT=[Auth service gRPC request timeout in seconds] \ +MF_AUTH_GRPC_URL=[Users service gRPC URL] \ +MF_AUTH_GRPC_TIMEOUT=[Users service gRPC request timeout in seconds] \ $GOBIN/mainflux-things ``` diff --git a/things/api/auth/grpc/client.go b/things/api/auth/grpc/client.go deleted file mode 100644 index 97612793fd..0000000000 --- a/things/api/auth/grpc/client.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import ( - "context" - "time" - - "github.com/go-kit/kit/endpoint" - kitot "github.com/go-kit/kit/tracing/opentracing" - kitgrpc "github.com/go-kit/kit/transport/grpc" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" - opentracing "github.com/opentracing/opentracing-go" - "google.golang.org/grpc" -) - -var _ mainflux.ThingsServiceClient = (*grpcClient)(nil) - -type grpcClient struct { - timeout time.Duration - canAccessByKey endpoint.Endpoint - canAccessByID endpoint.Endpoint - isChannelOwner endpoint.Endpoint - identify endpoint.Endpoint -} - -// NewClient returns new gRPC client instance. 
-func NewClient(conn *grpc.ClientConn, tracer opentracing.Tracer, timeout time.Duration) mainflux.ThingsServiceClient { - svcName := "mainflux.ThingsService" - - return &grpcClient{ - timeout: timeout, - canAccessByKey: kitot.TraceClient(tracer, "can_access")(kitgrpc.NewClient( - conn, - svcName, - "CanAccessByKey", - encodeCanAccessByKeyRequest, - decodeIdentityResponse, - mainflux.ThingID{}, - ).Endpoint()), - canAccessByID: kitot.TraceClient(tracer, "can_access_by_id")(kitgrpc.NewClient( - conn, - svcName, - "CanAccessByID", - encodeCanAccessByIDRequest, - decodeEmptyResponse, - empty.Empty{}, - ).Endpoint()), - isChannelOwner: kitot.TraceClient(tracer, "is_channel_owner")(kitgrpc.NewClient( - conn, - svcName, - "IsChannelOwner", - encodeIsChannelOwner, - decodeEmptyResponse, - empty.Empty{}, - ).Endpoint()), - identify: kitot.TraceClient(tracer, "identify")(kitgrpc.NewClient( - conn, - svcName, - "Identify", - encodeIdentifyRequest, - decodeIdentityResponse, - mainflux.ThingID{}, - ).Endpoint()), - } -} - -func (client grpcClient) CanAccessByKey(ctx context.Context, req *mainflux.AccessByKeyReq, _ ...grpc.CallOption) (*mainflux.ThingID, error) { - ctx, cancel := context.WithTimeout(ctx, client.timeout) - defer cancel() - - ar := accessByKeyReq{ - thingKey: req.GetToken(), - chanID: req.GetChanID(), - } - res, err := client.canAccessByKey(ctx, ar) - if err != nil { - return nil, err - } - - ir := res.(identityRes) - return &mainflux.ThingID{Value: ir.id}, nil -} - -func (client grpcClient) CanAccessByID(ctx context.Context, req *mainflux.AccessByIDReq, _ ...grpc.CallOption) (*empty.Empty, error) { - ar := accessByIDReq{thingID: req.GetThingID(), chanID: req.GetChanID()} - res, err := client.canAccessByID(ctx, ar) - if err != nil { - return nil, err - } - - er := res.(emptyRes) - return &empty.Empty{}, er.err -} - -func (client grpcClient) IsChannelOwner(ctx context.Context, req *mainflux.ChannelOwnerReq, _ ...grpc.CallOption) (*empty.Empty, error) { - ar := 
channelOwnerReq{owner: req.GetOwner(), chanID: req.GetChanID()} - res, err := client.isChannelOwner(ctx, ar) - if err != nil { - return nil, err - } - - er := res.(emptyRes) - return &empty.Empty{}, er.err -} - -func (client grpcClient) Identify(ctx context.Context, req *mainflux.Token, _ ...grpc.CallOption) (*mainflux.ThingID, error) { - ctx, cancel := context.WithTimeout(ctx, client.timeout) - defer cancel() - - res, err := client.identify(ctx, identifyReq{key: req.GetValue()}) - if err != nil { - return nil, err - } - - ir := res.(identityRes) - return &mainflux.ThingID{Value: ir.id}, nil -} - -func encodeCanAccessByKeyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(accessByKeyReq) - return &mainflux.AccessByKeyReq{Token: req.thingKey, ChanID: req.chanID}, nil -} - -func encodeCanAccessByIDRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(accessByIDReq) - return &mainflux.AccessByIDReq{ThingID: req.thingID, ChanID: req.chanID}, nil -} - -func encodeIsChannelOwner(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(channelOwnerReq) - return &mainflux.ChannelOwnerReq{Owner: req.owner, ChanID: req.chanID}, nil -} - -func encodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(identifyReq) - return &mainflux.Token{Value: req.key}, nil -} - -func decodeIdentityResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(*mainflux.ThingID) - return identityRes{id: res.GetValue()}, nil -} - -func decodeEmptyResponse(_ context.Context, _ interface{}) (interface{}, error) { - return emptyRes{}, nil -} diff --git a/things/api/auth/grpc/endpoint.go b/things/api/auth/grpc/endpoint.go deleted file mode 100644 index b41a7d8f9a..0000000000 --- a/things/api/auth/grpc/endpoint.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - 
-import ( - "context" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/things" -) - -func canAccessEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(accessByKeyReq) - if err := req.validate(); err != nil { - return nil, err - } - - id, err := svc.CanAccessByKey(ctx, req.chanID, req.thingKey) - if err != nil { - return identityRes{}, err - } - return identityRes{id: id}, nil - } -} - -func canAccessByIDEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(accessByIDReq) - if err := req.validate(); err != nil { - return nil, err - } - - err := svc.CanAccessByID(ctx, req.chanID, req.thingID) - return emptyRes{err: err}, err - } -} - -func isChannelOwnerEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(channelOwnerReq) - if err := req.validate(); err != nil { - return nil, err - } - - err := svc.IsChannelOwner(ctx, req.owner, req.chanID) - return emptyRes{err: err}, err - } -} - -func identifyEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(identifyReq) - id, err := svc.Identify(ctx, req.key) - if err := req.validate(); err != nil { - return nil, err - } - if err != nil { - return identityRes{}, err - } - return identityRes{id: id}, nil - } -} diff --git a/things/api/auth/grpc/endpoint_test.go b/things/api/auth/grpc/endpoint_test.go deleted file mode 100644 index c9b89eaf9a..0000000000 --- a/things/api/auth/grpc/endpoint_test.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/opentracing/opentracing-go/mocktracer" - - 
"github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/things" - grpcapi "github.com/mainflux/mainflux/things/api/auth/grpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" -) - -const wrongID = "" - -var ( - thing = things.Thing{Name: "test_app", Metadata: map[string]interface{}{"test": "test"}} - channel = things.Channel{Name: "test", Metadata: map[string]interface{}{"test": "test"}} -) - -func TestCanAccessByKey(t *testing.T) { - ths, err := svc.CreateThings(context.Background(), token, thing, thing) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th1 := ths[0] - th2 := ths[1] - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th1.ID}) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - usersAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(usersAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - cli := grpcapi.NewClient(conn, mocktracer.New(), time.Second) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - cases := map[string]struct { - key string - chanID string - thingID string - code codes.Code - }{ - "check if connected thing can access existing channel": { - key: th1.Key, - chanID: ch.ID, - thingID: th1.ID, - code: codes.OK, - }, - "check if unconnected thing can access existing channel": { - key: th2.Key, - chanID: ch.ID, - thingID: wrongID, - code: codes.PermissionDenied, - }, - "check if thing with wrong access key can access existing channel": { - key: wrong, - chanID: ch.ID, - thingID: wrongID, - code: codes.NotFound, - }, - 
"check if connected thing can access non-existent channel": { - key: th1.Key, - chanID: wrongID, - thingID: wrongID, - code: codes.InvalidArgument, - }, - } - - for desc, tc := range cases { - id, err := cli.CanAccessByKey(ctx, &mainflux.AccessByKeyReq{Token: tc.key, ChanID: tc.chanID}) - e, ok := status.FromError(err) - assert.True(t, ok, "OK expected to be true") - assert.Equal(t, tc.thingID, id.GetValue(), fmt.Sprintf("%s: expected %s got %s", desc, tc.thingID, id.GetValue())) - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", desc, tc.code, e.Code())) - } -} - -func TestCanAccessByID(t *testing.T) { - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th1 := ths[0] - ths, err = svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th2 := ths[0] - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th2.ID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - usersAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(usersAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - cli := grpcapi.NewClient(conn, mocktracer.New(), time.Second) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - cases := map[string]struct { - chanID string - thingID string - code codes.Code - }{ - "check if connected thing can access existing channel": { - chanID: ch.ID, - thingID: th2.ID, - code: codes.OK, - }, - "check if unconnected thing can access existing channel": { - chanID: ch.ID, - thingID: th1.ID, - code: codes.PermissionDenied, - }, - "check if connected 
thing can access non-existent channel": { - chanID: wrongID, - thingID: th2.ID, - code: codes.InvalidArgument, - }, - "check if thing with empty ID can access existing channel": { - chanID: ch.ID, - thingID: "", - code: codes.InvalidArgument, - }, - "check if connected thing can access channel with empty ID": { - chanID: "", - thingID: th2.ID, - code: codes.InvalidArgument, - }, - } - - for desc, tc := range cases { - _, err := cli.CanAccessByID(ctx, &mainflux.AccessByIDReq{ThingID: tc.thingID, ChanID: tc.chanID}) - e, ok := status.FromError(err) - assert.True(t, ok, "OK expected to be true") - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", desc, tc.code, e.Code())) - } -} - -func TestIdentify(t *testing.T) { - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - sth := ths[0] - - usersAddr := fmt.Sprintf("localhost:%d", port) - conn, err := grpc.Dial(usersAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - cli := grpcapi.NewClient(conn, mocktracer.New(), time.Second) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - cases := map[string]struct { - key string - id string - code codes.Code - }{ - "identify existing thing": { - key: sth.Key, - id: sth.ID, - code: codes.OK, - }, - "identify non-existent thing": { - key: wrong, - id: wrongID, - code: codes.NotFound, - }, - } - - for desc, tc := range cases { - id, err := cli.Identify(ctx, &mainflux.Token{Value: tc.key}) - e, ok := status.FromError(err) - assert.True(t, ok, "OK expected to be true") - assert.Equal(t, tc.id, id.GetValue(), fmt.Sprintf("%s: expected %s got %s", desc, tc.id, id.GetValue())) - assert.Equal(t, tc.code, e.Code(), fmt.Sprintf("%s: expected %s got %s", desc, tc.code, e.Code())) - } -} diff --git a/things/api/auth/grpc/requests.go b/things/api/auth/grpc/requests.go 
deleted file mode 100644 index a0fb0887e3..0000000000 --- a/things/api/auth/grpc/requests.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import "github.com/mainflux/mainflux/internal/apiutil" - -type accessByKeyReq struct { - thingKey string - chanID string -} - -func (req accessByKeyReq) validate() error { - if req.chanID == "" { - return apiutil.ErrMissingID - } - - if req.thingKey == "" { - return apiutil.ErrBearerKey - } - - return nil -} - -type accessByIDReq struct { - thingID string - chanID string -} - -func (req accessByIDReq) validate() error { - if req.thingID == "" || req.chanID == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type channelOwnerReq struct { - owner string - chanID string -} - -func (req channelOwnerReq) validate() error { - if req.owner == "" || req.chanID == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type identifyReq struct { - key string -} - -func (req identifyReq) validate() error { - if req.key == "" { - return apiutil.ErrBearerKey - } - - return nil -} diff --git a/things/api/auth/grpc/responses.go b/things/api/auth/grpc/responses.go deleted file mode 100644 index c7b6506ff4..0000000000 --- a/things/api/auth/grpc/responses.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -type identityRes struct { - id string -} - -type emptyRes struct { - err error -} diff --git a/things/api/auth/grpc/server.go b/things/api/auth/grpc/server.go deleted file mode 100644 index 12e32ddc03..0000000000 --- a/things/api/auth/grpc/server.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc - -import ( - "context" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kitgrpc "github.com/go-kit/kit/transport/grpc" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" - 
"github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" - opentracing "github.com/opentracing/opentracing-go" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var _ mainflux.ThingsServiceServer = (*grpcServer)(nil) - -type grpcServer struct { - canAccessByKey kitgrpc.Handler - canAccessByID kitgrpc.Handler - isChannelOwner kitgrpc.Handler - identify kitgrpc.Handler - mainflux.UnimplementedThingsServiceServer -} - -// NewServer returns new ThingsServiceServer instance. -func NewServer(tracer opentracing.Tracer, svc things.Service) mainflux.ThingsServiceServer { - return &grpcServer{ - canAccessByKey: kitgrpc.NewServer( - kitot.TraceServer(tracer, "can_access")(canAccessEndpoint(svc)), - decodeCanAccessByKeyRequest, - encodeIdentityResponse, - ), - canAccessByID: kitgrpc.NewServer( - canAccessByIDEndpoint(svc), - decodeCanAccessByIDRequest, - encodeEmptyResponse, - ), - isChannelOwner: kitgrpc.NewServer( - isChannelOwnerEndpoint(svc), - decodeIsChannelOwnerRequest, - encodeEmptyResponse, - ), - identify: kitgrpc.NewServer( - kitot.TraceServer(tracer, "identify")(identifyEndpoint(svc)), - decodeIdentifyRequest, - encodeIdentityResponse, - ), - } -} - -func (gs *grpcServer) CanAccessByKey(ctx context.Context, req *mainflux.AccessByKeyReq) (*mainflux.ThingID, error) { - _, res, err := gs.canAccessByKey.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - - return res.(*mainflux.ThingID), nil -} - -func (gs *grpcServer) CanAccessByID(ctx context.Context, req *mainflux.AccessByIDReq) (*empty.Empty, error) { - _, res, err := gs.canAccessByID.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - - return res.(*empty.Empty), nil -} - -func (gs *grpcServer) IsChannelOwner(ctx context.Context, req *mainflux.ChannelOwnerReq) (*empty.Empty, error) { - _, res, err := gs.isChannelOwner.ServeGRPC(ctx, req) - if err != nil { - return nil, 
encodeError(err) - } - - return res.(*empty.Empty), nil -} - -func (gs *grpcServer) Identify(ctx context.Context, req *mainflux.Token) (*mainflux.ThingID, error) { - _, res, err := gs.identify.ServeGRPC(ctx, req) - if err != nil { - return nil, encodeError(err) - } - - return res.(*mainflux.ThingID), nil -} - -func decodeCanAccessByKeyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.AccessByKeyReq) - return accessByKeyReq{thingKey: req.GetToken(), chanID: req.GetChanID()}, nil -} - -func decodeCanAccessByIDRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.AccessByIDReq) - return accessByIDReq{thingID: req.GetThingID(), chanID: req.GetChanID()}, nil -} - -func decodeIsChannelOwnerRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.ChannelOwnerReq) - return channelOwnerReq{owner: req.GetOwner(), chanID: req.GetChanID()}, nil -} - -func decodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { - req := grpcReq.(*mainflux.Token) - return identifyReq{key: req.GetValue()}, nil -} - -func encodeIdentityResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(identityRes) - return &mainflux.ThingID{Value: res.id}, nil -} - -func encodeEmptyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { - res := grpcRes.(emptyRes) - return &empty.Empty{}, encodeError(res.err) -} - -func encodeError(err error) error { - switch err { - case nil: - return nil - case errors.ErrMalformedEntity, - apiutil.ErrMissingID, - apiutil.ErrBearerKey: - return status.Error(codes.InvalidArgument, "received invalid can access request") - case errors.ErrAuthentication: - return status.Error(codes.Unauthenticated, "missing or invalid credentials provided") - case errors.ErrAuthorization: - return status.Error(codes.PermissionDenied, "unauthorized access token provided") - case 
things.ErrEntityConnected: - return status.Error(codes.PermissionDenied, "entities are not connected") - case errors.ErrNotFound: - return status.Error(codes.NotFound, "entity does not exist") - default: - if errors.Contains(err, errors.ErrNotFound) || errors.Contains(err, errors.ErrViewEntity) { - return status.Error(codes.NotFound, "entity does not exist") - } - return status.Error(codes.Internal, "internal server error") - } -} diff --git a/things/api/auth/grpc/setup_test.go b/things/api/auth/grpc/setup_test.go deleted file mode 100644 index 594f49195d..0000000000 --- a/things/api/auth/grpc/setup_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package grpc_test - -import ( - "fmt" - "log" - "net" - "os" - "testing" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - grpcapi "github.com/mainflux/mainflux/things/api/auth/grpc" - "github.com/mainflux/mainflux/things/mocks" - "github.com/opentracing/opentracing-go/mocktracer" - "google.golang.org/grpc" -) - -const ( - port = 7000 - token = "token" - wrong = "wrong" - email = "john.doe@email.com" -) - -var svc things.Service - -func TestMain(m *testing.M) { - serverErr := make(chan error) - - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - if err != nil { - log.Fatalf("got unexpected error while creating new listerner: %s", err) - } - - svc = newService(map[string]string{token: email}) - server := grpc.NewServer() - mainflux.RegisterThingsServiceServer(server, grpcapi.NewServer(mocktracer.New(), svc)) - - // Start gRPC server in detached mode. 
- go func() { - serverErr <- server.Serve(listener) - }() - - code := m.Run() - - server.GracefulStop() - err = <-serverErr - if err != nil { - log.Fatalln("gPRC Server Terminated : ", err) - } - close(serverErr) - os.Exit(code) -} - -func newService(tokens map[string]string) things.Service { - policies := []mocks.MockSubjectSet{{Object: "users", Relation: "member"}} - auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{email: policies}) - conns := make(chan mocks.Connection) - thingsRepo := mocks.NewThingRepository(conns) - channelsRepo := mocks.NewChannelRepository(thingsRepo, conns) - chanCache := mocks.NewChannelCache() - thingCache := mocks.NewThingCache() - idProvider := uuid.NewMock() - - return things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) -} diff --git a/things/api/auth/http/doc.go b/things/api/auth/http/doc.go deleted file mode 100644 index fc329a3d67..0000000000 --- a/things/api/auth/http/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package http contains implementation of things auth service HTTP API. 
-package http diff --git a/things/api/auth/http/endpoint.go b/things/api/auth/http/endpoint.go deleted file mode 100644 index 0d587726e8..0000000000 --- a/things/api/auth/http/endpoint.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import ( - "context" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/things" -) - -func identifyEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(identifyReq) - if err := req.validate(); err != nil { - return nil, err - } - - id, err := svc.Identify(ctx, req.Token) - if err != nil { - return nil, err - } - - res := identityRes{ - ID: id, - } - - return res, nil - } -} - -func canAccessByKeyEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(canAccessByKeyReq) - if err := req.validate(); err != nil { - return nil, err - } - - id, err := svc.CanAccessByKey(ctx, req.chanID, req.Token) - if err != nil { - return nil, err - } - - res := identityRes{ - ID: id, - } - - return res, nil - } -} - -func canAccessByIDEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(canAccessByIDReq) - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.CanAccessByID(ctx, req.chanID, req.ThingID); err != nil { - return nil, err - } - - res := canAccessByIDRes{} - return res, nil - } -} diff --git a/things/api/auth/http/endpoint_test.go b/things/api/auth/http/endpoint_test.go deleted file mode 100644 index 883e4dc105..0000000000 --- a/things/api/auth/http/endpoint_test.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - 
"net/http/httptest" - "strings" - "testing" - - "github.com/opentracing/opentracing-go/mocktracer" - - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - httpapi "github.com/mainflux/mainflux/things/api/auth/http" - "github.com/mainflux/mainflux/things/mocks" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - contentType = "application/json" - email = "user@example.com" - token = "token" - wrong = "wrong_value" -) - -var ( - thing = things.Thing{ - Name: "test_app", - Metadata: map[string]interface{}{"test": "data"}, - } - channel = things.Channel{ - Name: "test_chan", - Metadata: map[string]interface{}{"test": "data"}, - } -) - -type testRequest struct { - client *http.Client - method string - url string - contentType string - body io.Reader -} - -func (tr testRequest) make() (*http.Response, error) { - req, err := http.NewRequest(tr.method, tr.url, tr.body) - if err != nil { - return nil, err - } - if tr.contentType != "" { - req.Header.Set("Content-Type", tr.contentType) - } - return tr.client.Do(req) -} - -func toJSON(data interface{}) string { - jsonData, _ := json.Marshal(data) - return string(jsonData) -} - -func newService(tokens map[string]string) things.Service { - policies := []mocks.MockSubjectSet{{Object: "users", Relation: "member"}} - auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{email: policies}) - conns := make(chan mocks.Connection) - thingsRepo := mocks.NewThingRepository(conns) - channelsRepo := mocks.NewChannelRepository(thingsRepo, conns) - chanCache := mocks.NewChannelCache() - thingCache := mocks.NewThingCache() - idProvider := uuid.NewMock() - - return things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) -} - -func newServer(svc things.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(mocktracer.New(), svc, logger) - return 
httptest.NewServer(mux) -} - -func TestIdentify(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("failed to create thing: %s", err)) - th := ths[0] - - ir := identifyReq{Token: th.Key} - data := toJSON(ir) - - nonexistentData := toJSON(identifyReq{Token: wrong}) - - cases := []struct { - desc string - contentType string - req string - statusCode int - }{ - { - desc: "identify existing thing", - contentType: contentType, - req: data, - statusCode: http.StatusOK, - }, - { - desc: "identify non-existent thing", - contentType: contentType, - req: nonexistentData, - statusCode: http.StatusNotFound, - }, - { - desc: "identify with missing content type", - contentType: wrong, - req: data, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "identify with empty JSON request", - contentType: contentType, - req: "{}", - statusCode: http.StatusUnauthorized, - }, - { - desc: "identify with invalid JSON request", - contentType: contentType, - req: "", - statusCode: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/identify", ts.URL), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestCanAccessByKey(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("failed to create thing: %s", err)) - th := ths[0] - - chs, err := svc.CreateChannels(context.Background(), token, 
channel) - require.Nil(t, err, fmt.Sprintf("failed to create channel: %s", err)) - ch := chs[0] - - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID}) - require.Nil(t, err, fmt.Sprintf("failed to connect thing and channel: %s", err)) - - data := toJSON(canAccessByKeyReq{ - Token: th.Key, - }) - - cases := []struct { - desc string - contentType string - chanID string - req string - statusCode int - }{ - { - desc: "check access for connected thing and channel", - contentType: contentType, - chanID: ch.ID, - req: data, - statusCode: http.StatusOK, - }, - { - desc: "check access for not connected thing and channel", - contentType: contentType, - chanID: wrong, - req: data, - statusCode: http.StatusForbidden, - }, - { - desc: "check access with invalid content type", - contentType: wrong, - chanID: ch.ID, - req: data, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "check access with empty JSON request", - contentType: contentType, - chanID: ch.ID, - req: "{}", - statusCode: http.StatusUnauthorized, - }, - { - desc: "check access with invalid JSON request", - contentType: contentType, - chanID: ch.ID, - req: "}", - statusCode: http.StatusBadRequest, - }, - { - desc: "check access with empty request", - contentType: contentType, - chanID: ch.ID, - req: "", - statusCode: http.StatusBadRequest, - }, - { - desc: "check access with empty channel id", - contentType: contentType, - chanID: "", - req: data, - statusCode: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/identify/channels/%s/access-by-key", ts.URL, tc.chanID), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, 
res.StatusCode)) - } -} - -func TestCanAccessByID(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("failed to create thing: %s", err)) - th := ths[0] - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("failed to create channel: %s", err)) - ch := chs[0] - - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID}) - require.Nil(t, err, fmt.Sprintf("failed to connect thing and channel: %s", err)) - - data := toJSON(canAccessByIDReq{ - ThingID: th.ID, - }) - - cases := []struct { - desc string - contentType string - chanID string - req string - statusCode int - }{ - { - desc: "check access for connected thing and channel", - contentType: contentType, - chanID: ch.ID, - req: data, - statusCode: http.StatusOK, - }, - { - desc: "check access for not connected thing and channel", - contentType: contentType, - chanID: wrong, - req: data, - statusCode: http.StatusForbidden, - }, - { - desc: "check access with invalid content type", - contentType: wrong, - chanID: ch.ID, - req: data, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "check access with empty JSON request", - contentType: contentType, - chanID: ch.ID, - req: "{}", - statusCode: http.StatusBadRequest, - }, - { - desc: "check access with invalid JSON request", - contentType: contentType, - chanID: ch.ID, - req: "}", - statusCode: http.StatusBadRequest, - }, - { - desc: "check access with empty request", - contentType: contentType, - chanID: ch.ID, - req: "", - statusCode: http.StatusBadRequest, - }, - { - desc: "check access with empty channel id", - contentType: contentType, - chanID: "", - req: data, - statusCode: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: 
fmt.Sprintf("%s/identify/channels/%s/access-by-id", ts.URL, tc.chanID), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -type identifyReq struct { - Token string `json:"token"` -} - -type canAccessByKeyReq struct { - Token string `json:"token"` -} - -type canAccessByIDReq struct { - ThingID string `json:"thing_id"` -} diff --git a/things/api/auth/http/requests.go b/things/api/auth/http/requests.go deleted file mode 100644 index 0cd9aba149..0000000000 --- a/things/api/auth/http/requests.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import "github.com/mainflux/mainflux/internal/apiutil" - -type identifyReq struct { - Token string `json:"token"` -} - -func (req identifyReq) validate() error { - if req.Token == "" { - return apiutil.ErrBearerKey - } - - return nil -} - -type canAccessByKeyReq struct { - chanID string - Token string `json:"token"` -} - -func (req canAccessByKeyReq) validate() error { - if req.Token == "" { - return apiutil.ErrBearerKey - } - - if req.chanID == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type canAccessByIDReq struct { - chanID string - ThingID string `json:"thing_id"` -} - -func (req canAccessByIDReq) validate() error { - if req.ThingID == "" || req.chanID == "" { - return apiutil.ErrMissingID - } - - return nil -} diff --git a/things/api/auth/http/responses.go b/things/api/auth/http/responses.go deleted file mode 100644 index eaec620739..0000000000 --- a/things/api/auth/http/responses.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import "net/http" - -type identityRes struct { - ID string `json:"id"` -} - -func 
(res identityRes) Code() int { - return http.StatusOK -} - -func (res identityRes) Headers() map[string]string { - return map[string]string{} -} - -func (res identityRes) Empty() bool { - return false -} - -type canAccessByIDRes struct{} - -func (res canAccessByIDRes) Code() int { - return http.StatusOK -} - -func (res canAccessByIDRes) Headers() map[string]string { - return map[string]string{} -} - -func (res canAccessByIDRes) Empty() bool { - return true -} diff --git a/things/api/auth/http/transport.go b/things/api/auth/http/transport.go deleted file mode 100644 index c43f8c1842..0000000000 --- a/things/api/auth/http/transport.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import ( - "context" - "encoding/json" - "net/http" - "strings" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" - opentracing "github.com/opentracing/opentracing-go" -) - -const contentType = "application/json" - -// MakeHandler returns a HTTP handler for auth API endpoints. 
-func MakeHandler(tracer opentracing.Tracer, svc things.Service, logger logger.Logger) http.Handler { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), - } - - r := bone.New() - - r.Post("/identify", kithttp.NewServer( - kitot.TraceServer(tracer, "identify")(identifyEndpoint(svc)), - decodeIdentify, - encodeResponse, - opts..., - )) - - r.Post("/identify/channels/:chanID/access-by-key", kithttp.NewServer( - kitot.TraceServer(tracer, "can_access_by_key")(canAccessByKeyEndpoint(svc)), - decodeCanAccessByKey, - encodeResponse, - opts..., - )) - - r.Post("/identify/channels/:chanID/access-by-id", kithttp.NewServer( - kitot.TraceServer(tracer, "can_access_by_id")(canAccessByIDEndpoint(svc)), - decodeCanAccessByID, - encodeResponse, - opts..., - )) - - return r -} - -func decodeIdentify(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := identifyReq{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeCanAccessByKey(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := canAccessByKeyReq{ - chanID: bone.GetValue(r, "chanID"), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeCanAccessByID(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := canAccessByIDReq{ - chanID: bone.GetValue(r, "chanID"), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, 
errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - if ar, ok := response.(mainflux.Response); ok { - for k, v := range ar.Headers() { - w.Header().Set(k, v) - } - w.Header().Set("Content-Type", contentType) - w.WriteHeader(ar.Code()) - - if ar.Empty() { - return nil - } - } - - return json.NewEncoder(w).Encode(response) -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch { - case errors.Contains(err, apiutil.ErrBearerToken), - errors.Contains(err, apiutil.ErrBearerKey), - errors.Contains(err, errors.ErrAuthentication): - w.WriteHeader(http.StatusUnauthorized) - case errors.Contains(err, errors.ErrNotFound): - w.WriteHeader(http.StatusNotFound) - case errors.Contains(err, errors.ErrAuthorization): - w.WriteHeader(http.StatusForbidden) - case errors.Contains(err, errors.ErrUnsupportedContentType): - w.WriteHeader(http.StatusUnsupportedMediaType) - case errors.Contains(err, errors.ErrMalformedEntity), - errors.Contains(err, apiutil.ErrMissingID): - w.WriteHeader(http.StatusBadRequest) - default: - w.WriteHeader(http.StatusInternalServerError) - } - - if errorVal, ok := err.(errors.Error); ok { - w.Header().Set("Content-Type", contentType) - if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { - w.WriteHeader(http.StatusInternalServerError) - } - } -} diff --git a/things/api/doc.go b/things/api/doc.go deleted file mode 100644 index fb3127e46b..0000000000 --- a/things/api/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package api contains API-related concerns: endpoint definitions, middlewares -// and all resource representations. 
-package api diff --git a/things/api/logging.go b/things/api/logging.go deleted file mode 100644 index cf0ddb1c07..0000000000 --- a/things/api/logging.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -//go:build !test - -package api - -import ( - "context" - "fmt" - "time" - - log "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/things" -) - -var _ things.Service = (*loggingMiddleware)(nil) - -type loggingMiddleware struct { - logger log.Logger - svc things.Service -} - -// LoggingMiddleware adds logging facilities to the core service. -func LoggingMiddleware(svc things.Service, logger log.Logger) things.Service { - return &loggingMiddleware{logger, svc} -} - -func (lm *loggingMiddleware) CreateThings(ctx context.Context, token string, ths ...things.Thing) (saved []things.Thing, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method create_things for token %s and things %s took %s to complete", token, saved, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.CreateThings(ctx, token, ths...) 
-} - -func (lm *loggingMiddleware) UpdateThing(ctx context.Context, token string, thing things.Thing) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method update_thing for token %s and thing %s took %s to complete", token, thing.ID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.UpdateThing(ctx, token, thing) -} - -func (lm *loggingMiddleware) ShareThing(ctx context.Context, token, thingID string, actions, userIDs []string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method share_thing for token %s and thing %s took %s to complete", token, thingID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ShareThing(ctx, token, thingID, actions, userIDs) -} - -func (lm *loggingMiddleware) UpdateKey(ctx context.Context, token, id, key string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method update_key for thing %s and key %s took %s to complete", id, key, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.UpdateKey(ctx, token, id, key) -} - -func (lm *loggingMiddleware) ViewThing(ctx context.Context, token, id string) (thing things.Thing, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method view_thing for token %s and thing %s took %s to complete", token, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ViewThing(ctx, token, id) 
-} - -func (lm *loggingMiddleware) ListThings(ctx context.Context, token string, pm things.PageMetadata) (_ things.Page, err error) { - defer func(begin time.Time) { - nlog := "" - if pm.Name != "" { - nlog = fmt.Sprintf("with name %s", pm.Name) - } - message := fmt.Sprintf("Method list_things %s for token %s took %s to complete", nlog, token, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListThings(ctx, token, pm) -} - -func (lm *loggingMiddleware) ListThingsByChannel(ctx context.Context, token, chID string, pm things.PageMetadata) (_ things.Page, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_things_by_channel for channel %s took %s to complete", chID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s", message, err)) - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListThingsByChannel(ctx, token, chID, pm) -} - -func (lm *loggingMiddleware) RemoveThing(ctx context.Context, token, id string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method remove_thing for token %s and thing %s took %s to complete", token, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.RemoveThing(ctx, token, id) -} - -func (lm *loggingMiddleware) CreateChannels(ctx context.Context, token string, channels ...things.Channel) (saved []things.Channel, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method create_channels for token %s and channels %s took %s to complete", token, saved, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - 
lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.CreateChannels(ctx, token, channels...) -} - -func (lm *loggingMiddleware) UpdateChannel(ctx context.Context, token string, channel things.Channel) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method update_channel for token %s and channel %s took %s to complete", token, channel.ID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.UpdateChannel(ctx, token, channel) -} - -func (lm *loggingMiddleware) ViewChannel(ctx context.Context, token, id string) (channel things.Channel, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method view_channel for token %s and channel %s took %s to complete", token, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ViewChannel(ctx, token, id) -} - -func (lm *loggingMiddleware) ListChannels(ctx context.Context, token string, pm things.PageMetadata) (_ things.ChannelsPage, err error) { - defer func(begin time.Time) { - nlog := "" - if pm.Name != "" { - nlog = fmt.Sprintf("with name %s", pm.Name) - } - message := fmt.Sprintf("Method list_channels %s for token %s took %s to complete", nlog, token, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListChannels(ctx, token, pm) -} - -func (lm *loggingMiddleware) ListChannelsByThing(ctx context.Context, token, thID string, pm things.PageMetadata) (_ things.ChannelsPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_channels_by_thing for 
thing %s took %s to complete", thID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s", message, err)) - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListChannelsByThing(ctx, token, thID, pm) -} - -func (lm *loggingMiddleware) RemoveChannel(ctx context.Context, token, id string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method remove_channel for token %s and channel %s took %s to complete", token, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.RemoveChannel(ctx, token, id) -} - -func (lm *loggingMiddleware) Connect(ctx context.Context, token string, chIDs, thIDs []string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method connect for token %s, channels %s and things %s took %s to complete", token, chIDs, thIDs, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Connect(ctx, token, chIDs, thIDs) -} - -func (lm *loggingMiddleware) Disconnect(ctx context.Context, token string, chIDs, thIDs []string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method disconnect for token %s, channels %v and things %v took %s to complete", token, chIDs, thIDs, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Disconnect(ctx, token, chIDs, thIDs) -} - -func (lm *loggingMiddleware) CanAccessByKey(ctx context.Context, id, key string) (thing string, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method can_access for 
channel %s and thing %s took %s to complete", id, thing, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.CanAccessByKey(ctx, id, key) -} - -func (lm *loggingMiddleware) CanAccessByID(ctx context.Context, chanID, thingID string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method can_access_by_id for channel %s and thing %s took %s to complete", chanID, thingID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.CanAccessByID(ctx, chanID, thingID) -} - -func (lm *loggingMiddleware) IsChannelOwner(ctx context.Context, owner, chanID string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method is_channel_owner for channel %s and user %s took %s to complete", chanID, owner, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.IsChannelOwner(ctx, owner, chanID) -} - -func (lm *loggingMiddleware) Identify(ctx context.Context, key string) (id string, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method identify for token %s and thing %s took %s to complete", key, id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Identify(ctx, key) -} - -func (lm *loggingMiddleware) ListMembers(ctx context.Context, token, groupID string, pm things.PageMetadata) (tp things.Page, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_members for token %s 
and group id %s took %s to complete", token, groupID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListMembers(ctx, token, groupID, pm) -} diff --git a/things/api/metrics.go b/things/api/metrics.go deleted file mode 100644 index 5aad2998fd..0000000000 --- a/things/api/metrics.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -//go:build !test - -package api - -import ( - "context" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/mainflux/mainflux/things" -) - -var _ things.Service = (*metricsMiddleware)(nil) - -type metricsMiddleware struct { - counter metrics.Counter - latency metrics.Histogram - svc things.Service -} - -// MetricsMiddleware instruments core service by tracking request count and latency. -func MetricsMiddleware(svc things.Service, counter metrics.Counter, latency metrics.Histogram) things.Service { - return &metricsMiddleware{ - counter: counter, - latency: latency, - svc: svc, - } -} - -func (ms *metricsMiddleware) CreateThings(ctx context.Context, token string, ths ...things.Thing) (saved []things.Thing, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "create_things").Add(1) - ms.latency.With("method", "create_things").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.CreateThings(ctx, token, ths...) 
-} - -func (ms *metricsMiddleware) UpdateThing(ctx context.Context, token string, thing things.Thing) error { - defer func(begin time.Time) { - ms.counter.With("method", "update_thing").Add(1) - ms.latency.With("method", "update_thing").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.UpdateThing(ctx, token, thing) -} - -func (ms *metricsMiddleware) ShareThing(ctx context.Context, token, thingID string, actions, userIDs []string) error { - defer func(begin time.Time) { - ms.counter.With("method", "share_thing").Add(1) - ms.latency.With("method", "share_thing").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ShareThing(ctx, token, thingID, actions, userIDs) -} - -func (ms *metricsMiddleware) UpdateKey(ctx context.Context, token, id, key string) error { - defer func(begin time.Time) { - ms.counter.With("method", "update_key").Add(1) - ms.latency.With("method", "update_key").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.UpdateKey(ctx, token, id, key) -} - -func (ms *metricsMiddleware) ViewThing(ctx context.Context, token, id string) (things.Thing, error) { - defer func(begin time.Time) { - ms.counter.With("method", "view_thing").Add(1) - ms.latency.With("method", "view_thing").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ViewThing(ctx, token, id) -} - -func (ms *metricsMiddleware) ListThings(ctx context.Context, token string, pm things.PageMetadata) (things.Page, error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_things").Add(1) - ms.latency.With("method", "list_things").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListThings(ctx, token, pm) -} - -func (ms *metricsMiddleware) ListThingsByChannel(ctx context.Context, token, chID string, pm things.PageMetadata) (things.Page, error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_things_by_channel").Add(1) - ms.latency.With("method", 
"list_things_by_channel").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListThingsByChannel(ctx, token, chID, pm) -} - -func (ms *metricsMiddleware) RemoveThing(ctx context.Context, token, id string) error { - defer func(begin time.Time) { - ms.counter.With("method", "remove_thing").Add(1) - ms.latency.With("method", "remove_thing").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.RemoveThing(ctx, token, id) -} - -func (ms *metricsMiddleware) CreateChannels(ctx context.Context, token string, channels ...things.Channel) (saved []things.Channel, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "create_channels").Add(1) - ms.latency.With("method", "create_channels").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.CreateChannels(ctx, token, channels...) -} - -func (ms *metricsMiddleware) UpdateChannel(ctx context.Context, token string, channel things.Channel) error { - defer func(begin time.Time) { - ms.counter.With("method", "update_channel").Add(1) - ms.latency.With("method", "update_channel").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.UpdateChannel(ctx, token, channel) -} - -func (ms *metricsMiddleware) ViewChannel(ctx context.Context, token, id string) (things.Channel, error) { - defer func(begin time.Time) { - ms.counter.With("method", "view_channel").Add(1) - ms.latency.With("method", "view_channel").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ViewChannel(ctx, token, id) -} - -func (ms *metricsMiddleware) ListChannels(ctx context.Context, token string, pm things.PageMetadata) (things.ChannelsPage, error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_channels").Add(1) - ms.latency.With("method", "list_channels").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListChannels(ctx, token, pm) -} - -func (ms *metricsMiddleware) ListChannelsByThing(ctx context.Context, token, thID 
string, pm things.PageMetadata) (things.ChannelsPage, error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_channels_by_thing").Add(1) - ms.latency.With("method", "list_channels_by_thing").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListChannelsByThing(ctx, token, thID, pm) -} - -func (ms *metricsMiddleware) RemoveChannel(ctx context.Context, token, id string) error { - defer func(begin time.Time) { - ms.counter.With("method", "remove_channel").Add(1) - ms.latency.With("method", "remove_channel").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.RemoveChannel(ctx, token, id) -} - -func (ms *metricsMiddleware) Connect(ctx context.Context, token string, chIDs, thIDs []string) error { - defer func(begin time.Time) { - ms.counter.With("method", "connect").Add(1) - ms.latency.With("method", "connect").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Connect(ctx, token, chIDs, thIDs) -} - -func (ms *metricsMiddleware) Disconnect(ctx context.Context, token string, chIDs, thIDs []string) error { - defer func(begin time.Time) { - ms.counter.With("method", "disconnect").Add(1) - ms.latency.With("method", "disconnect").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Disconnect(ctx, token, chIDs, thIDs) -} - -func (ms *metricsMiddleware) CanAccessByKey(ctx context.Context, id, key string) (string, error) { - defer func(begin time.Time) { - ms.counter.With("method", "can_access_by_key").Add(1) - ms.latency.With("method", "can_access_by_key").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.CanAccessByKey(ctx, id, key) -} - -func (ms *metricsMiddleware) CanAccessByID(ctx context.Context, chanID, thingID string) error { - defer func(begin time.Time) { - ms.counter.With("method", "can_access_by_id").Add(1) - ms.latency.With("method", "can_access_by_id").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.CanAccessByID(ctx, 
chanID, thingID) -} - -func (ms *metricsMiddleware) IsChannelOwner(ctx context.Context, owner, chanID string) error { - defer func(begin time.Time) { - ms.counter.With("method", "is_channel_owner").Add(1) - ms.latency.With("method", "is_channel_owner").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.IsChannelOwner(ctx, owner, chanID) -} - -func (ms *metricsMiddleware) Identify(ctx context.Context, key string) (string, error) { - defer func(begin time.Time) { - ms.counter.With("method", "identify").Add(1) - ms.latency.With("method", "identify").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Identify(ctx, key) -} - -func (ms *metricsMiddleware) ListMembers(ctx context.Context, token, groupID string, pm things.PageMetadata) (tp things.Page, err error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_members").Add(1) - ms.latency.With("method", "list_members").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListMembers(ctx, token, groupID, pm) -} diff --git a/things/api/things/http/doc.go b/things/api/things/http/doc.go deleted file mode 100644 index 12d530b393..0000000000 --- a/things/api/things/http/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package http contains implementation of things service HTTP API. 
-package http diff --git a/things/api/things/http/endpoint.go b/things/api/things/http/endpoint.go deleted file mode 100644 index 850b72b58d..0000000000 --- a/things/api/things/http/endpoint.go +++ /dev/null @@ -1,567 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import ( - "context" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -func createThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createThingReq) - - if err := req.validate(); err != nil { - return nil, err - } - - th := things.Thing{ - Key: req.Key, - ID: req.ID, - Name: req.Name, - Metadata: req.Metadata, - } - saved, err := svc.CreateThings(ctx, req.token, th) - if err != nil { - return nil, err - } - - res := thingRes{ - ID: saved[0].ID, - created: true, - } - - return res, nil - } -} - -func createThingsEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createThingsReq) - - if err := req.validate(); err != nil { - return nil, err - } - - ths := []things.Thing{} - for _, tReq := range req.Things { - th := things.Thing{ - Name: tReq.Name, - Key: tReq.Key, - ID: tReq.ID, - Metadata: tReq.Metadata, - } - ths = append(ths, th) - } - - saved, err := svc.CreateThings(ctx, req.token, ths...) 
- if err != nil { - return nil, err - } - - res := thingsRes{ - Things: []thingRes{}, - created: true, - } - - for _, th := range saved { - tRes := thingRes{ - ID: th.ID, - Name: th.Name, - Key: th.Key, - Metadata: th.Metadata, - } - res.Things = append(res.Things, tRes) - } - - return res, nil - } -} - -func shareThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(shareThingReq) - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.ShareThing(ctx, req.token, req.thingID, req.Policies, req.UserIDs); err != nil { - return nil, err - } - - return shareThingRes{}, nil - } -} - -func updateThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(updateThingReq) - - if err := req.validate(); err != nil { - return nil, err - } - - thing := things.Thing{ - ID: req.id, - Name: req.Name, - Metadata: req.Metadata, - } - - if err := svc.UpdateThing(ctx, req.token, thing); err != nil { - return nil, err - } - - res := thingRes{ID: req.id, created: false} - return res, nil - } -} - -func updateKeyEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(updateKeyReq) - - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.UpdateKey(ctx, req.token, req.id, req.Key); err != nil { - return nil, err - } - - res := thingRes{ID: req.id, created: false} - return res, nil - } -} - -func viewThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(viewResourceReq) - - if err := req.validate(); err != nil { - return nil, err - } - - thing, err := svc.ViewThing(ctx, req.token, req.id) - if err != nil { - return nil, err - } - - res := viewThingRes{ - ID: thing.ID, - 
Owner: thing.Owner, - Name: thing.Name, - Key: thing.Key, - Metadata: thing.Metadata, - } - return res, nil - } -} - -func listThingsEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listResourcesReq) - - if err := req.validate(); err != nil { - return nil, err - } - - page, err := svc.ListThings(ctx, req.token, req.pageMetadata) - if err != nil { - return nil, err - } - - res := thingsPageRes{ - pageRes: pageRes{ - Total: page.Total, - Offset: page.Offset, - Limit: page.Limit, - Order: page.Order, - Dir: page.Dir, - }, - Things: []viewThingRes{}, - } - for _, thing := range page.Things { - view := viewThingRes{ - ID: thing.ID, - Owner: thing.Owner, - Name: thing.Name, - Key: thing.Key, - Metadata: thing.Metadata, - } - res.Things = append(res.Things, view) - } - - return res, nil - } -} - -func listThingsByChannelEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listByConnectionReq) - - if err := req.validate(); err != nil { - return nil, err - } - - page, err := svc.ListThingsByChannel(ctx, req.token, req.id, req.pageMetadata) - if err != nil { - return nil, err - } - - res := thingsPageRes{ - pageRes: pageRes{ - Total: page.Total, - Offset: page.Offset, - Limit: page.Limit, - }, - Things: []viewThingRes{}, - } - for _, thing := range page.Things { - view := viewThingRes{ - ID: thing.ID, - Owner: thing.Owner, - Key: thing.Key, - Name: thing.Name, - Metadata: thing.Metadata, - } - res.Things = append(res.Things, view) - } - - return res, nil - } -} - -func removeThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(viewResourceReq) - - err := req.validate() - if err == errors.ErrNotFound { - return removeRes{}, nil - } - - if err != nil { - return nil, err - } - - if err := 
svc.RemoveThing(ctx, req.token, req.id); err != nil { - return nil, err - } - - return removeRes{}, nil - } -} - -func createChannelEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createChannelReq) - - if err := req.validate(); err != nil { - return nil, err - } - - ch := things.Channel{ - Name: req.Name, - ID: req.ID, - Metadata: req.Metadata} - - saved, err := svc.CreateChannels(ctx, req.token, ch) - if err != nil { - return nil, err - } - - res := channelRes{ - ID: saved[0].ID, - created: true, - } - return res, nil - } -} - -func createChannelsEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createChannelsReq) - - if err := req.validate(); err != nil { - return nil, err - } - - chs := []things.Channel{} - for _, cReq := range req.Channels { - ch := things.Channel{ - Metadata: cReq.Metadata, - Name: cReq.Name, - ID: cReq.ID, - } - chs = append(chs, ch) - } - - saved, err := svc.CreateChannels(ctx, req.token, chs...) 
- if err != nil { - return nil, err - } - - res := channelsRes{ - Channels: []channelRes{}, - created: true, - } - - for _, ch := range saved { - cRes := channelRes{ - ID: ch.ID, - Name: ch.Name, - Metadata: ch.Metadata, - } - res.Channels = append(res.Channels, cRes) - } - - return res, nil - } -} - -func updateChannelEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(updateChannelReq) - - if err := req.validate(); err != nil { - return nil, err - } - - channel := things.Channel{ - ID: req.id, - Name: req.Name, - Metadata: req.Metadata, - } - if err := svc.UpdateChannel(ctx, req.token, channel); err != nil { - return nil, err - } - - res := channelRes{ - ID: req.id, - created: false, - } - return res, nil - } -} - -func viewChannelEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(viewResourceReq) - - if err := req.validate(); err != nil { - return nil, err - } - - channel, err := svc.ViewChannel(ctx, req.token, req.id) - if err != nil { - return nil, err - } - - res := viewChannelRes{ - ID: channel.ID, - Owner: channel.Owner, - Name: channel.Name, - Metadata: channel.Metadata, - } - - return res, nil - } -} - -func listChannelsEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listResourcesReq) - - if err := req.validate(); err != nil { - return nil, err - } - - page, err := svc.ListChannels(ctx, req.token, req.pageMetadata) - if err != nil { - return nil, err - } - - res := channelsPageRes{ - pageRes: pageRes{ - Total: page.Total, - Offset: page.Offset, - Limit: page.Limit, - Order: page.Order, - Dir: page.Dir, - }, - Channels: []viewChannelRes{}, - } - // Cast channels - for _, channel := range page.Channels { - view := viewChannelRes{ - ID: channel.ID, - Owner: channel.Owner, - 
Name: channel.Name, - Metadata: channel.Metadata, - } - - res.Channels = append(res.Channels, view) - } - - return res, nil - } -} - -func listChannelsByThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listByConnectionReq) - - if err := req.validate(); err != nil { - return nil, err - } - - page, err := svc.ListChannelsByThing(ctx, req.token, req.id, req.pageMetadata) - if err != nil { - return nil, err - } - - res := channelsPageRes{ - pageRes: pageRes{ - Total: page.Total, - Offset: page.Offset, - Limit: page.Limit, - }, - Channels: []viewChannelRes{}, - } - for _, channel := range page.Channels { - view := viewChannelRes{ - ID: channel.ID, - Owner: channel.Owner, - Name: channel.Name, - Metadata: channel.Metadata, - } - res.Channels = append(res.Channels, view) - } - - return res, nil - } -} - -func removeChannelEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(viewResourceReq) - - if err := req.validate(); err != nil { - if err == errors.ErrNotFound { - return removeRes{}, nil - } - return nil, err - } - - if err := svc.RemoveChannel(ctx, req.token, req.id); err != nil { - return nil, err - } - - return removeRes{}, nil - } -} - -func connectThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - cr := request.(connectThingReq) - - if err := cr.validate(); err != nil { - return nil, err - } - - if err := svc.Connect(ctx, cr.token, []string{cr.chanID}, []string{cr.thingID}); err != nil { - return nil, err - } - - return connectThingRes{}, nil - } -} - -func connectEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - cr := request.(connectReq) - - if err := cr.validate(); err != nil { - return nil, err - } - - if 
err := svc.Connect(ctx, cr.token, cr.ChannelIDs, cr.ThingIDs); err != nil { - return nil, err - } - - return connectRes{}, nil - } -} - -func disconnectEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - cr := request.(connectReq) - if err := cr.validate(); err != nil { - return nil, err - } - - if err := svc.Disconnect(ctx, cr.token, cr.ChannelIDs, cr.ThingIDs); err != nil { - return nil, err - } - - return disconnectRes{}, nil - } -} - -func disconnectThingEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(connectThingReq) - - if err := req.validate(); err != nil { - return nil, err - } - - if err := svc.Disconnect(ctx, req.token, []string{req.chanID}, []string{req.thingID}); err != nil { - return nil, err - } - - return disconnectThingRes{}, nil - } -} - -func listMembersEndpoint(svc things.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listThingsGroupReq) - if err := req.validate(); err != nil { - return thingsPageRes{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - - page, err := svc.ListMembers(ctx, req.token, req.groupID, req.pageMetadata) - if err != nil { - return thingsPageRes{}, err - } - - return buildThingsResponse(page), nil - } -} - -func buildThingsResponse(up things.Page) thingsPageRes { - res := thingsPageRes{ - pageRes: pageRes{ - Total: up.Total, - Offset: up.Offset, - Limit: up.Limit, - }, - Things: []viewThingRes{}, - } - for _, th := range up.Things { - view := viewThingRes{ - ID: th.ID, - Key: th.Key, - Owner: th.Owner, - Metadata: th.Metadata, - Name: th.Name, - } - res.Things = append(res.Things, view) - } - return res -} diff --git a/things/api/things/http/endpoint_test.go b/things/api/things/http/endpoint_test.go deleted file mode 100644 index 3ebdf17ca5..0000000000 --- 
a/things/api/things/http/endpoint_test.go +++ /dev/null @@ -1,2880 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "strconv" - "strings" - "testing" - "time" - - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - httpapi "github.com/mainflux/mainflux/things/api/things/http" - "github.com/mainflux/mainflux/things/mocks" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - contentType = "application/json" - email = "user@example.com" - adminEmail = "admin@example.com" - otherExampleEmail = "other_user@example.com" - token = "token" - otherExampleToken = "other_token" - wrongValue = "wrong_value" - thingKey = "key" - wrongID = 0 - maxNameSize = 1024 - nameKey = "name" - ascKey = "asc" - descKey = "desc" - prefix = "fe6b4e92-cc98-425e-b0aa-" -) - -var ( - thing = things.Thing{ - Name: "test_app", - Metadata: map[string]interface{}{"test": "data"}, - } - channel = things.Channel{ - Name: "test", - Metadata: map[string]interface{}{"test": "data"}, - } - invalidName = strings.Repeat("m", maxNameSize+1) - notFoundRes = toJSON(apiutil.ErrorRes{Err: errors.ErrNotFound.Error()}) - unauthRes = toJSON(apiutil.ErrorRes{Err: errors.ErrAuthentication.Error()}) - missingTokRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrBearerToken.Error()}) - searchThingReq = things.PageMetadata{ - Limit: 5, - Offset: 0, - } -) - -type testRequest struct { - client *http.Client - method string - url string - contentType string - token string - body io.Reader -} - -func (tr testRequest) make() (*http.Response, error) { - req, err := http.NewRequest(tr.method, tr.url, tr.body) - if err != nil 
{ - return nil, err - } - if tr.token != "" { - req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) - } - if tr.contentType != "" { - req.Header.Set("Content-Type", tr.contentType) - } - return tr.client.Do(req) -} - -func newService(tokens map[string]string) things.Service { - userPolicy := mocks.MockSubjectSet{Object: "users", Relation: "member"} - adminPolicy := mocks.MockSubjectSet{Object: "authorities", Relation: "member"} - auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{ - adminEmail: {userPolicy, adminPolicy}, email: {userPolicy}}) - conns := make(chan mocks.Connection) - thingsRepo := mocks.NewThingRepository(conns) - channelsRepo := mocks.NewChannelRepository(thingsRepo, conns) - chanCache := mocks.NewChannelCache() - thingCache := mocks.NewThingCache() - idProvider := uuid.NewMock() - - return things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) -} - -func newServer(svc things.Service) *httptest.Server { - logger := logger.NewMock() - mux := httpapi.MakeHandler(mocktracer.New(), svc, logger) - return httptest.NewServer(mux) -} - -func toJSON(data interface{}) string { - jsonData, _ := json.Marshal(data) - return string(jsonData) -} - -func TestCreateThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - th := thing - th.Key = thingKey - data := toJSON(th) - - th.Name = invalidName - invalidData := toJSON(th) - - cases := []struct { - desc string - req string - contentType string - auth string - statusCode int - location string - }{ - { - desc: "add valid thing", - req: data, - contentType: contentType, - auth: token, - statusCode: http.StatusCreated, - location: fmt.Sprintf("/things/%s%012d", uuid.Prefix, 1), - }, - { - desc: "add thing with existing key", - req: data, - contentType: contentType, - auth: token, - statusCode: http.StatusConflict, - location: "", - }, - { - desc: "add thing with empty JSON request", - req: "{}", - 
contentType: contentType, - auth: token, - statusCode: http.StatusCreated, - location: fmt.Sprintf("/things/%s%012d", uuid.Prefix, 3), - }, - { - desc: "add thing with invalid auth token", - req: data, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - location: "", - }, - { - desc: "add thing with empty auth token", - req: data, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - location: "", - }, - { - desc: "add thing with invalid request format", - req: "}", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - location: "", - }, - { - desc: "add thing with empty request", - req: "", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - location: "", - }, - { - desc: "add thing without content type", - req: data, - contentType: "", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - location: "", - }, - { - desc: "add thing with invalid name", - req: invalidData, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - location: "", - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/things", ts.URL), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - location := res.Header.Get("Location") - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.location, location, fmt.Sprintf("%s: expected location %s got %s", tc.desc, tc.location, location)) - } -} - -func TestCreateThings(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - data := `[{"name": "1", "key": "1"}, {"name": "2", "key": "2"}]` - invalidData := 
fmt.Sprintf(`[{"name": "%s", "key": "10"}]`, invalidName) - - cases := []struct { - desc string - data string - contentType string - auth string - statusCode int - }{ - { - desc: "create valid things", - data: data, - contentType: contentType, - auth: token, - statusCode: http.StatusCreated, - }, - { - desc: "create things with empty request", - data: "", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "create thing with invalid request format", - data: "}", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "create thing with invalid name", - data: invalidData, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "create things with empty JSON array", - data: "[]", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "create thing with existing key", - data: data, - contentType: contentType, - auth: token, - statusCode: http.StatusConflict, - }, - { - desc: "create thing with invalid auth token", - data: data, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "create thing with empty auth token", - data: data, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "create thing without content type", - data: data, - contentType: "", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/things/bulk", ts.URL), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.data), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func 
TestUpdateThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - data := toJSON(thing) - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - th1 := ths[0] - - th2 := thing - th2.Name = invalidName - invalidData := toJSON(th2) - - cases := []struct { - desc string - req string - id string - contentType string - auth string - statusCode int - }{ - { - desc: "update existing thing", - req: data, - id: th1.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusOK, - }, - { - desc: "update thing with empty JSON request", - req: "{}", - id: th1.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusOK, - }, - { - desc: "update non-existent thing", - req: data, - id: strconv.FormatUint(wrongID, 10), - contentType: contentType, - auth: token, - statusCode: http.StatusForbidden, - }, - { - desc: "update thing with invalid id", - req: data, - id: "invalid", - contentType: contentType, - auth: token, - statusCode: http.StatusForbidden, - }, - { - desc: "update thing with invalid user token", - req: data, - id: th1.ID, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "update thing with empty user token", - req: data, - id: th1.ID, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "update thing with invalid data format", - req: "{", - id: th1.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "update thing with empty request", - req: "", - id: th1.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "update thing without content type", - req: data, - id: th1.ID, - contentType: "", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "update thing with invalid name", - req: 
invalidData, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPut, - url: fmt.Sprintf("%s/things/%s", ts.URL, tc.id), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.req), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestShareThing(t *testing.T) { - token2 := "token2" - svc := newService(map[string]string{token: email, token2: "user@ex.com"}) - ts := newServer(svc) - defer ts.Close() - - type shareThingReq struct { - UserIDs []string `json:"user_ids"` - Policies []string `json:"policies"` - } - - data := toJSON(shareThingReq{UserIDs: []string{"token2"}, Policies: []string{"read"}}) - invalidData := toJSON(shareThingReq{}) - invalidPolicies := toJSON(shareThingReq{UserIDs: []string{"token2"}, Policies: []string{"wrong"}}) - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - th := ths[0] - - cases := []struct { - desc string - req string - thingID string - contentType string - token string - statusCode int - }{ - { - desc: "share a thing", - req: data, - thingID: th.ID, - contentType: contentType, - token: token, - statusCode: http.StatusOK, - }, - { - desc: "share a thing with empty content-type", - req: data, - thingID: th.ID, - contentType: "", - token: token, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "share a thing with empty req body", - req: "", - thingID: th.ID, - contentType: contentType, - token: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "share a thing with empty token", - req: data, - thingID: th.ID, - contentType: contentType, - token: "", - 
statusCode: http.StatusUnauthorized, - }, - { - desc: "share a thing with empty thing id", - req: data, - thingID: "", - contentType: contentType, - token: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "share a thing with invalid req body", - req: invalidData, - thingID: th.ID, - contentType: contentType, - token: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "share a thing with invalid policies request", - req: invalidPolicies, - thingID: th.ID, - contentType: contentType, - token: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "share a thing with invalid token", - req: data, - thingID: th.ID, - contentType: contentType, - token: "invalid", - statusCode: http.StatusUnauthorized, - }, - { - desc: "share a thing with unauthorized access", - req: data, - thingID: th.ID, - contentType: contentType, - token: token2, - statusCode: http.StatusForbidden, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/things/%s/share", ts.URL, tc.thingID), - contentType: tc.contentType, - token: tc.token, - body: strings.NewReader(tc.req), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestUpdateKey(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - th := thing - th.Key = "key" - ths, err := svc.CreateThings(context.Background(), token, th) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - th = ths[0] - - th.Key = "new-key" - data := toJSON(th) - - th.Key = "key" - dummyData := toJSON(th) - - cases := []struct { - desc string - req string - id string - contentType string - auth string - statusCode int - }{ - { - desc: "update key for an existing thing", - req: 
data, - id: th.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusOK, - }, - { - desc: "update thing with conflicting key", - req: data, - id: th.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusConflict, - }, - { - desc: "update key with empty JSON request", - req: "{}", - id: th.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "update key of non-existent thing", - req: dummyData, - id: strconv.FormatUint(wrongID, 10), - contentType: contentType, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "update thing with invalid id", - req: dummyData, - id: "invalid", - contentType: contentType, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "update thing with invalid user token", - req: data, - id: th.ID, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "update thing with empty user token", - req: data, - id: th.ID, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "update thing with invalid data format", - req: "{", - id: th.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "update thing with empty request", - req: "", - id: th.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "update thing without content type", - req: data, - id: th.ID, - contentType: "", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPatch, - url: fmt.Sprintf("%s/things/%s/key", ts.URL, tc.id), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.req), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: 
expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestViewThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := ths[0] - - data := toJSON(thingRes{ - ID: th.ID, - Name: th.Name, - Key: th.Key, - Metadata: th.Metadata, - }) - - cases := []struct { - desc string - id string - auth string - statusCode int - res string - }{ - { - desc: "view existing thing", - id: th.ID, - auth: token, - statusCode: http.StatusOK, - res: data, - }, - { - desc: "view non-existent thing", - id: strconv.FormatUint(wrongID, 10), - auth: token, - statusCode: http.StatusNotFound, - res: notFoundRes, - }, - { - desc: "view thing by passing invalid token", - id: th.ID, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - res: unauthRes, - }, - { - desc: "view thing by passing empty token", - id: th.ID, - auth: "", - statusCode: http.StatusUnauthorized, - res: missingTokRes, - }, - { - desc: "view thing by passing invalid id", - id: "invalid", - auth: token, - statusCode: http.StatusNotFound, - res: notFoundRes, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodGet, - url: fmt.Sprintf("%s/things/%s", ts.URL, tc.id), - token: tc.auth, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - body, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - data := strings.Trim(string(body), "\n") - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, data, fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, data)) - } -} - -func TestListThings(t *testing.T) { - svc 
:= newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - data := []thingRes{} - for i := 0; i < 100; i++ { - id := fmt.Sprintf("%s%012d", prefix, i+1) - thing1 := thing - thing1.ID = id - ths, err := svc.CreateThings(context.Background(), token, thing1) - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := ths[0] - data = append(data, thingRes{ - ID: th.ID, - Name: th.Name, - Key: th.Key, - Metadata: th.Metadata, - }) - } - - thingURL := fmt.Sprintf("%s/things", ts.URL) - cases := []struct { - desc string - auth string - statusCode int - url string - res []thingRes - }{ - { - desc: "get a list of things", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, 0, 5), - res: data[0:5], - }, - { - desc: "get a list of things ordered by name descendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=name&dir=desc", thingURL, 0, 5), - res: data[0:5], - }, - { - desc: "get a list of things ordered by name ascendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=name&dir=asc", thingURL, 0, 5), - res: data[0:5], - }, - { - desc: "get a list of things with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=wrong", thingURL, 0, 5), - res: nil, - }, - { - desc: "get a list of things with invalid dir", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=name&dir=wrong", thingURL, 0, 5), - res: nil, - }, - { - desc: "get a list of things with invalid token", - auth: wrongValue, - statusCode: http.StatusUnauthorized, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, 0, 1), - res: nil, - }, - { - desc: "get a list of things with empty token", - auth: "", - statusCode: http.StatusUnauthorized, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, 0, 1), - res: nil, - }, - { - 
desc: "get a list of things with negative offset", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, -1, 5), - res: nil, - }, - { - desc: "get a list of things with negative limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, 1, -5), - res: nil, - }, - { - desc: "get a list of things with zero limit and offset 1", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, 1, 0), - res: nil, - }, - { - desc: "get a list of things without offset", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?limit=%d", thingURL, 5), - res: data[0:5], - }, - { - desc: "get a list of things without limit", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d", thingURL, 1), - res: data[1:11], - }, - { - desc: "get a list of things with redundant query params", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&value=something", thingURL, 0, 5), - res: data[0:5], - }, - { - desc: "get a list of things with limit greater than max", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d", thingURL, 0, 110), - res: nil, - }, - { - desc: "get a list of things with default URL", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s%s", thingURL, ""), - res: data[0:10], - }, - { - desc: "get a list of things with invalid number of params", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s%s", thingURL, "?offset=4&limit=4&limit=5&offset=5"), - res: nil, - }, - { - desc: "get a list of things with invalid offset", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s%s", thingURL, "?offset=e&limit=5"), - res: nil, - }, - { - desc: "get a list of things with invalid limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s%s", 
thingURL, "?offset=5&limit=e"), - res: nil, - }, - { - desc: "get a list of things filtering with invalid name", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&name=%s", thingURL, 0, 5, invalidName), - res: nil, - }, - { - desc: "get a list of things sorted by name ascendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", thingURL, 0, 5, nameKey, ascKey), - res: data[0:5], - }, - { - desc: "get a list of things sorted by name descendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", thingURL, 0, 5, nameKey, descKey), - res: data[0:5], - }, - { - desc: "get a list of things sorted with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", thingURL, 0, 5, "wrong", descKey), - res: nil, - }, - { - desc: "get a list of things sorted by name with invalid direction", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", thingURL, 0, 5, nameKey, "wrong"), - res: nil, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodGet, - url: tc.url, - token: tc.auth, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - - var data thingsPageRes - err = json.NewDecoder(res.Body).Decode(&data) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.ElementsMatch(t, tc.res, data.Things, fmt.Sprintf("%s: expected body %v got %v", tc.desc, tc.res, data.Things)) - } -} - -func TestSearchThings(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - th := searchThingReq - 
validData := toJSON(th) - - th.Dir = "desc" - th.Order = "name" - descData := toJSON(th) - - th.Dir = "asc" - ascData := toJSON(th) - - th.Order = "wrong" - invalidOrderData := toJSON(th) - - th = searchThingReq - th.Dir = "wrong" - invalidDirData := toJSON(th) - - th = searchThingReq - th.Limit = 110 - limitMaxData := toJSON(th) - - th.Limit = 0 - zeroLimitData := toJSON(th) - - th = searchThingReq - th.Name = invalidName - invalidNameData := toJSON(th) - - th.Name = invalidName - invalidData := toJSON(th) - - data := []thingRes{} - for i := 0; i < 100; i++ { - name := "name_" + fmt.Sprintf("%03d", i+1) - id := fmt.Sprintf("%s%012d", prefix, i+1) - ths, err := svc.CreateThings(context.Background(), token, things.Thing{ID: id, Name: name, Metadata: map[string]interface{}{"test": name}}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := ths[0] - data = append(data, thingRes{ - ID: th.ID, - Name: th.Name, - Key: th.Key, - Metadata: th.Metadata, - }) - } - - cases := []struct { - desc string - auth string - statusCode int - req string - res []thingRes - }{ - { - desc: "search things", - auth: token, - statusCode: http.StatusOK, - req: validData, - res: data[0:5], - }, - { - desc: "search things ordered by name descendent", - auth: token, - statusCode: http.StatusOK, - req: descData, - res: data[0:5], - }, - { - desc: "search things ordered by name ascendent", - auth: token, - statusCode: http.StatusOK, - req: ascData, - res: data[0:5], - }, - { - desc: "search things with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - req: invalidOrderData, - res: nil, - }, - { - desc: "search things with invalid dir", - auth: token, - statusCode: http.StatusBadRequest, - req: invalidDirData, - res: nil, - }, - { - desc: "search things with invalid token", - auth: wrongValue, - statusCode: http.StatusUnauthorized, - req: validData, - res: nil, - }, - { - desc: "search things with invalid data", - auth: token, - statusCode: 
http.StatusBadRequest, - req: invalidData, - res: nil, - }, - { - desc: "search things with empty token", - auth: "", - statusCode: http.StatusUnauthorized, - req: validData, - res: nil, - }, - { - desc: "search things with zero limit", - auth: token, - statusCode: http.StatusBadRequest, - req: zeroLimitData, - res: nil, - }, - { - desc: "search things without offset", - auth: token, - statusCode: http.StatusOK, - req: validData, - res: data[0:5], - }, - { - desc: "search things with limit greater than max", - auth: token, - statusCode: http.StatusBadRequest, - req: limitMaxData, - res: nil, - }, - { - desc: "search things with default URL", - auth: token, - statusCode: http.StatusOK, - req: validData, - res: data[0:5], - }, - { - desc: "search things filtering with invalid name", - auth: token, - statusCode: http.StatusBadRequest, - req: invalidNameData, - res: nil, - }, - { - desc: "search things sorted by name ascendent", - auth: token, - statusCode: http.StatusOK, - req: validData, - res: data[0:5], - }, - { - desc: "search things sorted by name descendent", - auth: token, - statusCode: http.StatusOK, - req: validData, - res: data[0:5], - }, - { - desc: "search things sorted with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - req: invalidOrderData, - res: nil, - }, - { - desc: "search things sorted by name with invalid direction", - auth: token, - statusCode: http.StatusBadRequest, - req: invalidDirData, - res: nil, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/things/search", ts.URL), - token: tc.auth, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - - var data thingsPageRes - err = json.NewDecoder(res.Body).Decode(&data) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - - assert.Equal(t, tc.statusCode, res.StatusCode, 
fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.ElementsMatch(t, tc.res, data.Things, fmt.Sprintf("%s: expected body %v got %v", tc.desc, tc.res, data.Things)) - } -} - -func TestListThingsByChannel(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - ch := chs[0] - - data := []thingRes{} - for i := 0; i < 101; i++ { - id := fmt.Sprintf("%s%012d", prefix, i+1) - thing1 := thing - thing1.ID = id - ths, err := svc.CreateThings(context.Background(), token, thing1) - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := ths[0] - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - data = append(data, thingRes{ - ID: th.ID, - Name: th.Name, - Key: th.Key, - Metadata: th.Metadata, - }) - } - thingURL := fmt.Sprintf("%s/channels", ts.URL) - - // Wait for things and channels to connect. 
- time.Sleep(time.Second) - - cases := []struct { - desc string - auth string - statusCode int - url string - res []thingRes - }{ - { - desc: "get a list of things by channel", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, 0, 5), - res: data[0:5], - }, - { - desc: "get a list of things by channel with invalid token", - auth: wrongValue, - statusCode: http.StatusUnauthorized, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, 0, 1), - res: nil, - }, - { - desc: "get a list of things by channel with empty token", - auth: "", - statusCode: http.StatusUnauthorized, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, 0, 1), - res: nil, - }, - { - desc: "get a list of things by channel with negative offset", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, -1, 5), - res: nil, - }, - { - desc: "get a list of things by channel with negative limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, 1, -5), - res: nil, - }, - { - desc: "get a list of things by channel with zero limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, 1, 0), - res: nil, - }, - { - desc: "get a list of things by channel without offset", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things?limit=%d", thingURL, ch.ID, 5), - res: data[0:5], - }, - { - desc: "get a list of things by channel without limit", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things?offset=%d", thingURL, ch.ID, 1), - res: data[1:11], - }, - { - desc: "get a list of things by channel with redundant query params", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d&value=something", thingURL, ch.ID, 0, 5), - 
res: data[0:5], - }, - { - desc: "get a list of things by channel with limit greater than max", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d", thingURL, ch.ID, 0, 110), - res: nil, - }, - { - desc: "get a list of things by channel with default URL", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things", thingURL, ch.ID), - res: data[0:10], - }, - { - desc: "get a list of things by channel with invalid number of params", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things%s", thingURL, ch.ID, "?offset=4&limit=4&limit=5&offset=5"), - res: nil, - }, - { - desc: "get a list of things by channel with invalid offset", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things%s", thingURL, ch.ID, "?offset=e&limit=5"), - res: nil, - }, - { - desc: "get a list of things by channel with invalid limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things%s", thingURL, ch.ID, "?offset=5&limit=e"), - res: nil, - }, - { - desc: "get a list of things by channel sorted by name ascendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d&order=%s&dir=%s", thingURL, ch.ID, 0, 5, nameKey, ascKey), - res: data[0:5], - }, - { - desc: "get a list of things by channel sorted by name descendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d&order=%s&dir=%s", thingURL, ch.ID, 0, 5, nameKey, descKey), - res: data[0:5], - }, - { - desc: "get a list of things by channel sorted with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/things?offset=%d&limit=%d&order=%s&dir=%s", thingURL, ch.ID, 0, 5, "wrong", ascKey), - res: nil, - }, - { - desc: "get a list of things by channel sorted by name with invalid direction", - auth: token, - statusCode: http.StatusBadRequest, - url: 
fmt.Sprintf("%s/%s/things?offset=%d&limit=%d&order=%s&dir=%s", thingURL, ch.ID, 0, 5, nameKey, "wrong"), - res: nil, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodGet, - url: tc.url, - token: tc.auth, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - - var data thingsPageRes - err = json.NewDecoder(res.Body).Decode(&data) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.ElementsMatch(t, tc.res, data.Things, fmt.Sprintf("%s: expected body %v got %v", tc.desc, tc.res, data.Things)) - } -} - -func TestRemoveThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - th := ths[0] - - cases := []struct { - desc string - id string - auth string - status int - }{ - { - desc: "delete existing thing", - id: th.ID, - auth: token, - status: http.StatusNoContent, - }, - { - desc: "delete non-existent thing", - id: strconv.FormatUint(wrongID, 10), - auth: token, - status: http.StatusNotFound, - }, - { - desc: "delete thing with invalid token", - id: th.ID, - auth: wrongValue, - status: http.StatusUnauthorized, - }, - { - desc: "delete thing with empty token", - id: th.ID, - auth: "", - status: http.StatusUnauthorized, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodDelete, - url: fmt.Sprintf("%s/things/%s", ts.URL, tc.id), - token: tc.auth, - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.status, res.StatusCode, fmt.Sprintf("%s: expected status code %d got 
%d", tc.desc, tc.status, res.StatusCode)) - } -} - -func TestCreateChannel(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - data := toJSON(channel) - - th := channel - th.Name = invalidName - invalidData := toJSON(th) - - cases := []struct { - desc string - req string - contentType string - auth string - statusCode int - location string - }{ - { - desc: "create new channel", - req: data, - contentType: contentType, - auth: token, - statusCode: http.StatusCreated, - location: fmt.Sprintf("/channels/%s%012d", uuid.Prefix, 1), - }, - { - desc: "create new channel with invalid token", - req: data, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - location: "", - }, - { - desc: "create new channel with empty token", - req: data, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - location: "", - }, - { - desc: "create new channel with invalid data format", - req: "{", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - location: "", - }, - { - desc: "create new channel with empty JSON request", - req: "{}", - contentType: contentType, - auth: token, - statusCode: http.StatusCreated, - location: fmt.Sprintf("/channels/%s%012d", uuid.Prefix, 2), - }, - { - desc: "create new channel with empty request", - req: "", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - location: "", - }, - { - desc: "create new channel without content type", - req: data, - contentType: "", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - location: "", - }, - { - desc: "create new channel with invalid name", - req: invalidData, - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - location: "", - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/channels", ts.URL), - contentType: 
tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - location := res.Header.Get("Location") - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.location, location, fmt.Sprintf("%s: expected location %s got %s", tc.desc, tc.location, location)) - } -} - -func TestCreateChannels(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - data := `[{"name": "1"}, {"name": "2"}]` - invalidData := fmt.Sprintf(`[{"name": "%s"}]`, invalidName) - - cases := []struct { - desc string - data string - contentType string - auth string - statusCode int - }{ - { - desc: "create valid channels", - data: data, - contentType: contentType, - auth: token, - statusCode: http.StatusCreated, - }, - { - desc: "create channel with empty request", - data: "", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "create channels with empty JSON", - data: "[]", - contentType: contentType, - auth: token, - statusCode: http.StatusBadRequest, - }, - { - desc: "create channel with invalid auth token", - data: data, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "create channel with empty auth token", - data: data, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "create channel with invalid request format", - data: "}", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "create channel without content type", - data: data, - contentType: "", - auth: token, - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "create channel with invalid name", - data: invalidData, - contentType: contentType, - auth: token, - statusCode: 
http.StatusBadRequest, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/channels/bulk", ts.URL), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(tc.data), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestUpdateChannel(t *testing.T) { - svc := newService(map[string]string{token: adminEmail}) - ts := newServer(svc) - defer ts.Close() - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - ch := chs[0] - - c := channel - c.Name = "updated_channel" - updateData := toJSON(c) - - c.Name = invalidName - invalidData := toJSON(c) - - cases := []struct { - desc string - req string - id string - contentType string - auth string - statusCode int - }{ - { - desc: "update existing channel", - req: updateData, - id: ch.ID, - contentType: contentType, - auth: token, - statusCode: http.StatusOK, - }, - { - desc: "update non-existing channel", - req: updateData, - id: strconv.FormatUint(wrongID, 10), - contentType: contentType, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "update channel with invalid id", - req: updateData, - id: "invalid", - contentType: contentType, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "update channel with invalid token", - req: updateData, - id: ch.ID, - contentType: contentType, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "update channel with empty token", - req: updateData, - id: ch.ID, - contentType: contentType, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "update channel with invalid data format", - req: "}", - id: ch.ID, - contentType: 
contentType,
			auth:        token,
			statusCode:  http.StatusBadRequest,
		},
		{
			desc:        "update channel with empty JSON object",
			req:         "{}",
			id:          ch.ID,
			contentType: contentType,
			auth:        token,
			statusCode:  http.StatusOK,
		},
		{
			desc:        "update channel with empty request",
			req:         "",
			id:          ch.ID,
			contentType: contentType,
			auth:        token,
			statusCode:  http.StatusBadRequest,
		},
		{
			desc:        "update channel with missing content type",
			req:         updateData,
			id:          ch.ID,
			contentType: contentType,
			auth:        token,
			statusCode:  http.StatusUnsupportedMediaType,
		},
		{
			desc: "update channel with invalid name",
			req:  invalidData,
			// Target the existing channel: without an ID the request went to
			// "/channels/" and the expected 400 never came from name
			// validation, which is what this case is meant to exercise.
			id:          ch.ID,
			contentType: contentType,
			auth:        token,
			statusCode:  http.StatusBadRequest,
		},
	}

	for _, tc := range cases {
		req := testRequest{
			client:      ts.Client(),
			method:      http.MethodPut,
			url:         fmt.Sprintf("%s/channels/%s", ts.URL, tc.id),
			contentType: tc.contentType,
			token:       tc.auth,
			body:        strings.NewReader(tc.req),
		}

		res, err := req.make()
		assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err))
		assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode))
	}
}

// TestViewChannel verifies fetching a single channel over HTTP: success,
// unknown/invalid IDs, and missing or invalid credentials.
func TestViewChannel(t *testing.T) {
	svc := newService(map[string]string{token: adminEmail})
	ts := newServer(svc)
	defer ts.Close()

	chs, err := svc.CreateChannels(context.Background(), token, channel)
	require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err))
	sch := chs[0]

	ths, err := svc.CreateThings(context.Background(), token, thing)
	require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err))
	th := ths[0]
	err = svc.Connect(context.Background(), token, []string{sch.ID}, []string{th.ID})
	assert.Nil(t, err, fmt.Sprintf("got unexpected error when connecting to service: %s", err))

	data := toJSON(channelRes{
		ID:       sch.ID,
		Name:     sch.Name,
		Metadata: sch.Metadata,
	})

	cases := []struct {
		desc       string
		id         string
		auth       string
- statusCode int - res string - }{ - { - desc: "view existing channel", - id: sch.ID, - auth: token, - statusCode: http.StatusOK, - res: data, - }, - { - desc: "view non-existent channel", - id: strconv.FormatUint(wrongID, 10), - auth: token, - statusCode: http.StatusNotFound, - res: notFoundRes, - }, - { - desc: "view channel with invalid token", - id: sch.ID, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - res: unauthRes, - }, - { - desc: "view channel with empty token", - id: sch.ID, - auth: "", - statusCode: http.StatusUnauthorized, - res: missingTokRes, - }, - { - desc: "view channel with invalid id", - id: "invalid", - auth: token, - statusCode: http.StatusNotFound, - res: notFoundRes, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodGet, - url: fmt.Sprintf("%s/channels/%s", ts.URL, tc.id), - token: tc.auth, - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - data, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - body := strings.Trim(string(data), "\n") - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, body, fmt.Sprintf("%s: got incorrect response body", tc.desc)) - } -} - -func TestListChannels(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - defer ts.Close() - - channels := []channelRes{} - for i := 0; i < 101; i++ { - name := "name_" + fmt.Sprintf("%03d", i+1) - chs, err := svc.CreateChannels(context.Background(), token, - things.Channel{ - Name: name, - Metadata: map[string]interface{}{"test": "data"}, - }) - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - ch := chs[0] - ths, err := svc.CreateThings(context.Background(), token, thing) - assert.Nil(t, err, fmt.Sprintf("got unexpected 
error: %s", err)) - th := ths[0] - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - channels = append(channels, channelRes{ - ID: ch.ID, - Name: ch.Name, - Metadata: ch.Metadata, - }) - } - channelURL := fmt.Sprintf("%s/channels", ts.URL) - - cases := []struct { - desc string - auth string - statusCode int - url string - res []channelRes - }{ - { - desc: "get a list of channels", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, 0, 6), - res: channels[0:6], - }, - { - desc: "get a list of channels ordered by id descendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=id&dir=desc", channelURL, 0, 6), - res: channels[len(channels)-6:], - }, - { - desc: "get a list of channels ordered by id ascendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=id&dir=asc", channelURL, 0, 6), - res: channels[0:6], - }, - { - desc: "get a list of channels with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=wrong", channelURL, 0, 6), - res: nil, - }, - { - desc: "get a list of channels with invalid dir", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=name&dir=wrong", channelURL, 0, 6), - res: nil, - }, - { - desc: "get a list of channels with invalid token", - auth: wrongValue, - statusCode: http.StatusUnauthorized, - url: fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, 0, 1), - res: nil, - }, - { - desc: "get a list of channels with empty token", - auth: "", - statusCode: http.StatusUnauthorized, - url: fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, 0, 1), - res: nil, - }, - { - desc: "get a list of channels with negative offset", - auth: token, - statusCode: http.StatusBadRequest, - url: 
fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, -1, 5),
			res:        nil,
		},
		{
			desc:       "get a list of channels with negative limit",
			auth:       token,
			statusCode: http.StatusBadRequest,
			// Send a genuinely negative limit (with a valid offset); this
			// case previously duplicated the negative-offset query (-1, 5)
			// and never exercised limit validation at all.
			url: fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, 0, -5),
			res: nil,
		},
		{
			desc:       "get a list of channels with zero limit and offset 1",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, 1, 0),
			res:        nil,
		},
		{
			desc:       "get a list of channels with no offset provided",
			auth:       token,
			statusCode: http.StatusOK,
			url:        fmt.Sprintf("%s?limit=%d", channelURL, 5),
			res:        channels[0:5],
		},
		{
			desc:       "get a list of channels with no limit provided",
			auth:       token,
			statusCode: http.StatusOK,
			url:        fmt.Sprintf("%s?offset=%d", channelURL, 1),
			res:        channels[1:11],
		},
		{
			desc:       "get a list of channels with redundant query params",
			auth:       token,
			statusCode: http.StatusOK,
			url:        fmt.Sprintf("%s?offset=%d&limit=%d&value=something", channelURL, 0, 5),
			res:        channels[0:5],
		},
		{
			desc:       "get a list of channels with limit greater than max",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s?offset=%d&limit=%d", channelURL, 0, 110),
			res:        nil,
		},
		{
			desc:       "get a list of channels with default URL",
			auth:       token,
			statusCode: http.StatusOK,
			url:        fmt.Sprintf("%s%s", channelURL, ""),
			res:        channels[0:10],
		},
		{
			desc:       "get a list of channels with invalid number of params",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s%s", channelURL, "?offset=4&limit=4&limit=5&offset=5"),
			res:        nil,
		},
		{
			desc:       "get a list of channels with invalid offset",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s%s", channelURL, "?offset=e&limit=5"),
			res:        nil,
		},
		{
			desc:       "get a list of channels with invalid limit",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s%s", channelURL, "?offset=5&limit=e"),
			res:
nil, - }, - { - desc: "get a list of channels with invalid name", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&name=%s", channelURL, 0, 10, invalidName), - res: nil, - }, - { - desc: "get a list of channels sorted by name ascendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", channelURL, 0, 6, nameKey, ascKey), - res: channels[0:6], - }, - { - desc: "get a list of channels sorted by name descendent", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", channelURL, 0, 6, nameKey, descKey), - res: channels[len(channels)-6:], - }, - { - desc: "get a list of channels sorted with invalid order", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", channelURL, 0, 6, "wrong", ascKey), - res: nil, - }, - { - desc: "get a list of channels sorted by name with invalid direction", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s?offset=%d&limit=%d&order=%s&dir=%s", channelURL, 0, 6, nameKey, "wrong"), - res: nil, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodGet, - url: tc.url, - token: tc.auth, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - - var body channelsPageRes - err = json.NewDecoder(res.Body).Decode(&body) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while deconding response body: %s", tc.desc, err)) - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.ElementsMatch(t, tc.res, body.Channels, fmt.Sprintf("%s: expected body %v got %v", tc.desc, tc.res, body.Channels)) - } -} - -func TestListChannelsByThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ts := newServer(svc) - 
defer ts.Close()

	ths, err := svc.CreateThings(context.Background(), token, thing)
	require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
	th := ths[0]

	// Create 101 channels, all connected to the same thing, so pagination
	// boundaries (default limit, max limit) can be exercised.
	channels := []channelRes{}
	for i := 0; i < 101; i++ {
		id := fmt.Sprintf("%s%012d", prefix, i+1)
		channel1 := channel
		channel1.ID = id
		chs, err := svc.CreateChannels(context.Background(), token, channel1)
		assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))
		ch := chs[0]
		err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID})
		assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err))

		channels = append(channels, channelRes{
			ID:       ch.ID,
			Name:     ch.Name,
			Metadata: ch.Metadata,
		})
	}
	channelURL := fmt.Sprintf("%s/things", ts.URL)

	cases := []struct {
		desc       string
		auth       string
		statusCode int
		url        string
		res        []channelRes
	}{
		{
			desc:       "get a list of channels by thing",
			auth:       token,
			statusCode: http.StatusOK,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, 0, 6),
			res:        channels[0:6],
		},
		{
			desc:       "get a list of channels by thing with invalid token",
			auth:       wrongValue,
			statusCode: http.StatusUnauthorized,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, 0, 1),
			res:        nil,
		},
		{
			desc:       "get a list of channels by thing with empty token",
			auth:       "",
			statusCode: http.StatusUnauthorized,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, 0, 1),
			res:        nil,
		},
		{
			desc:       "get a list of channels by thing with negative offset",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, -1, 5),
			res:        nil,
		},
		{
			desc:       "get a list of channels by thing with negative limit",
			auth:       token,
			statusCode: http.StatusBadRequest,
			// Send a genuinely negative limit; this case previously repeated
			// the negative-offset query (-1, 5) and never tested the limit.
			url: fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, 0, -5),
			res: nil,
		},
		{
			desc: "get a list of channels by
thing with zero limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, 1, 0), - res: nil, - }, - { - desc: "get a list of channels by thing with no offset provided", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/channels?limit=%d", channelURL, th.ID, 5), - res: channels[0:5], - }, - { - desc: "get a list of channels by thing with no limit provided", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/channels?offset=%d", channelURL, th.ID, 1), - res: channels[1:11], - }, - { - desc: "get a list of channels by thing with redundant query params", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d&value=something", channelURL, th.ID, 0, 5), - res: channels[0:5], - }, - { - desc: "get a list of channels by thing with limit greater than max", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d", channelURL, th.ID, 0, 110), - res: nil, - }, - { - desc: "get a list of channels by thing with default URL", - auth: token, - statusCode: http.StatusOK, - url: fmt.Sprintf("%s/%s/channels", channelURL, th.ID), - res: channels[0:10], - }, - { - desc: "get a list of channels by thing with invalid number of params", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/channels%s", channelURL, th.ID, "?offset=4&limit=4&limit=5&offset=5"), - res: nil, - }, - { - desc: "get a list of channels by thing with invalid offset", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/channels%s", channelURL, th.ID, "?offset=e&limit=5"), - res: nil, - }, - { - desc: "get a list of channels by thing with invalid limit", - auth: token, - statusCode: http.StatusBadRequest, - url: fmt.Sprintf("%s/%s/channels%s", channelURL, th.ID, "?offset=5&limit=e"), - res: nil, - }, - { - desc: "get a list of channels by thing sorted by name 
ascendent",
			auth:       token,
			statusCode: http.StatusOK,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d&order=%s&dir=%s", channelURL, th.ID, 0, 6, nameKey, ascKey),
			res:        channels[0:6],
		},
		{
			desc:       "get a list of channels by thing sorted by name descendent",
			auth:       token,
			statusCode: http.StatusOK,
			// NOTE(review): expected page is channels[0:6] (not the last 6 as
			// in TestListChannels); ElementsMatch ignores order, so this
			// presumably relies on the mock not re-sorting — confirm.
			url: fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d&order=%s&dir=%s", channelURL, th.ID, 0, 6, nameKey, descKey),
			res: channels[0:6],
		},
		{
			// Description fixed: previously read "inalid".
			desc:       "get a list of channels by thing sorted with invalid order",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d&order=%s&dir=%s", channelURL, th.ID, 0, 6, "wrong", ascKey),
			res:        nil,
		},
		{
			desc:       "get a list of channels by thing sorted by name with invalid direction",
			auth:       token,
			statusCode: http.StatusBadRequest,
			url:        fmt.Sprintf("%s/%s/channels?offset=%d&limit=%d&order=%s&dir=%s", channelURL, th.ID, 0, 6, nameKey, "wrong"),
			res:        nil,
		},
	}

	for _, tc := range cases {
		req := testRequest{
			client: ts.Client(),
			method: http.MethodGet,
			url:    tc.url,
			token:  tc.auth,
		}
		res, err := req.make()
		assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err))

		var body channelsPageRes
		err = json.NewDecoder(res.Body).Decode(&body)
		assert.Nil(t, err, fmt.Sprintf("%s: unexpected error while decoding response body: %s", tc.desc, err))

		assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode))
		assert.ElementsMatch(t, tc.res, body.Channels, fmt.Sprintf("%s: expected body %v got %v", tc.desc, tc.res, body.Channels))
	}
}

// TestRemoveChannel verifies channel deletion over HTTP, including the
// idempotent delete of an already-removed channel and auth failures.
func TestRemoveChannel(t *testing.T) {
	svc := newService(map[string]string{token: adminEmail})
	ts := newServer(svc)
	defer ts.Close()

	chs, _ := svc.CreateChannels(context.Background(), token, channel)
	ch := chs[0]

	cases := []struct {
		desc       string
		id         string
		auth       string
		statusCode int
}{ - { - desc: "remove channel with invalid token", - id: ch.ID, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "remove existing channel", - id: ch.ID, - auth: token, - statusCode: http.StatusNoContent, - }, - { - desc: "remove removed channel", - id: ch.ID, - auth: token, - statusCode: http.StatusNoContent, - }, - { - desc: "remove channel with invalid token", - id: ch.ID, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "remove channel with empty token", - id: ch.ID, - auth: "", - statusCode: http.StatusUnauthorized, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodDelete, - url: fmt.Sprintf("%s/channels/%s", ts.URL, tc.id), - token: tc.auth, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestConnect(t *testing.T) { - otherToken := otherExampleToken - otherEmail := otherExampleEmail - svc := newService(map[string]string{ - token: email, - otherToken: otherEmail, - }) - ts := newServer(svc) - defer ts.Close() - - ths, _ := svc.CreateThings(context.Background(), token, thing) - th1 := ths[0] - chs, _ := svc.CreateChannels(context.Background(), token, channel) - ch1 := chs[0] - chs, _ = svc.CreateChannels(context.Background(), otherToken, channel) - ch2 := chs[0] - - cases := []struct { - desc string - chanID string - thingID string - auth string - statusCode int - }{ - { - desc: "connect existing thing to existing channel", - chanID: ch1.ID, - thingID: th1.ID, - auth: token, - statusCode: http.StatusOK, - }, - { - desc: "connect existing thing to non-existent channel", - chanID: strconv.FormatUint(wrongID, 10), - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "connect non-existing thing to existing 
channel", - chanID: ch1.ID, - thingID: strconv.FormatUint(wrongID, 10), - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "connect existing thing to channel with invalid id", - chanID: "invalid", - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "connect thing with invalid id to existing channel", - chanID: ch1.ID, - thingID: "invalid", - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "connect existing thing to existing channel with invalid token", - chanID: ch1.ID, - thingID: th1.ID, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "connect existing thing to existing channel with empty token", - chanID: ch1.ID, - thingID: th1.ID, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "connect thing from owner to channel of other user", - chanID: ch2.ID, - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodPut, - url: fmt.Sprintf("%s/channels/%s/things/%s", ts.URL, tc.chanID, tc.thingID), - token: tc.auth, - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestCreateConnections(t *testing.T) { - otherToken := otherExampleToken - otherEmail := otherExampleEmail - svc := newService(map[string]string{ - token: email, - otherToken: otherEmail, - }) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - thIDs := []string{} - for _, th := range ths { - thIDs = append(thIDs, th.ID) - } - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("got unexpected 
error: %s\n", err)) - chIDs1 := []string{} - for _, ch := range chs { - chIDs1 = append(chIDs1, ch.ID) - } - chs, err = svc.CreateChannels(context.Background(), otherToken, channel) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - chIDs2 := []string{} - for _, ch := range chs { - chIDs2 = append(chIDs2, ch.ID) - } - - cases := []struct { - desc string - channelIDs []string - thingIDs []string - auth string - contentType string - body string - statusCode int - }{ - { - desc: "connect existing things to existing channels", - channelIDs: chIDs1, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusOK, - }, - { - desc: "connect existing things to non-existent channels", - channelIDs: []string{strconv.FormatUint(wrongID, 10)}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "connect non-existing things to existing channels", - channelIDs: chIDs1, - thingIDs: []string{strconv.FormatUint(wrongID, 10)}, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "connect existing things to channel with invalid id", - channelIDs: []string{"invalid"}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "connect things with invalid id to existing channels", - channelIDs: chIDs1, - thingIDs: []string{"invalid"}, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "connect existing things to empty channel ids", - channelIDs: []string{""}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "connect empty things id to existing channels", - channelIDs: chIDs1, - thingIDs: []string{""}, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "connect existing things to existing channels with invalid token", - 
channelIDs: chIDs1, - thingIDs: thIDs, - auth: wrongValue, - contentType: contentType, - statusCode: http.StatusUnauthorized, - }, - { - desc: "connect existing things to existing channels with empty token", - channelIDs: chIDs1, - thingIDs: thIDs, - auth: "", - contentType: contentType, - statusCode: http.StatusUnauthorized, - }, - { - desc: "connect things from owner to channels of other user", - channelIDs: chIDs2, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "connect with invalid content type", - channelIDs: chIDs2, - thingIDs: thIDs, - auth: token, - contentType: "invalid", - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "connect with invalid JSON", - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - body: "{", - }, - { - desc: "connect valid thing ids with empty channel ids", - channelIDs: []string{}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "connect valid channel ids with empty thing ids", - channelIDs: chIDs1, - thingIDs: []string{}, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "connect empty channel ids and empty thing ids", - channelIDs: []string{}, - thingIDs: []string{}, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - data := struct { - ChannelIDs []string `json:"channel_ids"` - ThingIDs []string `json:"thing_ids"` - }{ - tc.channelIDs, - tc.thingIDs, - } - body := toJSON(data) - - if tc.body != "" { - body = tc.body - } - - req := testRequest{ - client: ts.Client(), - method: http.MethodPost, - url: fmt.Sprintf("%s/connect", ts.URL), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(body), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, 
tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestDisconnectList(t *testing.T) { - otherToken := otherExampleToken - otherEmail := otherExampleEmail - svc := newService(map[string]string{ - token: email, - otherToken: otherEmail, - }) - ts := newServer(svc) - defer ts.Close() - - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - thIDs := []string{} - for _, th := range ths { - thIDs = append(thIDs, th.ID) - } - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - chIDs1 := []string{} - for _, ch := range chs { - chIDs1 = append(chIDs1, ch.ID) - } - - chs, err = svc.CreateChannels(context.Background(), otherToken, channel) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - chIDs2 := []string{} - for _, ch := range chs { - chIDs2 = append(chIDs2, ch.ID) - } - - err = svc.Connect(context.Background(), token, chIDs1, thIDs) - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s\n", err)) - - cases := []struct { - desc string - channelIDs []string - thingIDs []string - auth string - contentType string - body string - statusCode int - }{ - { - desc: "disconnect existing things from existing channels", - channelIDs: chIDs1, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusOK, - }, - { - desc: "disconnect existing things from non-existent channels", - channelIDs: []string{strconv.FormatUint(wrongID, 10)}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect non-existing things from existing channels", - channelIDs: chIDs1, - thingIDs: []string{strconv.FormatUint(wrongID, 10)}, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: 
"disconnect existing things from channel with invalid id", - channelIDs: []string{"invalid"}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect things with invalid id from existing channels", - channelIDs: chIDs1, - thingIDs: []string{"invalid"}, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect existing things from empty channel ids", - channelIDs: []string{""}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "disconnect empty things id from existing channels", - channelIDs: chIDs1, - thingIDs: []string{""}, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "disconnect existing things from existing channels with invalid token", - channelIDs: chIDs1, - thingIDs: thIDs, - auth: wrongValue, - contentType: contentType, - statusCode: http.StatusUnauthorized, - }, - { - desc: "disconnect existing things from existing channels with empty token", - channelIDs: chIDs1, - thingIDs: thIDs, - auth: "", - contentType: contentType, - statusCode: http.StatusUnauthorized, - }, - { - desc: "disconnect things from channels of other user", - channelIDs: chIDs2, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect with invalid content type", - channelIDs: chIDs2, - thingIDs: thIDs, - auth: token, - contentType: "invalid", - statusCode: http.StatusUnsupportedMediaType, - }, - { - desc: "disconnect with invalid JSON", - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - body: "{", - }, - { - desc: "disconnect valid thing ids from empty channel ids", - channelIDs: []string{}, - thingIDs: thIDs, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "disconnect empty thing ids from valid channel ids", - 
channelIDs: chIDs1, - thingIDs: []string{}, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - { - desc: "disconnect empty thing ids from empty channel ids", - channelIDs: []string{}, - thingIDs: []string{}, - auth: token, - contentType: contentType, - statusCode: http.StatusBadRequest, - }, - } - - for _, tc := range cases { - data := struct { - ChannelIDs []string `json:"channel_ids"` - ThingIDs []string `json:"thing_ids"` - }{ - tc.channelIDs, - tc.thingIDs, - } - body := toJSON(data) - - if tc.body != "" { - body = tc.body - } - - req := testRequest{ - client: ts.Client(), - method: http.MethodPut, - url: fmt.Sprintf("%s/disconnect", ts.URL), - contentType: tc.contentType, - token: tc.auth, - body: strings.NewReader(body), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestDisconnnect(t *testing.T) { - otherToken := otherExampleToken - otherEmail := otherExampleEmail - svc := newService(map[string]string{ - token: email, - otherToken: otherEmail, - }) - ts := newServer(svc) - defer ts.Close() - - ths, _ := svc.CreateThings(context.Background(), token, thing) - th1 := ths[0] - - chs, _ := svc.CreateChannels(context.Background(), token, channel) - ch1 := chs[0] - - err := svc.Connect(context.Background(), token, []string{ch1.ID}, []string{th1.ID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - chs, _ = svc.CreateChannels(context.Background(), otherToken, channel) - ch2 := chs[0] - - cases := []struct { - desc string - chanID string - thingID string - auth string - statusCode int - }{ - { - desc: "disconnect connected thing from channel", - chanID: ch1.ID, - thingID: th1.ID, - auth: token, - statusCode: http.StatusNoContent, - }, - { - desc: "disconnect 
non-connected thing from channel", - chanID: ch1.ID, - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect non-existent thing from channel", - chanID: ch1.ID, - thingID: strconv.FormatUint(wrongID, 10), - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect thing from non-existent channel", - chanID: strconv.FormatUint(wrongID, 10), - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect thing from channel with invalid token", - chanID: ch1.ID, - thingID: th1.ID, - auth: wrongValue, - statusCode: http.StatusUnauthorized, - }, - { - desc: "disconnect thing from channel with empty token", - chanID: ch1.ID, - thingID: th1.ID, - auth: "", - statusCode: http.StatusUnauthorized, - }, - { - desc: "disconnect owner's thing from someone elses channel", - chanID: ch2.ID, - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect thing with invalid id from channel", - chanID: ch1.ID, - thingID: "invalid", - auth: token, - statusCode: http.StatusNotFound, - }, - { - desc: "disconnect thing from channel with invalid id", - chanID: "invalid", - thingID: th1.ID, - auth: token, - statusCode: http.StatusNotFound, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: ts.Client(), - method: http.MethodDelete, - url: fmt.Sprintf("%s/channels/%s/things/%s", ts.URL, tc.chanID, tc.thingID), - token: tc.auth, - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: got unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -type thingRes struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Key string `json:"key"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -type channelRes struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` 
- Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -type thingsPageRes struct { - Things []thingRes `json:"things"` - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` -} - -type channelsPageRes struct { - Channels []channelRes `json:"channels"` - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` -} diff --git a/things/api/things/http/requests.go b/things/api/things/http/requests.go deleted file mode 100644 index 2e63e8f170..0000000000 --- a/things/api/things/http/requests.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import ( - "github.com/gofrs/uuid" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/things" -) - -const ( - maxLimitSize = 100 - maxNameSize = 1024 - nameOrder = "name" - idOrder = "id" - ascDir = "asc" - descDir = "desc" - readPolicy = "read" - writePolicy = "write" - deletePolicy = "delete" -) - -type createThingReq struct { - token string - Name string `json:"name,omitempty"` - Key string `json:"key,omitempty"` - ID string `json:"id,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func validateUUID(extID string) (err error) { - id, err := uuid.FromString(extID) - if id.String() != extID || err != nil { - return apiutil.ErrInvalidIDFormat - } - - return nil -} - -func (req createThingReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if len(req.Name) > maxNameSize { - return apiutil.ErrNameSize - } - - // Do the validation only if request contains ID - if req.ID != "" { - return validateUUID(req.ID) - } - - return nil -} - -type createThingsReq struct { - token string - Things []createThingReq -} - -func (req createThingsReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if len(req.Things) <= 0 { - return apiutil.ErrEmptyList - } - - for _, thing 
:= range req.Things { - if thing.ID != "" { - if err := validateUUID(thing.ID); err != nil { - return err - } - } - - if len(thing.Name) > maxNameSize { - return apiutil.ErrNameSize - } - } - - return nil -} - -type shareThingReq struct { - token string - thingID string - UserIDs []string `json:"user_ids"` - Policies []string `json:"policies"` -} - -func (req shareThingReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.thingID == "" || len(req.UserIDs) == 0 { - return apiutil.ErrMissingID - } - - if len(req.Policies) == 0 { - return apiutil.ErrEmptyList - } - - for _, p := range req.Policies { - if p != readPolicy && p != writePolicy && p != deletePolicy { - return apiutil.ErrMalformedPolicy - } - } - return nil -} - -type updateThingReq struct { - token string - id string - Name string `json:"name,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (req updateThingReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - if len(req.Name) > maxNameSize { - return apiutil.ErrNameSize - } - - return nil -} - -type updateKeyReq struct { - token string - id string - Key string `json:"key"` -} - -func (req updateKeyReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - if req.Key == "" { - return apiutil.ErrBearerKey - } - - return nil -} - -type createChannelReq struct { - token string - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (req createChannelReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if len(req.Name) > maxNameSize { - return apiutil.ErrNameSize - } - - // Do the validation only if request contains ID - if req.ID != "" { - return validateUUID(req.ID) - } - - return nil -} - 
-type createChannelsReq struct { - token string - Channels []createChannelReq -} - -func (req createChannelsReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if len(req.Channels) <= 0 { - return apiutil.ErrEmptyList - } - - for _, channel := range req.Channels { - if channel.ID != "" { - if err := validateUUID(channel.ID); err != nil { - return err - } - } - - if len(channel.Name) > maxNameSize { - return apiutil.ErrNameSize - } - } - - return nil -} - -type updateChannelReq struct { - token string - id string - Name string `json:"name,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (req updateChannelReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - if len(req.Name) > maxNameSize { - return apiutil.ErrNameSize - } - - return nil -} - -type viewResourceReq struct { - token string - id string -} - -func (req viewResourceReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type listResourcesReq struct { - token string - pageMetadata things.PageMetadata -} - -func (req *listResourcesReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.pageMetadata.Limit > maxLimitSize || req.pageMetadata.Limit < 1 { - return apiutil.ErrLimitSize - } - - if len(req.pageMetadata.Name) > maxNameSize { - return apiutil.ErrNameSize - } - - if req.pageMetadata.Order != "" && - req.pageMetadata.Order != nameOrder && req.pageMetadata.Order != idOrder { - return apiutil.ErrInvalidOrder - } - - if req.pageMetadata.Dir != "" && - req.pageMetadata.Dir != ascDir && req.pageMetadata.Dir != descDir { - return apiutil.ErrInvalidDirection - } - - return nil -} - -type listByConnectionReq struct { - token string - id string - pageMetadata things.PageMetadata -} - -func (req listByConnectionReq) 
validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - - if req.pageMetadata.Limit > maxLimitSize || req.pageMetadata.Limit < 1 { - return apiutil.ErrLimitSize - } - - if req.pageMetadata.Order != "" && - req.pageMetadata.Order != nameOrder && req.pageMetadata.Order != idOrder { - return apiutil.ErrInvalidOrder - } - - if req.pageMetadata.Dir != "" && - req.pageMetadata.Dir != ascDir && req.pageMetadata.Dir != descDir { - return apiutil.ErrInvalidDirection - } - - return nil -} - -type connectThingReq struct { - token string - chanID string - thingID string -} - -func (req connectThingReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.chanID == "" || req.thingID == "" { - return apiutil.ErrMissingID - } - - return nil -} - -type connectReq struct { - token string - ChannelIDs []string `json:"channel_ids,omitempty"` - ThingIDs []string `json:"thing_ids,omitempty"` -} - -func (req connectReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if len(req.ChannelIDs) == 0 || len(req.ThingIDs) == 0 { - return apiutil.ErrEmptyList - } - - for _, chID := range req.ChannelIDs { - if chID == "" { - return apiutil.ErrMissingID - } - } - for _, thingID := range req.ThingIDs { - if thingID == "" { - return apiutil.ErrMissingID - } - } - - return nil -} - -type listThingsGroupReq struct { - token string - groupID string - pageMetadata things.PageMetadata -} - -func (req listThingsGroupReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.groupID == "" { - return apiutil.ErrMissingID - } - - if req.pageMetadata.Limit > maxLimitSize || req.pageMetadata.Limit < 1 { - return apiutil.ErrLimitSize - } - - if len(req.pageMetadata.Name) > maxNameSize { - return apiutil.ErrNameSize - } - - if req.pageMetadata.Order != "" && - req.pageMetadata.Order != nameOrder && req.pageMetadata.Order != 
idOrder { - return apiutil.ErrInvalidOrder - } - - if req.pageMetadata.Dir != "" && - req.pageMetadata.Dir != ascDir && req.pageMetadata.Dir != descDir { - return apiutil.ErrInvalidDirection - } - - return nil - -} diff --git a/things/api/things/http/responses.go b/things/api/things/http/responses.go deleted file mode 100644 index 011c4970c4..0000000000 --- a/things/api/things/http/responses.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import ( - "fmt" - "net/http" - - "github.com/mainflux/mainflux" -) - -var ( - _ mainflux.Response = (*removeRes)(nil) - _ mainflux.Response = (*thingRes)(nil) - _ mainflux.Response = (*viewThingRes)(nil) - _ mainflux.Response = (*thingsPageRes)(nil) - _ mainflux.Response = (*channelRes)(nil) - _ mainflux.Response = (*viewChannelRes)(nil) - _ mainflux.Response = (*channelsPageRes)(nil) - _ mainflux.Response = (*connectThingRes)(nil) - _ mainflux.Response = (*connectRes)(nil) - _ mainflux.Response = (*disconnectThingRes)(nil) - _ mainflux.Response = (*disconnectRes)(nil) - _ mainflux.Response = (*shareThingRes)(nil) -) - -type removeRes struct{} - -func (res removeRes) Code() int { - return http.StatusNoContent -} - -func (res removeRes) Headers() map[string]string { - return map[string]string{} -} - -func (res removeRes) Empty() bool { - return true -} - -type thingRes struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Key string `json:"key"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - created bool -} - -func (res thingRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res thingRes) Headers() map[string]string { - if res.created { - return map[string]string{ - "Location": fmt.Sprintf("/things/%s", res.ID), - "Warning-Deprecated": "This endpoint will be depreciated in v1.0.0. 
It will be replaced with the bulk endpoint currently found at /things/bulk.", - } - } - - return map[string]string{} -} - -func (res thingRes) Empty() bool { - return true -} - -type shareThingRes struct{} - -func (res shareThingRes) Code() int { - return http.StatusOK -} - -func (res shareThingRes) Headers() map[string]string { - return map[string]string{} -} - -func (res shareThingRes) Empty() bool { - return false -} - -type thingsRes struct { - Things []thingRes `json:"things"` - created bool -} - -func (res thingsRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res thingsRes) Headers() map[string]string { - return map[string]string{} -} - -func (res thingsRes) Empty() bool { - return false -} - -type viewThingRes struct { - ID string `json:"id"` - Owner string `json:"-"` - Name string `json:"name,omitempty"` - Key string `json:"key"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (res viewThingRes) Code() int { - return http.StatusOK -} - -func (res viewThingRes) Headers() map[string]string { - return map[string]string{} -} - -func (res viewThingRes) Empty() bool { - return false -} - -type thingsPageRes struct { - pageRes - Things []viewThingRes `json:"things"` -} - -func (res thingsPageRes) Code() int { - return http.StatusOK -} - -func (res thingsPageRes) Headers() map[string]string { - return map[string]string{} -} - -func (res thingsPageRes) Empty() bool { - return false -} - -type channelRes struct { - ID string `json:"id"` - Name string `json:"name,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - created bool -} - -func (res channelRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res channelRes) Headers() map[string]string { - if res.created { - return map[string]string{ - "Location": fmt.Sprintf("/channels/%s", res.ID), - "Warning-Deprecated": "This endpoint will be depreciated in v1.0.0. 
It will be replaced with the bulk endpoint currently found at /channels/bulk.", - } - } - - return map[string]string{} -} - -func (res channelRes) Empty() bool { - return true -} - -type channelsRes struct { - Channels []channelRes `json:"channels"` - created bool -} - -func (res channelsRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res channelsRes) Headers() map[string]string { - return map[string]string{} -} - -func (res channelsRes) Empty() bool { - return false -} - -type viewChannelRes struct { - ID string `json:"id"` - Owner string `json:"-"` - Name string `json:"name,omitempty"` - Things []viewThingRes `json:"connected,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (res viewChannelRes) Code() int { - return http.StatusOK -} - -func (res viewChannelRes) Headers() map[string]string { - return map[string]string{} -} - -func (res viewChannelRes) Empty() bool { - return false -} - -type channelsPageRes struct { - pageRes - Channels []viewChannelRes `json:"channels"` -} - -func (res channelsPageRes) Code() int { - return http.StatusOK -} - -func (res channelsPageRes) Headers() map[string]string { - return map[string]string{} -} - -func (res channelsPageRes) Empty() bool { - return false -} - -type connectThingRes struct{} - -func (res connectThingRes) Code() int { - return http.StatusOK -} - -func (res connectThingRes) Headers() map[string]string { - return map[string]string{ - "Warning-Deprecated": "This endpoint will be depreciated in v1.0.0. 
It will be replaced with the bulk endpoint found at /connect.", - } -} - -func (res connectThingRes) Empty() bool { - return true -} - -type connectRes struct{} - -func (res connectRes) Code() int { - return http.StatusOK -} - -func (res connectRes) Headers() map[string]string { - return map[string]string{} -} - -func (res connectRes) Empty() bool { - return true -} - -type disconnectRes struct{} - -func (res disconnectRes) Code() int { - return http.StatusOK -} - -func (res disconnectRes) Headers() map[string]string { - return map[string]string{} -} - -func (res disconnectRes) Empty() bool { - return true -} - -type disconnectThingRes struct{} - -func (res disconnectThingRes) Code() int { - return http.StatusNoContent -} - -func (res disconnectThingRes) Headers() map[string]string { - return map[string]string{} -} - -func (res disconnectThingRes) Empty() bool { - return true -} - -type pageRes struct { - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` - Order string `json:"order"` - Dir string `json:"direction"` -} diff --git a/things/api/things/http/transport.go b/things/api/things/http/transport.go deleted file mode 100644 index 43a2d9e39d..0000000000 --- a/things/api/things/http/transport.go +++ /dev/null @@ -1,600 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package http - -import ( - "context" - "encoding/json" - "net/http" - "strings" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/internal/apiutil" - log "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - opentracing "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -const ( - contentType = "application/json" - offsetKey = 
"offset" - limitKey = "limit" - nameKey = "name" - orderKey = "order" - dirKey = "dir" - metadataKey = "metadata" - disconnKey = "disconnected" - sharedKey = "shared" - defOffset = 0 - defLimit = 10 -) - -// MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(tracer opentracing.Tracer, svc things.Service, logger log.Logger) http.Handler { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), - } - - r := bone.New() - - r.Post("/things", kithttp.NewServer( - kitot.TraceServer(tracer, "create_thing")(createThingEndpoint(svc)), - decodeThingCreation, - encodeResponse, - opts..., - )) - - r.Post("/things/bulk", kithttp.NewServer( - kitot.TraceServer(tracer, "create_things")(createThingsEndpoint(svc)), - decodeThingsCreation, - encodeResponse, - opts..., - )) - - r.Post("/things/:thingID/share", kithttp.NewServer( - kitot.TraceServer(tracer, "share_thing")(shareThingEndpoint(svc)), - decodeShareThing, - encodeResponse, - opts..., - )) - - r.Patch("/things/:thingID/key", kithttp.NewServer( - kitot.TraceServer(tracer, "update_key")(updateKeyEndpoint(svc)), - decodeKeyUpdate, - encodeResponse, - opts..., - )) - - r.Put("/things/:thingID", kithttp.NewServer( - kitot.TraceServer(tracer, "update_thing")(updateThingEndpoint(svc)), - decodeThingUpdate, - encodeResponse, - opts..., - )) - - r.Delete("/things/:thingID", kithttp.NewServer( - kitot.TraceServer(tracer, "remove_thing")(removeThingEndpoint(svc)), - decodeThingView, - encodeResponse, - opts..., - )) - - r.Get("/things/:thingID", kithttp.NewServer( - kitot.TraceServer(tracer, "view_thing")(viewThingEndpoint(svc)), - decodeThingView, - encodeResponse, - opts..., - )) - - r.Get("/things/:thingID/channels", kithttp.NewServer( - kitot.TraceServer(tracer, "list_channels_by_thing")(listChannelsByThingEndpoint(svc)), - decodeThingListByConnection, - encodeResponse, - opts..., - )) - - r.Get("/things", kithttp.NewServer( - kitot.TraceServer(tracer, 
"list_things")(listThingsEndpoint(svc)), - decodeList, - encodeResponse, - opts..., - )) - - r.Post("/things/search", kithttp.NewServer( - kitot.TraceServer(tracer, "search_things")(listThingsEndpoint(svc)), - decodeListByMetadata, - encodeResponse, - opts..., - )) - - r.Post("/channels", kithttp.NewServer( - kitot.TraceServer(tracer, "create_channel")(createChannelEndpoint(svc)), - decodeChannelCreation, - encodeResponse, - opts..., - )) - - r.Post("/channels/bulk", kithttp.NewServer( - kitot.TraceServer(tracer, "create_channels")(createChannelsEndpoint(svc)), - decodeChannelsCreation, - encodeResponse, - opts..., - )) - - r.Put("/channels/:chanID", kithttp.NewServer( - kitot.TraceServer(tracer, "update_channel")(updateChannelEndpoint(svc)), - decodeChannelUpdate, - encodeResponse, - opts..., - )) - - r.Delete("/channels/:chanID", kithttp.NewServer( - kitot.TraceServer(tracer, "remove_channel")(removeChannelEndpoint(svc)), - decodeChannelView, - encodeResponse, - opts..., - )) - - r.Get("/channels/:chanID", kithttp.NewServer( - kitot.TraceServer(tracer, "view_channel")(viewChannelEndpoint(svc)), - decodeChannelView, - encodeResponse, - opts..., - )) - - r.Get("/channels/:chanID/things", kithttp.NewServer( - kitot.TraceServer(tracer, "list_things_by_channel")(listThingsByChannelEndpoint(svc)), - decodeChannelListByConnection, - encodeResponse, - opts..., - )) - - r.Get("/channels", kithttp.NewServer( - kitot.TraceServer(tracer, "list_channels")(listChannelsEndpoint(svc)), - decodeList, - encodeResponse, - opts..., - )) - - r.Post("/connect", kithttp.NewServer( - kitot.TraceServer(tracer, "connect")(connectEndpoint(svc)), - decodeConnectList, - encodeResponse, - opts..., - )) - - r.Put("/disconnect", kithttp.NewServer( - kitot.TraceServer(tracer, "disconnect")(disconnectEndpoint(svc)), - decodeConnectList, - encodeResponse, - opts..., - )) - - r.Put("/channels/:chanID/things/:thingID", kithttp.NewServer( - kitot.TraceServer(tracer, 
"connect_thing")(connectThingEndpoint(svc)), - decodeConnectThing, - encodeResponse, - opts..., - )) - - r.Delete("/channels/:chanID/things/:thingID", kithttp.NewServer( - kitot.TraceServer(tracer, "disconnect_thing")(disconnectThingEndpoint(svc)), - decodeConnectThing, - encodeResponse, - opts..., - )) - - r.Get("/groups/:groupID", kithttp.NewServer( - kitot.TraceServer(tracer, "list_members")(listMembersEndpoint(svc)), - decodeListMembersRequest, - encodeResponse, - opts..., - )) - - r.GetFunc("/health", mainflux.Health("things")) - r.Handle("/metrics", promhttp.Handler()) - - return r -} - -func decodeThingCreation(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := createThingReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeThingsCreation(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := createThingsReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req.Things); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeShareThing(ctx context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := shareThingReq{ - token: apiutil.ExtractBearerToken(r), - thingID: bone.GetValue(r, "thingID"), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeThingUpdate(_ context.Context, r *http.Request) (interface{}, error) { - if 
!strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := updateThingReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "thingID"), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeKeyUpdate(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := updateKeyReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "thingID"), - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeChannelCreation(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := createChannelReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeChannelsCreation(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := createChannelsReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req.Channels); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeChannelUpdate(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := updateChannelReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "chanID"), - } - if err 
:= json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeChannelView(_ context.Context, r *http.Request) (interface{}, error) { - req := viewResourceReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "chanID"), - } - - return req, nil -} -func decodeThingView(_ context.Context, r *http.Request) (interface{}, error) { - req := viewResourceReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "thingID"), - } - - return req, nil -} - -func decodeList(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - n, err := apiutil.ReadStringQuery(r, nameKey, "") - if err != nil { - return nil, err - } - - or, err := apiutil.ReadStringQuery(r, orderKey, "") - if err != nil { - return nil, err - } - - d, err := apiutil.ReadStringQuery(r, dirKey, "") - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - shared, err := apiutil.ReadBoolQuery(r, sharedKey, false) - if err != nil { - return nil, err - } - - req := listResourcesReq{ - token: apiutil.ExtractBearerToken(r), - pageMetadata: things.PageMetadata{ - Offset: o, - Limit: l, - Name: n, - Order: or, - Dir: d, - Metadata: m, - FetchSharedThings: shared, - }, - } - - return req, nil -} - -func decodeListByMetadata(_ context.Context, r *http.Request) (interface{}, error) { - req := listResourcesReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req.pageMetadata); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeChannelListByConnection(_ context.Context, r *http.Request) (interface{}, error) { - o, err := 
apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - c, err := apiutil.ReadBoolQuery(r, disconnKey, false) - if err != nil { - return nil, err - } - - or, err := apiutil.ReadStringQuery(r, orderKey, "") - if err != nil { - return nil, err - } - - d, err := apiutil.ReadStringQuery(r, dirKey, "") - if err != nil { - return nil, err - } - - req := listByConnectionReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "chanID"), - pageMetadata: things.PageMetadata{ - Offset: o, - Limit: l, - Disconnected: c, - Order: or, - Dir: d, - }, - } - - return req, nil -} - -func decodeThingListByConnection(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - c, err := apiutil.ReadBoolQuery(r, disconnKey, false) - if err != nil { - return nil, err - } - - or, err := apiutil.ReadStringQuery(r, orderKey, "") - if err != nil { - return nil, err - } - - d, err := apiutil.ReadStringQuery(r, dirKey, "") - if err != nil { - return nil, err - } - - req := listByConnectionReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "thingID"), - pageMetadata: things.PageMetadata{ - Offset: o, - Limit: l, - Disconnected: c, - Order: or, - Dir: d, - }, - } - - return req, nil -} - -func decodeConnectThing(_ context.Context, r *http.Request) (interface{}, error) { - req := connectThingReq{ - token: apiutil.ExtractBearerToken(r), - chanID: bone.GetValue(r, "chanID"), - thingID: bone.GetValue(r, "thingID"), - } - - return req, nil -} - -func decodeConnectList(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } 
- - req := connectReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeListMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - - req := listThingsGroupReq{ - token: apiutil.ExtractBearerToken(r), - groupID: bone.GetValue(r, "groupID"), - pageMetadata: things.PageMetadata{ - Offset: o, - Limit: l, - Metadata: m, - }, - } - return req, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - w.Header().Set("Content-Type", contentType) - - if ar, ok := response.(mainflux.Response); ok { - for k, v := range ar.Headers() { - w.Header().Set(k, v) - } - - w.WriteHeader(ar.Code()) - - if ar.Empty() { - return nil - } - } - - return json.NewEncoder(w).Encode(response) -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch { - // ErrNotFound can be masked by ErrAuthentication, but it has priority. 
- case errors.Contains(err, errors.ErrNotFound): - w.WriteHeader(http.StatusNotFound) - case errors.Contains(err, errors.ErrAuthentication), - err == apiutil.ErrBearerToken: - w.WriteHeader(http.StatusUnauthorized) - case errors.Contains(err, errors.ErrAuthorization): - w.WriteHeader(http.StatusForbidden) - case errors.Contains(err, errors.ErrUnsupportedContentType): - w.WriteHeader(http.StatusUnsupportedMediaType) - case errors.Contains(err, errors.ErrInvalidQueryParams), - errors.Contains(err, errors.ErrMalformedEntity), - err == apiutil.ErrNameSize, - err == apiutil.ErrEmptyList, - err == apiutil.ErrMissingID, - err == apiutil.ErrMalformedPolicy, - err == apiutil.ErrBearerKey, - err == apiutil.ErrLimitSize, - err == apiutil.ErrOffsetSize, - err == apiutil.ErrInvalidOrder, - err == apiutil.ErrInvalidDirection, - err == apiutil.ErrInvalidIDFormat: - w.WriteHeader(http.StatusBadRequest) - case errors.Contains(err, errors.ErrConflict): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrScanMetadata): - w.WriteHeader(http.StatusUnprocessableEntity) - - case errors.Contains(err, errors.ErrCreateEntity), - errors.Contains(err, errors.ErrUpdateEntity), - errors.Contains(err, errors.ErrViewEntity), - errors.Contains(err, errors.ErrRemoveEntity): - w.WriteHeader(http.StatusInternalServerError) - - case errors.Contains(err, uuid.ErrGeneratingID): - w.WriteHeader(http.StatusInternalServerError) - - default: - w.WriteHeader(http.StatusInternalServerError) - } - - if errorVal, ok := err.(errors.Error); ok { - w.Header().Set("Content-Type", contentType) - if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { - w.WriteHeader(http.StatusInternalServerError) - } - } -} diff --git a/things/channels.go b/things/channels.go deleted file mode 100644 index a8ca01ab51..0000000000 --- a/things/channels.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package things - -import 
( - "context" -) - -// Channel represents a Mainflux "communication group". This group contains the -// things that can exchange messages between each other. -type Channel struct { - ID string - Owner string - Name string - Metadata map[string]interface{} -} - -// ChannelsPage contains page related metadata as well as list of channels that -// belong to this page. -type ChannelsPage struct { - PageMetadata - Channels []Channel -} - -// ChannelRepository specifies a channel persistence API. -type ChannelRepository interface { - // Save persists multiple channels. Channels are saved using a transaction. If one channel - // fails then none will be saved. Successful operation is indicated by non-nil - // error response. - Save(ctx context.Context, chs ...Channel) ([]Channel, error) - - // Update performs an update to the existing channel. A non-nil error is - // returned to indicate operation failure. - Update(ctx context.Context, c Channel) error - - // RetrieveByID retrieves the channel having the provided identifier, that is owned - // by the specified user. - RetrieveByID(ctx context.Context, owner, id string) (Channel, error) - - // RetrieveAll retrieves the subset of channels owned by the specified user. - RetrieveAll(ctx context.Context, owner string, pm PageMetadata) (ChannelsPage, error) - - // RetrieveByThing retrieves the subset of channels owned by the specified - // user and have specified thing connected or not connected to them. - RetrieveByThing(ctx context.Context, owner, thID string, pm PageMetadata) (ChannelsPage, error) - - // Remove removes the channel having the provided identifier, that is owned - // by the specified user. - Remove(ctx context.Context, owner, id string) error - - // Connect adds things to the channels list of connected things. - Connect(ctx context.Context, owner string, chIDs, thIDs []string) error - - // Disconnect removes things from the channels list of connected - // things. 
- Disconnect(ctx context.Context, owner string, chIDs, thIDs []string) error - - // HasThing determines whether the thing with the provided access key, is - // "connected" to the specified channel. If that's the case, it returns - // thing's ID. - HasThing(ctx context.Context, chanID, key string) (string, error) - - // HasThingByID determines whether the thing with the provided ID, is - // "connected" to the specified channel. If that's the case, then - // returned error will be nil. - HasThingByID(ctx context.Context, chanID, thingID string) error -} - -// ChannelCache contains channel-thing connection caching interface. -type ChannelCache interface { - // Connect channel thing connection. - Connect(context.Context, string, string) error - - // HasThing checks if thing is connected to channel. - HasThing(context.Context, string, string) bool - - // Disconnects thing from channel. - Disconnect(context.Context, string, string) error - - // Removes channel from cache. - Remove(context.Context, string) error -} diff --git a/things/clients/api/endpoints.go b/things/clients/api/endpoints.go new file mode 100644 index 0000000000..9d9f9a5127 --- /dev/null +++ b/things/clients/api/endpoints.go @@ -0,0 +1,247 @@ +package api + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/things/clients" +) + +func createClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createClientReq) + if err := req.validate(); err != nil { + return createClientRes{}, err + } + client, err := svc.CreateThings(ctx, req.token, req.client) + if err != nil { + return createClientRes{}, err + } + ucr := createClientRes{ + Client: client[0], + created: true, + } + + return ucr, nil + } +} + +func createClientsEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request 
interface{}) (interface{}, error) { + req := request.(createClientsReq) + if err := req.validate(); err != nil { + return clientsPageRes{}, err + } + page, err := svc.CreateThings(ctx, req.token, req.Clients...) + if err != nil { + return clientsPageRes{}, err + } + res := clientsPageRes{ + pageRes: pageRes{ + Total: uint64(len(page)), + }, + Clients: []viewClientRes{}, + } + for _, c := range page { + res.Clients = append(res.Clients, viewClientRes{Client: c}) + } + return res, nil + } +} + +func viewClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(viewClientReq) + if err := req.validate(); err != nil { + return nil, err + } + + c, err := svc.ViewClient(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return viewClientRes{Client: c}, nil + } +} + +func listClientsEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listClientsReq) + if err := req.validate(); err != nil { + return mfclients.ClientsPage{}, err + } + + pm := mfclients.Page{ + SharedBy: req.sharedBy, + Status: req.status, + Offset: req.offset, + Limit: req.limit, + Owner: req.owner, + Name: req.name, + Tag: req.tag, + Metadata: req.metadata, + } + page, err := svc.ListClients(ctx, req.token, pm) + if err != nil { + return mfclients.ClientsPage{}, err + } + + res := clientsPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + Clients: []viewClientRes{}, + } + for _, c := range page.Clients { + res.Clients = append(res.Clients, viewClientRes{Client: c}) + } + + return res, nil + } +} + +func listMembersEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listMembersReq) + if err := req.validate(); err != nil { + return memberPageRes{}, err + } + page, 
err := svc.ListClientsByGroup(ctx, req.token, req.groupID, req.Page) + if err != nil { + return memberPageRes{}, err + } + return buildMembersResponse(page), nil + } +} + +func updateClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientReq) + if err := req.validate(); err != nil { + return nil, err + } + + cli := mfclients.Client{ + ID: req.id, + Name: req.Name, + Metadata: req.Metadata, + } + client, err := svc.UpdateClient(ctx, req.token, cli) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func updateClientTagsEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientTagsReq) + if err := req.validate(); err != nil { + return nil, err + } + + cli := mfclients.Client{ + ID: req.id, + Tags: req.Tags, + } + client, err := svc.UpdateClientTags(ctx, req.token, cli) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func shareClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(shareClientReq) + if err := req.validate(); err != nil { + return nil, err + } + if err := svc.ShareClient(ctx, req.token, req.clientID, req.Policies, req.UserIDs); err != nil { + return nil, err + } + return shareClientRes{}, nil + } +} + +func updateClientSecretEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientCredentialsReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.UpdateClientSecret(ctx, req.token, req.id, req.Secret) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func 
updateClientOwnerEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientOwnerReq) + if err := req.validate(); err != nil { + return nil, err + } + + cli := mfclients.Client{ + ID: req.id, + Owner: req.Owner, + } + + client, err := svc.UpdateClientOwner(ctx, req.token, cli) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func enableClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeClientStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.EnableClient(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return deleteClientRes{Client: client}, nil + } +} + +func disableClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeClientStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.DisableClient(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return deleteClientRes{Client: client}, nil + } +} + +func buildMembersResponse(cp mfclients.MembersPage) memberPageRes { + res := memberPageRes{ + pageRes: pageRes{ + Total: cp.Total, + Offset: cp.Offset, + Limit: cp.Limit, + }, + Members: []viewMembersRes{}, + } + for _, c := range cp.Members { + res.Members = append(res.Members, viewMembersRes{Client: c}) + } + return res +} diff --git a/things/clients/api/logging.go b/things/clients/api/logging.go new file mode 100644 index 0000000000..aad43ce486 --- /dev/null +++ b/things/clients/api/logging.go @@ -0,0 +1,166 @@ +package api + +import ( + "context" + "fmt" + "time" + + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + 
"github.com/mainflux/mainflux/things/clients" +) + +var _ clients.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger mflog.Logger + svc clients.Service +} + +func LoggingMiddleware(svc clients.Service, logger mflog.Logger) clients.Service { + return &loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) CreateThings(ctx context.Context, token string, clients ...mfclients.Client) (cs []mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method create_things %d things using token %s took %s to complete", len(cs), token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.CreateThings(ctx, token, clients...) +} + +func (lm *loggingMiddleware) ViewClient(ctx context.Context, token, id string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method view_thing for thing with id %s using token %s took %s to complete", id, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ViewClient(ctx, token, id) +} + +func (lm *loggingMiddleware) ListClients(ctx context.Context, token string, pm mfclients.Page) (cp mfclients.ClientsPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_things using token %s took %s to complete", token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListClients(ctx, token, pm) +} + +func (lm *loggingMiddleware) UpdateClient(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer 
func(begin time.Time) { + message := fmt.Sprintf("Method update_thing_name_and_metadata for thing with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClient(ctx, token, client) +} + +func (lm *loggingMiddleware) UpdateClientTags(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_thing_tags for thing with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClientTags(ctx, token, client) +} + +func (lm *loggingMiddleware) UpdateClientSecret(ctx context.Context, token, oldSecret, newSecret string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_thing_secret for thing with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClientSecret(ctx, token, oldSecret, newSecret) +} + +func (lm *loggingMiddleware) UpdateClientOwner(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_thing_owner for thing with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + 
}(time.Now()) + return lm.svc.UpdateClientOwner(ctx, token, client) +} + +func (lm *loggingMiddleware) EnableClient(ctx context.Context, token string, id string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method enable_thing for thing with id %s using token %s took %s to complete", id, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.EnableClient(ctx, token, id) +} + +func (lm *loggingMiddleware) DisableClient(ctx context.Context, token string, id string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method disable_thing for thing with id %s using token %s took %s to complete", id, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.DisableClient(ctx, token, id) +} + +func (lm *loggingMiddleware) ListClientsByGroup(ctx context.Context, token, channelID string, cp mfclients.Page) (mp mfclients.MembersPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_things_by_channel for channel with id %s using token %s took %s to complete", channelID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListClientsByGroup(ctx, token, channelID, cp) +} + +func (lm *loggingMiddleware) ShareClient(ctx context.Context, token, id string, actions, userIDs []string) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method share_thing for thing with id %s using token %s took %s to complete", id, token, time.Since(begin)) + if err != nil { + 
lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ShareClient(ctx, token, id, actions, userIDs) +} + +func (lm *loggingMiddleware) Identify(ctx context.Context, key string) (id string, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method identify for thing with id %s and key %s took %s to complete", id, key, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.Identify(ctx, key) +} diff --git a/things/clients/api/metrics.go b/things/clients/api/metrics.go new file mode 100644 index 0000000000..4855a103f2 --- /dev/null +++ b/things/clients/api/metrics.go @@ -0,0 +1,123 @@ +package api + +import ( + "context" + "time" + + "github.com/go-kit/kit/metrics" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/things/clients" +) + +var _ clients.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc clients.Service +} + +// MetricsMiddleware returns a new metrics middleware wrapper. +func MetricsMiddleware(svc clients.Service, counter metrics.Counter, latency metrics.Histogram) clients.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) CreateThings(ctx context.Context, token string, clients ...mfclients.Client) ([]mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "register_things").Add(1) + ms.latency.With("method", "register_things").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.CreateThings(ctx, token, clients...) 
+} + +func (ms *metricsMiddleware) ViewClient(ctx context.Context, token, id string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_thing").Add(1) + ms.latency.With("method", "view_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewClient(ctx, token, id) +} + +func (ms *metricsMiddleware) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_things").Add(1) + ms.latency.With("method", "list_things").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListClients(ctx, token, pm) +} + +func (ms *metricsMiddleware) UpdateClient(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_thing_name_and_metadata").Add(1) + ms.latency.With("method", "update_thing_name_and_metadata").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClient(ctx, token, client) +} + +func (ms *metricsMiddleware) UpdateClientTags(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_thing_tags").Add(1) + ms.latency.With("method", "update_thing_tags").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClientTags(ctx, token, client) +} + +func (ms *metricsMiddleware) ShareClient(ctx context.Context, token, id string, actions, userIDs []string) error { + defer func(begin time.Time) { + ms.counter.With("method", "share_thing").Add(1) + ms.latency.With("method", "share_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ShareClient(ctx, token, id, actions, userIDs) +} + +func (ms *metricsMiddleware) UpdateClientSecret(ctx context.Context, token, oldSecret, newSecret string) (mfclients.Client, error) { + defer func(begin 
time.Time) { + ms.counter.With("method", "update_thing_secret").Add(1) + ms.latency.With("method", "update_thing_secret").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClientSecret(ctx, token, oldSecret, newSecret) +} + +func (ms *metricsMiddleware) UpdateClientOwner(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_thing_owner").Add(1) + ms.latency.With("method", "update_thing_owner").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClientOwner(ctx, token, client) +} + +func (ms *metricsMiddleware) EnableClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "enable_thing").Add(1) + ms.latency.With("method", "enable_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.EnableClient(ctx, token, id) +} + +func (ms *metricsMiddleware) DisableClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "disable_thing").Add(1) + ms.latency.With("method", "disable_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DisableClient(ctx, token, id) +} + +func (ms *metricsMiddleware) ListClientsByGroup(ctx context.Context, token, groupID string, pm mfclients.Page) (mp mfclients.MembersPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_things_by_channel").Add(1) + ms.latency.With("method", "list_things_by_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListClientsByGroup(ctx, token, groupID, pm) +} + +func (ms *metricsMiddleware) Identify(ctx context.Context, key string) (string, error) { + defer func(begin time.Time) { + ms.counter.With("method", "identify_thing").Add(1) + ms.latency.With("method", 
"identify_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.Identify(ctx, key) +} diff --git a/things/clients/api/requests.go b/things/clients/api/requests.go new file mode 100644 index 0000000000..0be09df222 --- /dev/null +++ b/things/clients/api/requests.go @@ -0,0 +1,230 @@ +package api + +import ( + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" +) + +type createClientReq struct { + client mfclients.Client + token string +} + +func (req createClientReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if len(req.client.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + // Do the validation only if request contains ID + if req.client.ID != "" { + return api.ValidateUUID(req.client.ID) + } + + return nil +} + +type createClientsReq struct { + token string + Clients []mfclients.Client +} + +func (req createClientsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if len(req.Clients) == 0 { + return apiutil.ErrEmptyList + } + + for _, client := range req.Clients { + if client.ID != "" { + if err := api.ValidateUUID(client.ID); err != nil { + return err + } + } + if len(client.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + } + + return nil +} + +type viewClientReq struct { + token string + id string +} + +func (req viewClientReq) validate() error { + return nil +} + +type listClientsReq struct { + token string + status mfclients.Status + offset uint64 + limit uint64 + name string + tag string + owner string + sharedBy string + visibility string + metadata mfclients.Metadata +} + +func (req listClientsReq) validate() error { + if req.limit > api.MaxLimitSize || req.limit < 1 { + return apiutil.ErrLimitSize + } + if req.visibility != "" && + req.visibility != api.AllVisibility && + req.visibility != api.MyVisibility && + req.visibility != 
api.SharedVisibility { + return apiutil.ErrInvalidVisibilityType + } + if req.limit > api.MaxLimitSize || req.limit < 1 { + return apiutil.ErrLimitSize + } + + if len(req.name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + + return nil +} + +type listMembersReq struct { + mfclients.Page + token string + groupID string +} + +func (req listMembersReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.groupID == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type updateClientReq struct { + token string + id string + Name string `json:"name,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +func (req updateClientReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.id == "" { + return apiutil.ErrMissingID + } + if len(req.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + return nil +} + +type updateClientTagsReq struct { + id string + token string + Tags []string `json:"tags,omitempty"` +} + +func (req updateClientTagsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type updateClientOwnerReq struct { + id string + token string + Owner string `json:"owner,omitempty"` +} + +func (req updateClientOwnerReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + if req.Owner == "" { + return apiutil.ErrMissingOwner + } + return nil +} + +type updateClientCredentialsReq struct { + token string + id string + Secret string `json:"secret,omitempty"` +} + +func (req updateClientCredentialsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + + if req.Secret == "" { + return apiutil.ErrBearerKey + } + + return nil +} + +type 
changeClientStatusReq struct { + token string + id string +} + +func (req changeClientStatusReq) validate() error { + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type shareClientReq struct { + token string + clientID string + UserIDs []string `json:"user_ids"` + Policies []string `json:"policies"` +} + +func (req shareClientReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.clientID == "" || len(req.UserIDs) == 0 { + return apiutil.ErrMissingID + } + + if len(req.Policies) == 0 { + return apiutil.ErrEmptyList + } + + return nil +} diff --git a/things/clients/api/responses.go b/things/clients/api/responses.go new file mode 100644 index 0000000000..9b1122f593 --- /dev/null +++ b/things/clients/api/responses.go @@ -0,0 +1,164 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/mainflux/mainflux" + mfclients "github.com/mainflux/mainflux/pkg/clients" +) + +var ( + _ mainflux.Response = (*viewClientRes)(nil) + _ mainflux.Response = (*createClientRes)(nil) + _ mainflux.Response = (*deleteClientRes)(nil) + _ mainflux.Response = (*clientsPageRes)(nil) + _ mainflux.Response = (*viewMembersRes)(nil) + _ mainflux.Response = (*memberPageRes)(nil) + _ mainflux.Response = (*shareClientRes)(nil) +) + +type pageRes struct { + Limit uint64 `json:"limit,omitempty"` + Offset uint64 `json:"offset,omitempty"` + Total uint64 `json:"total,omitempty"` +} + +type createClientRes struct { + mfclients.Client + created bool +} + +func (res createClientRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res createClientRes) Headers() map[string]string { + if res.created { + return map[string]string{ + "Location": fmt.Sprintf("/things/%s", res.ID), + } + } + + return map[string]string{} +} + +func (res createClientRes) Empty() bool { + return false +} + +type updateClientRes struct { + mfclients.Client +} + +func (res updateClientRes) Code() int { + return 
http.StatusOK +} + +func (res updateClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res updateClientRes) Empty() bool { + return false +} + +type viewClientRes struct { + mfclients.Client +} + +func (res viewClientRes) Code() int { + return http.StatusOK +} + +func (res viewClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewClientRes) Empty() bool { + return false +} + +type clientsPageRes struct { + pageRes + Clients []viewClientRes `json:"things"` +} + +func (res clientsPageRes) Code() int { + return http.StatusOK +} + +func (res clientsPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res clientsPageRes) Empty() bool { + return false +} + +type viewMembersRes struct { + mfclients.Client +} + +func (res viewMembersRes) Code() int { + return http.StatusOK +} + +func (res viewMembersRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewMembersRes) Empty() bool { + return false +} + +type memberPageRes struct { + pageRes + Members []viewMembersRes `json:"things"` +} + +func (res memberPageRes) Code() int { + return http.StatusOK +} + +func (res memberPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res memberPageRes) Empty() bool { + return false +} + +type deleteClientRes struct { + mfclients.Client +} + +func (res deleteClientRes) Code() int { + return http.StatusOK +} + +func (res deleteClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res deleteClientRes) Empty() bool { + return false +} + +type shareClientRes struct{} + +func (res shareClientRes) Code() int { + return http.StatusOK +} + +func (res shareClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res shareClientRes) Empty() bool { + return false +} diff --git a/things/clients/api/transport.go b/things/clients/api/transport.go new file mode 100644 index 0000000000..fcd91bb76f --- 
/dev/null +++ b/things/clients/api/transport.go @@ -0,0 +1,330 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + kithttp "github.com/go-kit/kit/transport/http" + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/clients" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" +) + +// MakeHandler returns a HTTP handler for API endpoints. +func MakeHandler(svc clients.Service, mux *bone.Mux, logger mflog.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + + mux.Post("/things", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("create_thing"))(createClientEndpoint(svc)), + decodeCreateClientReq, + api.EncodeResponse, + opts..., + )) + + mux.Post("/things/bulk", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("create_things"))(createClientsEndpoint(svc)), + decodeCreateClientsReq, + api.EncodeResponse, + opts..., + )) + + mux.Get("/things/:thingID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("view_thing"))(viewClientEndpoint(svc)), + decodeViewClient, + api.EncodeResponse, + opts..., + )) + + mux.Get("/things", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_things"))(listClientsEndpoint(svc)), + decodeListClients, + api.EncodeResponse, + opts..., + )) + + mux.Get("/channels/:thingID/things", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_things_by_channel"))(listMembersEndpoint(svc)), + decodeListMembersRequest, + api.EncodeResponse, + opts..., + )) + 
+ mux.Patch("/things/:thingID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_thing_name_and_metadata"))(updateClientEndpoint(svc)), + decodeUpdateClient, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/things/:thingID/tags", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_thing_tags"))(updateClientTagsEndpoint(svc)), + decodeUpdateClientTags, + api.EncodeResponse, + opts..., + )) + + mux.Post("/things/:thingID/share", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("share_thing"))(shareClientEndpoint(svc)), + decodeShareClient, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/things/:thingID/secret", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_thing_secret"))(updateClientSecretEndpoint(svc)), + decodeUpdateClientCredentials, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/things/:thingID/owner", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_thing_owner"))(updateClientOwnerEndpoint(svc)), + decodeUpdateClientOwner, + api.EncodeResponse, + opts..., + )) + + mux.Post("/things/:thingID/enable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("enable_thing"))(enableClientEndpoint(svc)), + decodeChangeClientStatus, + api.EncodeResponse, + opts..., + )) + + mux.Post("/things/:thingID/disable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("disable_thing"))(disableClientEndpoint(svc)), + decodeChangeClientStatus, + api.EncodeResponse, + opts..., + )) + + mux.GetFunc("/health", mainflux.Health("things")) + mux.Handle("/metrics", promhttp.Handler()) + return mux +} + +func decodeViewClient(_ context.Context, r *http.Request) (interface{}, error) { + req := viewClientReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "thingID"), + } + + return req, nil +} + +func decodeShareClient(_ context.Context, r *http.Request) (interface{}, error) { + if 
!strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + req := shareClientReq{ + token: apiutil.ExtractBearerToken(r), + clientID: bone.GetValue(r, "thingID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeListClients(_ context.Context, r *http.Request) (interface{}, error) { + var sid, oid string + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefClientStatus) + if err != nil { + return nil, err + } + o, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + l, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + m, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + n, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + t, err := apiutil.ReadStringQuery(r, api.TagKey, "") + if err != nil { + return nil, err + } + visibility, err := apiutil.ReadStringQuery(r, api.VisibilityKey, api.MyVisibility) + if err != nil { + return nil, err + } + switch visibility { + case api.MyVisibility: + oid = api.MyVisibility + case api.SharedVisibility: + sid = api.MyVisibility + case api.AllVisibility: + sid = api.MyVisibility + oid = api.MyVisibility + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listClientsReq{ + token: apiutil.ExtractBearerToken(r), + status: st, + offset: o, + limit: l, + metadata: m, + name: n, + tag: t, + sharedBy: sid, + owner: oid, + } + return req, nil +} + +func decodeUpdateClient(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientReq{ + token: apiutil.ExtractBearerToken(r), + 
id: bone.GetValue(r, "thingID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientTags(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientTagsReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "thingID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientCredentials(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientCredentialsReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "thingID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientOwner(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientOwnerReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "thingID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeCreateClientReq(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + var c mfclients.Client + if err := json.NewDecoder(r.Body).Decode(&c); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + req := createClientReq{ 
+ client: c, + token: apiutil.ExtractBearerToken(r), + } + + return req, nil +} + +func decodeCreateClientsReq(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + c := createClientsReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&c.Clients); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return c, nil +} + +func decodeChangeClientStatus(_ context.Context, r *http.Request) (interface{}, error) { + req := changeClientStatusReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "thingID"), + } + + return req, nil +} + +func decodeListMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefClientStatus) + if err != nil { + return nil, err + } + o, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + l, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + m, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listMembersReq{ + token: apiutil.ExtractBearerToken(r), + Page: mfclients.Page{ + Status: st, + Offset: o, + Limit: l, + Metadata: m, + }, + groupID: bone.GetValue(r, "thingID"), + } + return req, nil +} diff --git a/things/clients/clients.go b/things/clients/clients.go new file mode 100644 index 0000000000..fc8e04075d --- /dev/null +++ b/things/clients/clients.go @@ -0,0 +1,64 @@ +package clients + +import ( + "context" + + "github.com/mainflux/mainflux/pkg/clients" +) + +// Service specifies an API that must be fullfiled by the domain service +// implementation, and all of its decorators (e.g. logging & metrics). 
+type Service interface {
+	// CreateThings creates new clients. In case of the failed registration, a
+	// non-nil error value is returned.
+	CreateThings(ctx context.Context, token string, client ...clients.Client) ([]clients.Client, error)
+
+	// ViewClient retrieves client info for a given client ID and an authorized token.
+	ViewClient(ctx context.Context, token, id string) (clients.Client, error)
+
+	// ListClients retrieves clients list for a valid auth token.
+	ListClients(ctx context.Context, token string, pm clients.Page) (clients.ClientsPage, error)
+
+	// ListClientsByGroup retrieves data about subset of things that are
+	// connected or not connected to specified channel and belong to the user identified by
+	// the provided key.
+	ListClientsByGroup(ctx context.Context, token, groupID string, pm clients.Page) (clients.MembersPage, error)
+
+	// UpdateClient updates the client's name and metadata.
+	UpdateClient(ctx context.Context, token string, client clients.Client) (clients.Client, error)
+
+	// UpdateClientTags updates the client's tags.
+	UpdateClientTags(ctx context.Context, token string, client clients.Client) (clients.Client, error)
+
+	// UpdateClientSecret updates the client's secret.
+	UpdateClientSecret(ctx context.Context, token, id, key string) (clients.Client, error)
+
+	// UpdateClientOwner updates the client's owner.
+	UpdateClientOwner(ctx context.Context, token string, client clients.Client) (clients.Client, error)
+
+	// EnableClient logically enables the client identified with the provided ID.
+	EnableClient(ctx context.Context, token, id string) (clients.Client, error)
+
+	// DisableClient logically disables the client identified with the provided ID.
+	DisableClient(ctx context.Context, token, id string) (clients.Client, error)
+
+	// ShareClient gives actions associated with the thing to the given user IDs.
+	// The requester user identified by the token has to have a "write" relation
+	// on the thing in order to share the thing.
+	ShareClient(ctx context.Context, token, clientID string, actions, userIDs []string) error
+
+	// Identify returns thing ID for given thing key.
+	Identify(ctx context.Context, key string) (string, error)
+}
+
+// ClientCache contains thing caching interface.
+type ClientCache interface {
+	// Save stores pair thing secret, thing id.
+	Save(ctx context.Context, thingSecret, thingID string) error
+
+	// ID returns thing ID for given thing secret.
+	ID(ctx context.Context, thingSecret string) (string, error)
+
+	// Remove removes thing from cache.
+	Remove(ctx context.Context, thingID string) error
+}
diff --git a/things/clients/mocks/auth.go b/things/clients/mocks/auth.go
new file mode 100644
index 0000000000..1597ec96cd
--- /dev/null
+++ b/things/clients/mocks/auth.go
@@ -0,0 +1,81 @@
+// Copyright (c) Mainflux
+// SPDX-License-Identifier: Apache-2.0
+
+package mocks
+
+import (
+	"context"
+
+	"github.com/mainflux/mainflux/pkg/errors"
+	"github.com/mainflux/mainflux/users/policies"
+	"google.golang.org/grpc"
+)
+
+var _ policies.AuthServiceClient = (*authServiceMock)(nil)
+
+type MockSubjectSet struct {
+	Object   string
+	Relation []string
+}
+
+type authServiceMock struct {
+	users    map[string]string
+	policies map[string][]MockSubjectSet
+}
+
+// NewAuthService creates mock of users service.
+func NewAuthService(users map[string]string, policies map[string][]MockSubjectSet) policies.AuthServiceClient { + return &authServiceMock{users, policies} +} + +func (svc authServiceMock) Identify(ctx context.Context, in *policies.Token, opts ...grpc.CallOption) (*policies.UserIdentity, error) { + if id, ok := svc.users[in.Value]; ok { + return &policies.UserIdentity{Id: id}, nil + } + return nil, errors.ErrAuthentication +} + +func (svc authServiceMock) Issue(ctx context.Context, in *policies.IssueReq, opts ...grpc.CallOption) (*policies.Token, error) { + if id, ok := svc.users[in.GetEmail()]; ok { + switch in.Type { + default: + return &policies.Token{Value: id}, nil + } + } + return nil, errors.ErrAuthentication +} + +func (svc authServiceMock) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { + for _, policy := range svc.policies[req.GetSub()] { + for _, r := range policy.Relation { + if r == req.GetAct() && policy.Object == req.GetObj() { + return &policies.AuthorizeRes{Authorized: true}, nil + } + } + + } + return nil, errors.ErrAuthorization +} + +func (svc authServiceMock) AddPolicy(ctx context.Context, in *policies.AddPolicyReq, opts ...grpc.CallOption) (*policies.AddPolicyRes, error) { + if len(in.GetAct()) == 0 || in.GetObj() == "" || in.GetSub() == "" { + return &policies.AddPolicyRes{}, errors.ErrMalformedEntity + } + + obj := in.GetObj() + svc.policies[in.GetSub()] = append(svc.policies[in.GetSub()], MockSubjectSet{Object: obj, Relation: in.GetAct()}) + return &policies.AddPolicyRes{Authorized: true}, nil +} + +func (svc authServiceMock) ListPolicies(ctx context.Context, in *policies.ListPoliciesReq, opts ...grpc.CallOption) (*policies.ListPoliciesRes, error) { + res := policies.ListPoliciesRes{} + for key := range svc.policies { + res.Objects = append(res.Objects, key) + } + return &res, nil +} + +func (svc authServiceMock) DeletePolicy(ctx context.Context, in 
*policies.DeletePolicyReq, opts ...grpc.CallOption) (*policies.DeletePolicyRes, error) { + // Not implemented yet + return &policies.DeletePolicyRes{Deleted: true}, nil +} diff --git a/things/clients/mocks/clients.go b/things/clients/mocks/clients.go new file mode 100644 index 0000000000..b1a8addd4f --- /dev/null +++ b/things/clients/mocks/clients.go @@ -0,0 +1,139 @@ +package mocks + +import ( + "context" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/stretchr/testify/mock" +) + +const WrongID = "wrongID" + +var _ mfclients.Repository = (*ClientRepository)(nil) + +type ClientRepository struct { + mock.Mock +} + +// RetrieveByIdentity retrieves client by its unique credentials +func (*ClientRepository) RetrieveByIdentity(ctx context.Context, identity string) (mfclients.Client, error) { + return mfclients.Client{}, nil +} + +func (m *ClientRepository) ChangeStatus(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + if client.Status != mfclients.EnabledStatus && client.Status != mfclients.DisabledStatus { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) Members(ctx context.Context, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + ret := m.Called(ctx, groupID, pm) + if groupID == WrongID { + return mfclients.MembersPage{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.MembersPage), ret.Error(1) +} + +func (m *ClientRepository) RetrieveAll(ctx context.Context, pm mfclients.Page) (mfclients.ClientsPage, error) { + ret := m.Called(ctx, pm) + + return ret.Get(0).(mfclients.ClientsPage), ret.Error(1) +} + +func (m *ClientRepository) RetrieveByID(ctx context.Context, id string) (mfclients.Client, error) { + ret := m.Called(ctx, id) + + if id == 
WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) RetrieveBySecret(ctx context.Context, secret string) (mfclients.Client, error) { + ret := m.Called(ctx, secret) + + if secret == "" { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) Save(ctx context.Context, clis ...mfclients.Client) ([]mfclients.Client, error) { + ret := m.Called(ctx, clis) + for _, cli := range clis { + if cli.Owner == WrongID { + return []mfclients.Client{}, errors.ErrMalformedEntity + } + if cli.Credentials.Secret == "" { + return []mfclients.Client{}, errors.ErrMalformedEntity + } + } + return clis, ret.Error(1) +} + +func (m *ClientRepository) Update(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateIdentity(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + if client.Credentials.Identity == "" { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateSecret(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + if client.Credentials.Secret == "" { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateTags(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + 
if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateOwner(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} diff --git a/things/clients/mocks/commons.go b/things/clients/mocks/commons.go new file mode 100644 index 0000000000..6e10ecc0be --- /dev/null +++ b/things/clients/mocks/commons.go @@ -0,0 +1,66 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "fmt" + "sort" + "strconv" + + mfclients "github.com/mainflux/mainflux/pkg/clients" +) + +const uuidLen = 36 + +// Since mocks will store data in map, and they need to resemble the real +// identifiers as much as possible, a key will be created as combination of +// owner and their own identifiers. This will allow searching either by +// prefix or suffix. 
+func key(owner string, id string) string { + return fmt.Sprintf("%s-%s", owner, id) +} + +func sortThings(pm mfclients.Page, ths []mfclients.Client) []mfclients.Client { + switch pm.Order { + case "name": + if pm.Dir == "asc" { + sort.SliceStable(ths, func(i, j int) bool { + return ths[i].Name < ths[j].Name + }) + } + if pm.Dir == "desc" { + sort.SliceStable(ths, func(i, j int) bool { + return ths[i].Name > ths[j].Name + }) + } + case "id": + if pm.Dir == "asc" { + sort.SliceStable(ths, func(i, j int) bool { + return ths[i].ID < ths[j].ID + }) + } + if pm.Dir == "desc" { + sort.SliceStable(ths, func(i, j int) bool { + return ths[i].ID > ths[j].ID + }) + } + default: + sort.SliceStable(ths, func(i, j int) bool { + return ths[i].ID < ths[j].ID + }) + } + + return ths +} + +func parseID(ID string) (id uint64) { + var serialNum string + + if len(ID) == uuidLen { + serialNum = ID[len(ID)-6:] + } + id, _ = strconv.ParseUint(serialNum, 10, 64) + + return +} diff --git a/things/clients/mocks/things.go b/things/clients/mocks/things.go new file mode 100644 index 0000000000..43d8c9bd26 --- /dev/null +++ b/things/clients/mocks/things.go @@ -0,0 +1,341 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "context" + "fmt" + "strings" + "sync" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/clients" +) + +var _ mfclients.Repository = (*clientRepoMock)(nil) + +type Connection struct { + chanID string + thing mfclients.Client + connected bool +} + +type clientRepoMock struct { + mu sync.Mutex + counter uint64 + conns chan Connection + tconns map[string]map[string]mfclients.Client + things map[string]mfclients.Client +} + +// NewThingRepository creates in-memory thing repository. 
+func NewThingRepository(conns chan Connection) mfclients.Repository { + repo := &clientRepoMock{ + conns: conns, + things: make(map[string]mfclients.Client), + tconns: make(map[string]map[string]mfclients.Client), + } + go func(conns chan Connection, repo *clientRepoMock) { + for conn := range conns { + if !conn.connected { + repo.disconnect(conn) + continue + } + repo.connect(conn) + } + }(conns, repo) + + return repo +} + +func (*clientRepoMock) UpdateIdentity(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + return mfclients.Client{}, nil +} + +func (*clientRepoMock) RetrieveByIdentity(ctx context.Context, identity string) (mfclients.Client, error) { + return mfclients.Client{}, nil +} + +func (trm *clientRepoMock) Save(_ context.Context, clis ...mfclients.Client) ([]mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + for _, cli := range clis { + for _, th := range trm.things { + if th.Credentials.Secret == cli.Credentials.Secret { + return []mfclients.Client{}, errors.ErrConflict + } + } + + trm.counter++ + if cli.ID == "" { + cli.ID = fmt.Sprintf("%03d", trm.counter) + } + trm.things[key(cli.Owner, cli.ID)] = cli + } + return clis, nil +} + +func (trm *clientRepoMock) Update(_ context.Context, thing mfclients.Client) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + dbKey := key(thing.Owner, thing.ID) + + if _, ok := trm.things[dbKey]; !ok { + return mfclients.Client{}, errors.ErrNotFound + } + + trm.things[dbKey] = thing + + return trm.things[dbKey], nil +} + +func (trm *clientRepoMock) UpdateSecret(_ context.Context, client mfclients.Client) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + for _, th := range trm.things { + if th.Credentials.Secret == client.Credentials.Secret { + return mfclients.Client{}, errors.ErrConflict + } + } + + dbKey := key(client.Owner, client.ID) + + th, ok := trm.things[dbKey] + if !ok { + return mfclients.Client{}, errors.ErrNotFound + } + 
+ th.Credentials.Secret = client.Credentials.Secret + trm.things[dbKey] = th + + return trm.things[dbKey], nil +} + +func (trm *clientRepoMock) UpdateOwner(_ context.Context, client mfclients.Client) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + dbKey := key(client.Owner, client.ID) + + th, ok := trm.things[dbKey] + if !ok { + return mfclients.Client{}, errors.ErrNotFound + } + + th.Owner = client.Owner + trm.things[dbKey] = th + + return trm.things[dbKey], nil +} + +func (trm *clientRepoMock) UpdateTags(_ context.Context, client mfclients.Client) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + dbKey := key(client.Owner, client.ID) + + th, ok := trm.things[dbKey] + if !ok { + return mfclients.Client{}, errors.ErrNotFound + } + + th.Tags = client.Tags + trm.things[dbKey] = th + + return trm.things[dbKey], nil +} + +func (trm *clientRepoMock) RetrieveByID(_ context.Context, id string) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + if c, ok := trm.things[id]; ok { + return c, nil + } + + return mfclients.Client{}, errors.ErrNotFound +} + +func (trm *clientRepoMock) RetrieveAll(_ context.Context, pm mfclients.Page) (mfclients.ClientsPage, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + first := uint64(pm.Offset) + 1 + last := first + uint64(pm.Limit) + + var ths []mfclients.Client + + // This obscure way to examine map keys is enforced by the key structure + // itself (see mocks/commons.go). 
+ prefix := "owner" + for k, v := range trm.things { + id := parseID(v.ID) + if strings.HasPrefix(k, prefix) && id >= first && id < last { + ths = append(ths, v) + } + } + + // Sort Things list + ths = sortThings(pm, ths) + + page := mfclients.ClientsPage{ + Clients: ths, + Page: mfclients.Page{ + Total: trm.counter, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + + return page, nil +} + +func (trm *clientRepoMock) Members(_ context.Context, chID string, pm mfclients.Page) (mfclients.MembersPage, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + if pm.Limit <= 0 { + return mfclients.MembersPage{}, nil + } + + first := uint64(pm.Offset) + 1 + last := first + uint64(pm.Limit) + + var ths []mfclients.Client + + // Append connected or not connected channels + switch pm.Disconnected { + case false: + for _, co := range trm.tconns[chID] { + id := parseID(co.ID) + if id >= first && id < last { + ths = append(ths, co) + } + } + default: + for _, th := range trm.things { + conn := false + id := parseID(th.ID) + if id >= first && id < last { + for _, co := range trm.tconns[chID] { + if th.ID == co.ID { + conn = true + } + } + + // Append if not found in connections list + if !conn { + ths = append(ths, th) + } + } + } + } + + // Sort Things by Channel list + ths = sortThings(pm, ths) + + page := mfclients.MembersPage{ + Members: ths, + Page: mfclients.Page{ + Total: trm.counter, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + + return page, nil +} + +func (trm *clientRepoMock) ChangeStatus(_ context.Context, client mfclients.Client) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + th := trm.things[client.ID] + th.Status = client.Status + trm.things[client.ID] = th + return th, nil +} + +func (trm *clientRepoMock) RetrieveBySecret(_ context.Context, key string) (mfclients.Client, error) { + trm.mu.Lock() + defer trm.mu.Unlock() + + for _, thing := range trm.things { + if thing.Credentials.Secret == key { + return thing, nil + } + } + + return 
mfclients.Client{}, errors.ErrNotFound +} + +func (trm *clientRepoMock) connect(conn Connection) { + trm.mu.Lock() + defer trm.mu.Unlock() + + if _, ok := trm.tconns[conn.chanID]; !ok { + trm.tconns[conn.chanID] = make(map[string]mfclients.Client) + } + trm.tconns[conn.chanID][conn.thing.ID] = conn.thing +} + +func (trm *clientRepoMock) disconnect(conn Connection) { + trm.mu.Lock() + defer trm.mu.Unlock() + + if conn.thing.ID == "" { + delete(trm.tconns, conn.chanID) + return + } + delete(trm.tconns[conn.chanID], conn.thing.ID) +} + +type clientCacheMock struct { + mu sync.Mutex + things map[string]string +} + +// NewClientCache returns mock cache instance. +func NewClientCache() clients.ClientCache { + return &clientCacheMock{ + things: make(map[string]string), + } +} + +func (tcm *clientCacheMock) Save(_ context.Context, key, id string) error { + tcm.mu.Lock() + defer tcm.mu.Unlock() + + tcm.things[key] = id + return nil +} + +func (tcm *clientCacheMock) ID(_ context.Context, key string) (string, error) { + tcm.mu.Lock() + defer tcm.mu.Unlock() + + id, ok := tcm.things[key] + if !ok { + return "", errors.ErrNotFound + } + + return id, nil +} + +func (tcm *clientCacheMock) Remove(_ context.Context, id string) error { + tcm.mu.Lock() + defer tcm.mu.Unlock() + + for key, val := range tcm.things { + if val == id { + delete(tcm.things, key) + return nil + } + } + + return nil +} diff --git a/things/clients/postgres/clients.go b/things/clients/postgres/clients.go new file mode 100644 index 0000000000..bb7b382790 --- /dev/null +++ b/things/clients/postgres/clients.go @@ -0,0 +1,462 @@ +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/jackc/pgtype" // required for SQL access + "github.com/mainflux/mainflux/internal/postgres" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/groups" +) + +var _ mfclients.Repository = 
(*clientRepo)(nil) + +type clientRepo struct { + db postgres.Database +} + +// NewRepository instantiates a PostgreSQL +// implementation of Clients repository. +func NewRepository(db postgres.Database) mfclients.Repository { + return &clientRepo{ + db: db, + } +} + +// RetrieveByIdentity retrieves client by its unique credentials +func (clientRepo) RetrieveByIdentity(ctx context.Context, identity string) (mfclients.Client, error) { + return mfclients.Client{}, nil +} + +func (repo clientRepo) Save(ctx context.Context, cs ...mfclients.Client) ([]mfclients.Client, error) { + tx, err := repo.db.BeginTxx(ctx, nil) + if err != nil { + return []mfclients.Client{}, errors.Wrap(errors.ErrCreateEntity, err) + } + + for _, cli := range cs { + q := `INSERT INTO clients (id, name, tags, owner_id, identity, secret, metadata, created_at, updated_at, updated_by, status) + VALUES (:id, :name, :tags, :owner_id, :identity, :secret, :metadata, :created_at, :updated_at, :updated_by, :status) + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + dbcli, err := toDBClient(cli) + if err != nil { + return []mfclients.Client{}, errors.Wrap(errors.ErrCreateEntity, err) + } + + if _, err := tx.NamedExecContext(ctx, q, dbcli); err != nil { + if err := tx.Rollback(); err != nil { + return []mfclients.Client{}, postgres.HandleError(err, errors.ErrCreateEntity) + } + } + } + if err = tx.Commit(); err != nil { + return []mfclients.Client{}, errors.Wrap(errors.ErrCreateEntity, err) + } + + return cs, nil +} + +func (repo clientRepo) RetrieveByID(ctx context.Context, id string) (mfclients.Client, error) { + q := `SELECT id, name, tags, COALESCE(owner_id, '') AS owner_id, identity, secret, metadata, created_at, updated_at, updated_by, status + FROM clients + WHERE id = $1` + + dbc := dbClient{ + ID: id, + } + + if err := repo.db.QueryRowxContext(ctx, q, id).StructScan(&dbc); err != nil { + if err == sql.ErrNoRows { 
+ return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + + } + return mfclients.Client{}, errors.Wrap(errors.ErrViewEntity, err) + } + + return toClient(dbc) +} + +func (repo clientRepo) RetrieveBySecret(ctx context.Context, key string) (mfclients.Client, error) { + q := fmt.Sprintf(`SELECT id, name, tags, COALESCE(owner_id, '') AS owner_id, identity, secret, metadata, created_at, updated_at, updated_by, status + FROM clients + WHERE secret = $1 AND status = %d`, mfclients.EnabledStatus) + + dbc := dbClient{ + Secret: key, + } + + if err := repo.db.QueryRowxContext(ctx, q, key).StructScan(&dbc); err != nil { + if err == sql.ErrNoRows { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + + } + return mfclients.Client{}, errors.Wrap(errors.ErrViewEntity, err) + } + + return toClient(dbc) +} + +func (repo clientRepo) RetrieveAll(ctx context.Context, pm mfclients.Page) (mfclients.ClientsPage, error) { + query, err := pageQuery(pm) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + q := fmt.Sprintf(`SELECT c.id, c.name, c.tags, c.identity, c.secret, c.metadata, COALESCE(c.owner_id, '') AS owner_id, c.status, c.created_at + FROM clients c %s ORDER BY c.created_at LIMIT :limit OFFSET :offset;`, query) + + dbPage, err := toDBClientsPage(pm) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + defer rows.Close() + + var items []mfclients.Client + for rows.Next() { + dbc := dbClient{} + if err := rows.StructScan(&dbc); err != nil { + return mfclients.ClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + c, err := toClient(dbc) + if err != nil { + return mfclients.ClientsPage{}, err + } + + items = append(items, c) + } + cq := fmt.Sprintf(`SELECT COUNT(*) FROM clients c %s;`, 
query) + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + page := mfclients.ClientsPage{ + Clients: items, + Page: mfclients.Page{ + Total: total, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + + return page, nil +} + +func (repo clientRepo) Members(ctx context.Context, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + emq, err := pageQuery(pm) + if err != nil { + return mfclients.MembersPage{}, err + } + + aq := "" + // If not admin, the client needs to have a g_list action on the group + if pm.Subject != "" { + aq = fmt.Sprintf("AND EXISTS (SELECT 1 FROM policies WHERE policies.subject = '%s' AND policies.object = :group_id AND '%s'=ANY(actions))", pm.Subject, pm.Action) + } + + q := fmt.Sprintf(`SELECT c.id, c.name, c.tags, c.metadata, c.identity, c.secret, c.status, c.created_at FROM clients c + INNER JOIN policies ON c.id=policies.subject %s AND policies.object = :group_id %s ORDER BY c.created_at LIMIT :limit OFFSET :offset;`, emq, aq) + dbPage, err := toDBClientsPage(pm) + if err != nil { + return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + dbPage.GroupID = groupID + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembers, err) + } + defer rows.Close() + + var items []mfclients.Client + for rows.Next() { + dbc := dbClient{} + if err := rows.StructScan(&dbc); err != nil { + return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembers, err) + } + + c, err := toClient(dbc) + if err != nil { + return mfclients.MembersPage{}, err + } + + items = append(items, c) + } + cq := fmt.Sprintf(`SELECT COUNT(*) FROM clients c INNER JOIN policies ON c.id=policies.subject %s AND policies.object = :group_id;`, emq) + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + 
return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembers, err) + } + + page := mfclients.MembersPage{ + Members: items, + Page: mfclients.Page{ + Total: total, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + return page, nil +} + +func (repo clientRepo) Update(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + var query []string + var upq string + if client.Name != "" { + query = append(query, "name = :name,") + } + if client.Metadata != nil { + query = append(query, "metadata = :metadata,") + } + if len(query) > 0 { + upq = strings.Join(query, " ") + } + client.Status = mfclients.EnabledStatus + q := fmt.Sprintf(`UPDATE clients SET %s updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by`, + upq) + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateTags(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + client.Status = mfclients.EnabledStatus + q := `UPDATE clients SET tags = :tags, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateIdentity(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + client.Status = mfclients.EnabledStatus + q := `UPDATE clients SET identity = :identity, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateSecret(ctx context.Context, client mfclients.Client) 
(mfclients.Client, error) { + client.Status = mfclients.EnabledStatus + q := `UPDATE clients SET secret = :secret, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateOwner(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + client.Status = mfclients.EnabledStatus + q := `UPDATE clients SET owner_id = :owner_id, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) ChangeStatus(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + q := `UPDATE clients SET status = :status WHERE id = :id + RETURNING id, name, tags, identity, secret, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +// generic update function +func (repo clientRepo) update(ctx context.Context, client mfclients.Client, query string) (mfclients.Client, error) { + dbc, err := toDBClient(client) + if err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + + row, err := repo.db.NamedQueryContext(ctx, query, dbc) + if err != nil { + return mfclients.Client{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbc = dbClient{} + if err := row.StructScan(&dbc); err != nil { + return mfclients.Client{}, err + } + + return toClient(dbc) +} + +type dbClient struct { + ID string `db:"id"` + Name string `db:"name,omitempty"` + Tags 
pgtype.TextArray `db:"tags,omitempty"` + Identity string `db:"identity"` + Owner string `db:"owner_id,omitempty"` // nullable + Secret string `db:"secret"` + Metadata []byte `db:"metadata,omitempty"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy *string `db:"updated_by,omitempty"` + Groups []groups.Group `db:"groups"` + Status mfclients.Status `db:"status"` +} + +func toDBClient(c mfclients.Client) (dbClient, error) { + data := []byte("{}") + if len(c.Metadata) > 0 { + b, err := json.Marshal(c.Metadata) + if err != nil { + return dbClient{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + var tags pgtype.TextArray + if err := tags.Set(c.Tags); err != nil { + return dbClient{}, err + } + var updatedBy *string + if c.UpdatedBy != "" { + updatedBy = &c.UpdatedBy + } + var updatedAt sql.NullTime + if !c.UpdatedAt.IsZero() { + updatedAt = sql.NullTime{Time: c.UpdatedAt, Valid: true} + } + + return dbClient{ + ID: c.ID, + Name: c.Name, + Tags: tags, + Owner: c.Owner, + Identity: c.Credentials.Identity, + Secret: c.Credentials.Secret, + Metadata: data, + CreatedAt: c.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + Status: c.Status, + }, nil +} + +func toClient(c dbClient) (mfclients.Client, error) { + var metadata mfclients.Metadata + if c.Metadata != nil { + if err := json.Unmarshal([]byte(c.Metadata), &metadata); err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + } + var tags []string + for _, e := range c.Tags.Elements { + tags = append(tags, e.String) + } + var updatedBy string + if c.UpdatedBy != nil { + updatedBy = *c.UpdatedBy + } + var updatedAt time.Time + if c.UpdatedAt.Valid { + updatedAt = c.UpdatedAt.Time + } + + return mfclients.Client{ + ID: c.ID, + Name: c.Name, + Tags: tags, + Owner: c.Owner, + Credentials: mfclients.Credentials{ + Identity: c.Identity, + Secret: c.Secret, + }, + Metadata: metadata, + CreatedAt: c.CreatedAt, + 
UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + Status: c.Status, + }, nil +} + +func pageQuery(pm mfclients.Page) (string, error) { + mq, _, err := postgres.CreateMetadataQuery("", pm.Metadata) + if err != nil { + return "", errors.Wrap(errors.ErrViewEntity, err) + } + var query []string + var emq string + if mq != "" { + query = append(query, mq) + } + if len(pm.IDs) != 0 { + query = append(query, fmt.Sprintf("id IN ('%s')", strings.Join(pm.IDs, "','"))) + } + if pm.Name != "" { + query = append(query, fmt.Sprintf("c.name = '%s'", pm.Name)) + } + if pm.Tag != "" { + query = append(query, fmt.Sprintf("'%s' = ANY(c.tags)", pm.Tag)) + } + if pm.Status != mfclients.AllStatus { + query = append(query, fmt.Sprintf("c.status = %d", pm.Status)) + } + // For listing clients that the specified client owns but not sharedby + if pm.Owner != "" && pm.SharedBy == "" { + query = append(query, fmt.Sprintf("c.owner_id = '%s'", pm.Owner)) + } + + // For listing clients that the specified client owns and that are shared with the specified client + if pm.Owner != "" && pm.SharedBy != "" { + query = append(query, fmt.Sprintf("(c.owner_id = '%s' OR c.id IN (SELECT subject FROM policies WHERE object IN (SELECT object FROM policies WHERE subject = '%s' AND '%s'=ANY(actions))))", pm.Owner, pm.SharedBy, pm.Action)) + } + // For listing clients that the specified client is shared with + if pm.SharedBy != "" && pm.Owner == "" { + query = append(query, fmt.Sprintf("c.owner_id != '%s' AND (c.id IN (SELECT subject FROM policies WHERE object IN (SELECT object FROM policies WHERE subject = '%s' AND '%s'=ANY(actions))))", pm.SharedBy, pm.SharedBy, pm.Action)) + } + if len(query) > 0 { + emq = fmt.Sprintf("WHERE %s", strings.Join(query, " AND ")) + } + return emq, nil + +} + +func toDBClientsPage(pm mfclients.Page) (dbClientsPage, error) { + _, data, err := postgres.CreateMetadataQuery("", pm.Metadata) + if err != nil { + return dbClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + return 
dbClientsPage{ + Name: pm.Name, + Metadata: data, + Owner: pm.Owner, + Total: pm.Total, + Offset: pm.Offset, + Limit: pm.Limit, + Status: pm.Status, + Tag: pm.Tag, + }, nil +} + +type dbClientsPage struct { + GroupID string `db:"group_id"` + Name string `db:"name"` + Owner string `db:"owner_id"` + Identity string `db:"identity"` + Metadata []byte `db:"metadata"` + Tag string `db:"tag"` + Status mfclients.Status `db:"status"` + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` +} diff --git a/things/clients/postgres/clients_test.go b/things/clients/postgres/clients_test.go new file mode 100644 index 0000000000..8f65c6670a --- /dev/null +++ b/things/clients/postgres/clients_test.go @@ -0,0 +1,903 @@ +package postgres_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/mainflux/mainflux/internal/postgres" + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + cpostgres "github.com/mainflux/mainflux/things/clients/postgres" + gpostgres "github.com/mainflux/mainflux/things/groups/postgres" + "github.com/mainflux/mainflux/things/policies" + ppostgres "github.com/mainflux/mainflux/things/policies/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const maxNameSize = 1024 + +var ( + idProvider = uuid.New() + invalidName = strings.Repeat("m", maxNameSize+10) + clientIdentity = "client-identity@example.com" + clientName = "client name" + wrongName = "wrong-name" + wrongID = "wrong-id" +) + +func TestClientsSave(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + client mfclients.Client + err error + }{ + 
{ + desc: "add new client successfully", + client: mfclients.Client{ + ID: uid, + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: clientIdentity, + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "add new client with an owner", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: uid, + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: "withowner-client@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "add client with invalid client id", + client: mfclients.Client{ + ID: invalidName, + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: "invalidid-client@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "add client with invalid client name", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: invalidName, + Credentials: mfclients.Credentials{ + Identity: "invalidname-client@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "add client with invalid client owner", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: invalidName, + Credentials: mfclients.Credentials{ + Identity: "invalidowner-client@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "add client with invalid client identity", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: clientName, + 
Credentials: mfclients.Credentials{ + Identity: invalidName, + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "add client with a missing client identity", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Credentials: mfclients.Credentials{ + Identity: "", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + }, + err: nil, + }, + { + desc: "add client with a missing client secret", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Credentials: mfclients.Credentials{ + Identity: "missing-client-secret@example.com", + Secret: "", + }, + Metadata: mfclients.Metadata{}, + }, + err: nil, + }, + } + for _, tc := range cases { + rClient, err := repo.Save(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + rClient[0].Credentials.Secret = tc.client.Credentials.Secret + assert.Equal(t, tc.client, rClient[0], fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.client, rClient[0])) + } + } +} + +func TestClientsRetrieveByID(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: clientIdentity, + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + + _, err := repo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + ID string + err error + }{ + "retrieve existing client": {client.ID, nil}, + "retrieve non-existing client": {wrongID, errors.ErrNotFound}, + } + + for desc, tc := range cases { + cli, err := 
repo.RetrieveByID(context.Background(), tc.ID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + if err == nil { + assert.Equal(t, client.ID, cli.ID, fmt.Sprintf("retrieve client by ID : client ID : expected %s got %s\n", client.ID, cli.ID)) + assert.Equal(t, client.Name, cli.Name, fmt.Sprintf("retrieve client by ID : client Name : expected %s got %s\n", client.Name, cli.Name)) + assert.Equal(t, client.Credentials.Identity, cli.Credentials.Identity, fmt.Sprintf("retrieve client by ID : client Identity : expected %s got %s\n", client.Credentials.Identity, cli.Credentials.Identity)) + assert.Equal(t, client.Status, cli.Status, fmt.Sprintf("retrieve client by ID : client Status : expected %d got %d\n", client.Status, cli.Status)) + } + } +} + +func TestClientsRetrieveAll(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + prepo := ppostgres.NewRepository(database) + + var nClients = uint64(200) + var ownerID string + + meta := mfclients.Metadata{ + "admin": "true", + } + wrongMeta := mfclients.Metadata{ + "admin": "false", + } + var expectedClients = []mfclients.Client{} + + var sharedGroup = mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "shared-group", + } + _, err := grepo.Save(context.Background(), sharedGroup) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + for i := uint64(0); i < nClients; i++ { + identity := fmt.Sprintf("TestRetrieveAll%d@example.com", i) + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: identity, + Credentials: mfclients.Credentials{ + Identity: identity, + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + if i == 1 { + ownerID = client.ID + } + if i%10 == 0 { + client.Owner = ownerID + 
client.Metadata = meta + client.Tags = []string{"Test"} + } + if i%50 == 0 { + client.Status = mfclients.DisabledStatus + } + _, err := repo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + expectedClients = append(expectedClients, client) + var policy = policies.Policy{ + Subject: client.ID, + Object: sharedGroup.ID, + Actions: []string{"c_list"}, + } + _, err = prepo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + } + + cases := map[string]struct { + size uint64 + pm mfclients.Page + response []mfclients.Client + }{ + "retrieve all clients empty page": { + pm: mfclients.Page{}, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Status: mfclients.AllStatus, + }, + response: expectedClients, + size: 200, + }, + "retrieve all clients with limit": { + pm: mfclients.Page{ + Offset: 0, + Limit: 50, + Status: mfclients.AllStatus, + }, + response: expectedClients[0:50], + size: 50, + }, + "retrieve all clients with offset": { + pm: mfclients.Page{ + Offset: 50, + Limit: nClients, + Status: mfclients.AllStatus, + }, + response: expectedClients[50:200], + size: 150, + }, + "retrieve all clients with limit and offset": { + pm: mfclients.Page{ + Offset: 50, + Limit: 50, + Status: mfclients.AllStatus, + }, + response: expectedClients[50:100], + size: 50, + }, + "retrieve all clients with limit and offset not full": { + pm: mfclients.Page{ + Offset: 170, + Limit: 50, + Status: mfclients.AllStatus, + }, + response: expectedClients[170:200], + size: 30, + }, + "retrieve all clients by metadata": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Metadata: meta, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[0], expectedClients[10], expectedClients[20], expectedClients[30], expectedClients[40], expectedClients[50], 
expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 20, + }, + "retrieve clients by wrong metadata": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Metadata: wrongMeta, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by name": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Name: "TestRetrieveAll3@example.com", + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[3]}, + size: 1, + }, + "retrieve clients by wrong name": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Name: wrongName, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by owner": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Owner: ownerID, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[10], expectedClients[20], expectedClients[30], expectedClients[40], expectedClients[50], expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 19, + }, + "retrieve clients by wrong owner": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Owner: wrongID, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by disabled status": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Status: 
mfclients.DisabledStatus, + }, + response: []mfclients.Client{expectedClients[0], expectedClients[50], expectedClients[100], expectedClients[150]}, + size: 4, + }, + "retrieve all clients by combined status": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Status: mfclients.AllStatus, + }, + response: expectedClients, + size: 200, + }, + "retrieve clients by the wrong status": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Status: 10, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by tags": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Tag: "Test", + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[0], expectedClients[10], expectedClients[20], expectedClients[30], expectedClients[40], expectedClients[50], expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 20, + }, + "retrieve clients by wrong tags": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Tag: "wrongTags", + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + } + for desc, tc := range cases { + page, err := repo.RetrieveAll(context.Background(), tc.pm) + size := uint64(len(page.Clients)) + assert.ElementsMatch(t, page.Clients, tc.response, fmt.Sprintf("%s: expected %v got %v\n", desc, tc.response, page.Clients)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) + } +} + +func TestClientsUpdateMetadata(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + 
repo := cpostgres.NewRepository(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{ + "name": "enabled-client", + }, + Tags: []string{"enabled", "tag1"}, + Status: mfclients.EnabledStatus, + } + + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client", + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{ + "name": "disabled-client", + }, + Tags: []string{"disabled", "tag1"}, + Status: mfclients.DisabledStatus, + } + + _, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client with metadata: expected %v got %s\n", nil, err)) + _, err = repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + + ucases := []struct { + desc string + update string + client mfclients.Client + err error + }{ + { + desc: "update metadata for enabled client", + update: "metadata", + client: mfclients.Client{ + ID: client1.ID, + Metadata: mfclients.Metadata{ + "update": "metadata", + }, + }, + err: nil, + }, + { + desc: "update metadata for disabled client", + update: "metadata", + client: mfclients.Client{ + ID: client2.ID, + Metadata: mfclients.Metadata{ + "update": "metadata", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update name for enabled client", + update: "name", + client: mfclients.Client{ + ID: client1.ID, + Name: "updated name", + }, + err: nil, + }, + { + desc: "update name for disabled client", + update: "name", + client: mfclients.Client{ + ID: client2.ID, + Name: "updated name", + }, + err: errors.ErrNotFound, + }, + { + 
desc: "update name and metadata for enabled client", + update: "both", + client: mfclients.Client{ + ID: client1.ID, + Name: "updated name and metadata", + Metadata: mfclients.Metadata{ + "update": "name and metadata", + }, + }, + err: nil, + }, + { + desc: "update name and metadata for a disabled client", + update: "both", + client: mfclients.Client{ + ID: client2.ID, + Name: "updated name and metadata", + Metadata: mfclients.Metadata{ + "update": "name and metadata", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update metadata for invalid client", + update: "metadata", + client: mfclients.Client{ + ID: wrongID, + Metadata: mfclients.Metadata{ + "update": "metadata", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update name for invalid client", + update: "name", + client: mfclients.Client{ + ID: wrongID, + Name: "updated name", + }, + err: errors.ErrNotFound, + }, + { + desc: "update name and metadata for invalid client", + update: "both", + client: mfclients.Client{ + ID: client2.ID, + Name: "updated name and metadata", + Metadata: mfclients.Metadata{ + "update": "name and metadata", + }, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.Update(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + if tc.client.Name != "" { + assert.Equal(t, expected.Name, tc.client.Name, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected.Name, tc.client.Name)) + } + if tc.client.Metadata != nil { + assert.Equal(t, expected.Metadata, tc.client.Metadata, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected.Metadata, tc.client.Metadata)) + } + + } + } +} + +func TestClientsUpdateTags(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, 
idProvider), + Name: "enabled-client-with-tags", + Credentials: mfclients.Credentials{ + Identity: "client1-update-tags@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Tags: []string{"test", "enabled"}, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client-with-tags", + Credentials: mfclients.Credentials{ + Identity: "client2-update-tags@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Tags: []string{"test", "disabled"}, + Status: mfclients.DisabledStatus, + } + + _, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client with tags: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, client1.ID, fmt.Sprintf("add new client with tags: expected %v got %s\n", nil, err)) + } + _, err = repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client with tags: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, client2.ID, fmt.Sprintf("add new disabled client with tags: expected %v got %s\n", nil, err)) + } + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update tags for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Tags: []string{"updated"}, + }, + err: nil, + }, + { + desc: "update tags for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Tags: []string{"updated"}, + }, + err: errors.ErrNotFound, + }, + { + desc: "update tags for invalid client", + client: mfclients.Client{ + ID: wrongID, + Tags: []string{"updated"}, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.UpdateTags(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + 
assert.Equal(t, tc.client.Tags, expected.Tags, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Tags, expected.Tags)) + } + } +} + +func TestClientsUpdateSecret(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client", + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.DisabledStatus, + } + + rClient1, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, rClient1[0].ID, fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + } + rClient2, err := repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, rClient2[0].ID, fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + } + + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update secret for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: "newpassword", + }, + }, + err: nil, + }, + { + desc: "update secret for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: "newpassword", + }, 
+ }, + err: errors.ErrNotFound, + }, + { + desc: "update secret for invalid client", + client: mfclients.Client{ + ID: wrongID, + Credentials: mfclients.Credentials{ + Identity: "client3-update@example.com", + Secret: "newpassword", + }, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + _, err := repo.UpdateSecret(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + c, err := repo.RetrieveByID(context.Background(), tc.client.ID) + require.Nil(t, err, fmt.Sprintf("retrieve client by id during update of secret unexpected error: %s", err)) + assert.Equal(t, tc.client.Credentials.Secret, c.Credentials.Secret, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Credentials.Secret, c.Credentials.Secret)) + } + } +} + +func TestClientsUpdateOwner(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client-with-owner", + Credentials: mfclients.Credentials{ + Identity: "client1-update-owner@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Owner: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client-with-owner", + Credentials: mfclients.Credentials{ + Identity: "client2-update-owner@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Owner: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.DisabledStatus, + } + + _, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client with owner: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, client1.ID, fmt.Sprintf("add new client with owner: 
expected %v got %s\n", nil, err)) + } + _, err = repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client with owner: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, client2.ID, fmt.Sprintf("add new disabled client with owner: expected %v got %s\n", nil, err)) + } + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update owner for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Owner: testsutil.GenerateUUID(t, idProvider), + }, + err: nil, + }, + { + desc: "update owner for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Owner: testsutil.GenerateUUID(t, idProvider), + }, + err: errors.ErrNotFound, + }, + { + desc: "update owner for invalid client", + client: mfclients.Client{ + ID: wrongID, + Owner: testsutil.GenerateUUID(t, idProvider), + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.UpdateOwner(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.client.Owner, expected.Owner, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Owner, expected.Owner)) + } + } +} + +func TestClientsChangeStatus(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewRepository(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + + _, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + + ucases := []struct { + desc string + 
client mfclients.Client + err error + }{ + { + desc: "change client status for an enabled client", + client: mfclients.Client{ + ID: client1.ID, + Status: 0, + }, + err: nil, + }, + { + desc: "change client status for a disabled client", + client: mfclients.Client{ + ID: client1.ID, + Status: 1, + }, + err: nil, + }, + { + desc: "change client status for non-existing client", + client: mfclients.Client{ + ID: "invalid", + Status: 2, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range ucases { + expected, err := repo.ChangeStatus(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.client.Status, expected.Status, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.client.Status, expected.Status)) + } + } +} diff --git a/things/clients/postgres/doc.go b/things/clients/postgres/doc.go new file mode 100644 index 0000000000..bf560bea28 --- /dev/null +++ b/things/clients/postgres/doc.go @@ -0,0 +1 @@ +package postgres diff --git a/things/clients/postgres/setup_test.go b/things/clients/postgres/setup_test.go new file mode 100644 index 0000000000..fa731d4e46 --- /dev/null +++ b/things/clients/postgres/setup_test.go @@ -0,0 +1,96 @@ +// Package postgres_test contains tests for PostgreSQL repository +// implementations. 
+package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/jmoiron/sqlx" + pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/postgres" + cpostgres "github.com/mainflux/mainflux/things/postgres" + dockertest "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.1-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgClient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgClient.SetupDB(dbConfig, *cpostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + if db, err = pgClient.Connect(dbConfig); err != nil { + 
log.Fatalf("Could not setup test DB connection: %s", err) + } + database = postgres.NewDatabase(db, tracer) + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/things/redis/doc.go b/things/clients/redis/doc.go similarity index 100% rename from things/redis/doc.go rename to things/clients/redis/doc.go diff --git a/things/clients/redis/events.go b/things/clients/redis/events.go new file mode 100644 index 0000000000..596e4d8eb4 --- /dev/null +++ b/things/clients/redis/events.go @@ -0,0 +1,317 @@ +package redis + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + mfclients "github.com/mainflux/mainflux/pkg/clients" +) + +const ( + clientPrefix = "thing." + clientCreate = clientPrefix + "create" + clientUpdate = clientPrefix + "update" + clientRemove = clientPrefix + "remove" + clientShare = clientPrefix + "share" + clientView = clientPrefix + "view" + clientList = clientPrefix + "list" + clientListByGroup = clientPrefix + "list_by_group" + clientIdentify = clientPrefix + "identify" +) + +type event interface { + Encode() (map[string]interface{}, error) +} + +var ( + _ event = (*createClientEvent)(nil) + _ event = (*updateClientEvent)(nil) + _ event = (*removeClientEvent)(nil) + _ event = (*shareClientEvent)(nil) + _ event = (*viewClientEvent)(nil) + _ event = (*listClientEvent)(nil) + _ event = (*listClientByGroupEvent)(nil) + _ event = (*identifyClientEvent)(nil) +) + +type createClientEvent struct { + mfclients.Client +} + +func (cce createClientEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": clientCreate, + "id": cce.ID, + "status": cce.Status.String(), + "created_at": cce.CreatedAt, + } + + if cce.Name != "" { + val["name"] = cce.Name + } + if len(cce.Tags) > 0 { + tags := fmt.Sprintf("[%s]", strings.Join(cce.Tags, ",")) + val["tags"] = tags 
+ } + if cce.Owner != "" { + val["owner"] = cce.Owner + } + if cce.Metadata != nil { + metadata, err := json.Marshal(cce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if cce.Credentials.Identity != "" { + val["identity"] = cce.Credentials.Identity + } + + return val, nil +} + +type updateClientEvent struct { + mfclients.Client + operation string +} + +func (uce updateClientEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": clientUpdate + "." + uce.operation, + "updated_at": uce.UpdatedAt, + "updated_by": uce.UpdatedBy, + } + + if uce.ID != "" { + val["id"] = uce.ID + } + if uce.Name != "" { + val["name"] = uce.Name + } + if len(uce.Tags) > 0 { + tags := fmt.Sprintf("[%s]", strings.Join(uce.Tags, ",")) + val["tags"] = tags + } + if uce.Credentials.Identity != "" { + val["identity"] = uce.Credentials.Identity + } + if uce.Metadata != nil { + metadata, err := json.Marshal(uce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if !uce.CreatedAt.IsZero() { + val["created_at"] = uce.CreatedAt + } + if uce.Status.String() != "" { + val["status"] = uce.Status.String() + } + + return val, nil +} + +type removeClientEvent struct { + id string + status string + updatedAt time.Time + updatedBy string +} + +func (rce removeClientEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": clientRemove, + "id": rce.id, + "status": rce.status, + "updated_at": rce.updatedAt, + "updated_by": rce.updatedBy, + }, nil +} + +type shareClientEvent struct { + thingID string + actions string + userIDs string +} + +func (sce shareClientEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": clientShare, + "thing_id": sce.thingID, + "actions": sce.actions, + "user_ids": sce.userIDs, + }, nil +} + +type viewClientEvent struct { + mfclients.Client +} + 
+func (vce viewClientEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": clientView, + "id": vce.ID, + } + + if vce.Name != "" { + val["name"] = vce.Name + } + if len(vce.Tags) > 0 { + tags := fmt.Sprintf("[%s]", strings.Join(vce.Tags, ",")) + val["tags"] = tags + } + if vce.Owner != "" { + val["owner"] = vce.Owner + } + if vce.Credentials.Identity != "" { + val["identity"] = vce.Credentials.Identity + } + if vce.Metadata != nil { + metadata, err := json.Marshal(vce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if !vce.CreatedAt.IsZero() { + val["created_at"] = vce.CreatedAt + } + if !vce.UpdatedAt.IsZero() { + val["updated_at"] = vce.UpdatedAt + } + if vce.UpdatedBy != "" { + val["updated_by"] = vce.UpdatedBy + } + if vce.Status.String() != "" { + val["status"] = vce.Status.String() + } + + return val, nil +} + +type listClientEvent struct { + mfclients.Page +} + +func (lce listClientEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": clientList, + "total": lce.Total, + "offset": lce.Offset, + "limit": lce.Limit, + } + + if lce.Name != "" { + val["name"] = lce.Name + } + if lce.Order != "" { + val["order"] = lce.Order + } + if lce.Dir != "" { + val["dir"] = lce.Dir + } + if lce.Metadata != nil { + metadata, err := json.Marshal(lce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if lce.Owner != "" { + val["owner"] = lce.Owner + } + if lce.Tag != "" { + val["tag"] = lce.Tag + } + if lce.SharedBy != "" { + val["sharedBy"] = lce.SharedBy + } + if lce.Status.String() != "" { + val["status"] = lce.Status.String() + } + if lce.Action != "" { + val["action"] = lce.Action + } + if lce.Subject != "" { + val["subject"] = lce.Subject + } + if lce.Identity != "" { + val["identity"] = lce.Identity + } + + return val, nil +} + +type listClientByGroupEvent struct { + 
mfclients.Page + channelID string +} + +func (lcge listClientByGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": clientListByGroup, + "total": lcge.Total, + "offset": lcge.Offset, + "limit": lcge.Limit, + "channel_id": lcge.channelID, + } + + if lcge.Name != "" { + val["name"] = lcge.Name + } + if lcge.Order != "" { + val["order"] = lcge.Order + } + if lcge.Dir != "" { + val["dir"] = lcge.Dir + } + if lcge.Metadata != nil { + metadata, err := json.Marshal(lcge.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if lcge.Owner != "" { + val["owner"] = lcge.Owner + } + if lcge.Tag != "" { + val["tag"] = lcge.Tag + } + if lcge.SharedBy != "" { + val["sharedBy"] = lcge.SharedBy + } + if lcge.Status.String() != "" { + val["status"] = lcge.Status.String() + } + if lcge.Action != "" { + val["action"] = lcge.Action + } + if lcge.Subject != "" { + val["subject"] = lcge.Subject + } + if lcge.Identity != "" { + val["identity"] = lcge.Identity + } + + return val, nil +} + +type identifyClientEvent struct { + thingID string +} + +func (ice identifyClientEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": clientIdentify, + "thing_id": ice.thingID, + }, nil +} diff --git a/things/clients/redis/streams.go b/things/clients/redis/streams.go new file mode 100644 index 0000000000..af9c6664d7 --- /dev/null +++ b/things/clients/redis/streams.go @@ -0,0 +1,279 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package redis + +import ( + "context" + "fmt" + "strings" + + "github.com/go-redis/redis/v8" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/things/clients" +) + +const ( + streamID = "mainflux.things" + streamLen = 1000 +) + +var _ clients.Service = (*eventStore)(nil) + +type eventStore struct { + svc clients.Service + client *redis.Client +} + +// 
NewEventStoreMiddleware returns wrapper around things service that sends +// events to event store. +func NewEventStoreMiddleware(svc clients.Service, client *redis.Client) clients.Service { + return eventStore{ + svc: svc, + client: client, + } +} + +func (es eventStore) CreateThings(ctx context.Context, token string, thing ...mfclients.Client) ([]mfclients.Client, error) { + sths, err := es.svc.CreateThings(ctx, token, thing...) + if err != nil { + return sths, err + } + + for _, th := range sths { + event := createClientEvent{ + th, + } + values, err := event.Encode() + if err != nil { + return sths, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLen: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return sths, err + } + } + return sths, nil +} + +func (es eventStore) UpdateClient(ctx context.Context, token string, thing mfclients.Client) (mfclients.Client, error) { + cli, err := es.svc.UpdateClient(ctx, token, thing) + if err != nil { + return mfclients.Client{}, err + } + + return es.update(ctx, "", cli) +} + +func (es eventStore) UpdateClientOwner(ctx context.Context, token string, thing mfclients.Client) (mfclients.Client, error) { + cli, err := es.svc.UpdateClientOwner(ctx, token, thing) + if err != nil { + return mfclients.Client{}, err + } + + return es.update(ctx, "owner", cli) +} + +func (es eventStore) UpdateClientTags(ctx context.Context, token string, thing mfclients.Client) (mfclients.Client, error) { + cli, err := es.svc.UpdateClientTags(ctx, token, thing) + if err != nil { + return mfclients.Client{}, err + } + + return es.update(ctx, "tags", cli) +} + +func (es eventStore) UpdateClientSecret(ctx context.Context, token, id, key string) (mfclients.Client, error) { + cli, err := es.svc.UpdateClientSecret(ctx, token, id, key) + if err != nil { + return mfclients.Client{}, err + } + + return es.update(ctx, "secret", cli) +} + +func (es eventStore) update(ctx context.Context, operation 
string, cli mfclients.Client) (mfclients.Client, error) { + event := updateClientEvent{ + cli, operation, + } + values, err := event.Encode() + if err != nil { + return cli, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return cli, err + } + + return cli, nil +} + +func (es eventStore) ShareClient(ctx context.Context, token, thingID string, actions, userIDs []string) error { + if err := es.svc.ShareClient(ctx, token, thingID, actions, userIDs); err != nil { + return err + } + event := shareClientEvent{ + thingID: thingID, + actions: fmt.Sprintf("[%s]", strings.Join(actions, ",")), + userIDs: fmt.Sprintf("[%s]", strings.Join(userIDs, ",")), + } + values, err := event.Encode() + if err != nil { + return err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return err + } + + return nil +} + +func (es eventStore) ViewClient(ctx context.Context, token, id string) (mfclients.Client, error) { + cli, err := es.svc.ViewClient(ctx, token, id) + if err != nil { + return mfclients.Client{}, err + } + event := viewClientEvent{ + cli, + } + values, err := event.Encode() + if err != nil { + return cli, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return cli, err + } + + return cli, nil +} + +func (es eventStore) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + cp, err := es.svc.ListClients(ctx, token, pm) + if err != nil { + return mfclients.ClientsPage{}, err + } + event := listClientEvent{ + pm, + } + values, err := event.Encode() + if err != nil { + return cp, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err 
:= es.client.XAdd(ctx, record).Err(); err != nil { + return cp, err + } + + return cp, nil +} + +func (es eventStore) ListClientsByGroup(ctx context.Context, token, chID string, pm mfclients.Page) (mfclients.MembersPage, error) { + cp, err := es.svc.ListClientsByGroup(ctx, token, chID, pm) + if err != nil { + return mfclients.MembersPage{}, err + } + event := listClientByGroupEvent{ + pm, chID, + } + values, err := event.Encode() + if err != nil { + return cp, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return cp, err + } + + return cp, nil +} + +func (es eventStore) EnableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + cli, err := es.svc.EnableClient(ctx, token, id) + if err != nil { + return mfclients.Client{}, err + } + + return es.delete(ctx, cli) +} + +func (es eventStore) DisableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + cli, err := es.svc.DisableClient(ctx, token, id) + if err != nil { + return mfclients.Client{}, err + } + + return es.delete(ctx, cli) +} + +func (es eventStore) delete(ctx context.Context, cli mfclients.Client) (mfclients.Client, error) { + event := removeClientEvent{ + id: cli.ID, + updatedAt: cli.UpdatedAt, + updatedBy: cli.UpdatedBy, + status: cli.Status.String(), + } + values, err := event.Encode() + if err != nil { + return cli, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return cli, err + } + + return cli, nil +} + +func (es eventStore) Identify(ctx context.Context, key string) (string, error) { + thingID, err := es.svc.Identify(ctx, key) + if err != nil { + return "", err + } + event := identifyClientEvent{ + thingID: thingID, + } + values, err := event.Encode() + if err != nil { + return thingID, err + } + record := 
&redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return thingID, err + } + + return thingID, nil +} diff --git a/things/clients/redis/things.go b/things/clients/redis/things.go new file mode 100644 index 0000000000..c4e9cc8b33 --- /dev/null +++ b/things/clients/redis/things.go @@ -0,0 +1,74 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package redis + +import ( + "context" + "fmt" + + "github.com/go-redis/redis/v8" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/clients" +) + +const ( + keyPrefix = "thing_key" + idPrefix = "thing_id" +) + +var _ clients.ClientCache = (*thingCache)(nil) + +type thingCache struct { + client *redis.Client +} + +// NewCache returns redis thing cache implementation. +func NewCache(client *redis.Client) clients.ClientCache { + return &thingCache{ + client: client, + } +} + +func (tc *thingCache) Save(ctx context.Context, thingKey string, thingID string) error { + tkey := fmt.Sprintf("%s:%s", keyPrefix, thingKey) + if err := tc.client.Set(ctx, tkey, thingID, 0).Err(); err != nil { + return errors.Wrap(errors.ErrCreateEntity, err) + } + + tid := fmt.Sprintf("%s:%s", idPrefix, thingID) + if err := tc.client.Set(ctx, tid, thingKey, 0).Err(); err != nil { + return errors.Wrap(errors.ErrCreateEntity, err) + } + return nil +} + +func (tc *thingCache) ID(ctx context.Context, thingKey string) (string, error) { + tkey := fmt.Sprintf("%s:%s", keyPrefix, thingKey) + thingID, err := tc.client.Get(ctx, tkey).Result() + if err != nil { + return "", errors.Wrap(errors.ErrNotFound, err) + } + if thingID == "" { + return "", errors.ErrNotFound + } + return thingID, nil +} + +func (tc *thingCache) Remove(ctx context.Context, thingID string) error { + tid := fmt.Sprintf("%s:%s", idPrefix, thingID) + key, err := tc.client.Get(ctx, tid).Result() + // Redis returns Nil Reply when key does not exist. 
+ if err == redis.Nil { + return nil + } + if err != nil { + return errors.Wrap(errors.ErrRemoveEntity, err) + } + + tkey := fmt.Sprintf("%s:%s", keyPrefix, key) + if err := tc.client.Del(ctx, tkey, tid).Err(); err != nil { + return errors.Wrap(errors.ErrRemoveEntity, err) + } + return nil +} diff --git a/things/clients/service.go b/things/clients/service.go new file mode 100644 index 0000000000..0cc215cc9e --- /dev/null +++ b/things/clients/service.go @@ -0,0 +1,325 @@ +package clients + +import ( + "context" + "fmt" + "time" + + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/apiutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" +) + +const ( + MyKey = "mine" + thingsObjectKey = "things" + createKey = "c_add" + updateRelationKey = "c_update" + listRelationKey = "c_list" + deleteRelationKey = "c_delete" + entityType = "group" +) + +var AdminRelationKey = []string{createKey, updateRelationKey, listRelationKey, deleteRelationKey} + +type service struct { + auth policies.AuthServiceClient + clients mfclients.Repository + clientCache ClientCache + idProvider mainflux.IDProvider +} + +// NewService returns a new Clients service implementation. 
+func NewService(auth policies.AuthServiceClient, c mfclients.Repository, tcache ClientCache, idp mainflux.IDProvider) Service { + return service{ + auth: auth, + clients: c, + clientCache: tcache, + idProvider: idp, + } +} + +func (svc service) CreateThings(ctx context.Context, token string, clis ...mfclients.Client) ([]mfclients.Client, error) { + res, err := svc.auth.Identify(ctx, &policies.Token{Value: token}) + if err != nil { + return []mfclients.Client{}, err + } + var clients []mfclients.Client + for _, cli := range clis { + if cli.ID == "" { + clientID, err := svc.idProvider.ID() + if err != nil { + return []mfclients.Client{}, err + } + cli.ID = clientID + } + if cli.Credentials.Secret == "" { + key, err := svc.idProvider.ID() + if err != nil { + return []mfclients.Client{}, err + } + cli.Credentials.Secret = key + } + if cli.Owner == "" { + cli.Owner = res.GetId() + } + if cli.Status != mfclients.DisabledStatus && cli.Status != mfclients.EnabledStatus { + return []mfclients.Client{}, apiutil.ErrInvalidStatus + } + cli.CreatedAt = time.Now() + cli.UpdatedAt = cli.CreatedAt + cli.UpdatedBy = cli.Owner + clients = append(clients, cli) + } + return svc.clients.Save(ctx, clients...) +} + +func (svc service) ViewClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, userID, id, listRelationKey); err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + } + return svc.clients.RetrieveByID(ctx, id) +} + +func (svc service) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.ClientsPage{}, err + } + + // If the user is admin, fetch all things from database. 
+ if err := svc.authorize(ctx, userID, thingsObjectKey, listRelationKey); err == nil { + pm.Owner = "" + pm.SharedBy = "" + return svc.clients.RetrieveAll(ctx, pm) + } + + // If the user is not admin, check 'sharedby' parameter from page metadata. + // If user provides 'sharedby' key, fetch things from policies. Otherwise, + // fetch things from the database based on thing's 'owner' field. + if pm.SharedBy == MyKey { + pm.SharedBy = userID + } + if pm.Owner == MyKey { + pm.Owner = userID + } + pm.Action = "c_list" + + return svc.clients.RetrieveAll(ctx, pm) +} + +func (svc service) UpdateClient(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, userID, cli.ID, updateRelationKey); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: cli.ID, + Name: cli.Name, + Metadata: cli.Metadata, + UpdatedAt: time.Now(), + UpdatedBy: userID, + } + + return svc.clients.Update(ctx, client) +} + +func (svc service) UpdateClientTags(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, userID, cli.ID, updateRelationKey); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: cli.ID, + Tags: cli.Tags, + UpdatedAt: time.Now(), + UpdatedBy: userID, + } + + return svc.clients.UpdateTags(ctx, client) +} + +func (svc service) UpdateClientSecret(ctx context.Context, token, id, key string) (mfclients.Client, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, userID, id, updateRelationKey); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: id, + Credentials: 
mfclients.Credentials{ + Secret: key, + }, + UpdatedAt: time.Now(), + UpdatedBy: userID, + } + + return svc.clients.UpdateSecret(ctx, client) +} + +func (svc service) UpdateClientOwner(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, userID, cli.ID, updateRelationKey); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: cli.ID, + Owner: cli.Owner, + UpdatedAt: time.Now(), + UpdatedBy: userID, + } + + return svc.clients.UpdateOwner(ctx, client) +} + +func (svc service) EnableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + client := mfclients.Client{ + ID: id, + Status: mfclients.EnabledStatus, + UpdatedAt: time.Now(), + } + client, err := svc.changeClientStatus(ctx, token, client) + if err != nil { + return mfclients.Client{}, errors.Wrap(mfclients.ErrEnableClient, err) + } + + return client, nil +} + +func (svc service) DisableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + client := mfclients.Client{ + ID: id, + Status: mfclients.DisabledStatus, + UpdatedAt: time.Now(), + } + client, err := svc.changeClientStatus(ctx, token, client) + if err != nil { + return mfclients.Client{}, errors.Wrap(mfclients.ErrDisableClient, err) + } + + if err := svc.clientCache.Remove(ctx, client.ID); err != nil { + return client, err + } + + return client, nil +} + +func (svc service) ListClientsByGroup(ctx context.Context, token, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.MembersPage{}, err + } + // If the user is admin, fetch all things connected to the channel. 
+ if err := svc.authorize(ctx, token, thingsObjectKey, listRelationKey); err == nil { + return svc.clients.Members(ctx, groupID, pm) + } + pm.Owner = userID + + return svc.clients.Members(ctx, groupID, pm) +} + +func (svc service) Identify(ctx context.Context, key string) (string, error) { + id, err := svc.clientCache.ID(ctx, key) + if err == nil { + return id, nil + } + client, err := svc.clients.RetrieveBySecret(ctx, key) + if err != nil { + return "", err + } + if err := svc.clientCache.Save(ctx, key, client.ID); err != nil { + return "", err + } + return client.ID, nil +} + +func (svc service) changeClientStatus(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, userID, client.ID, deleteRelationKey); err != nil { + return mfclients.Client{}, err + } + dbClient, err := svc.clients.RetrieveByID(ctx, client.ID) + if err != nil { + return mfclients.Client{}, err + } + if dbClient.Status == client.Status { + return mfclients.Client{}, mfclients.ErrStatusAlreadyAssigned + } + client.UpdatedBy = userID + return svc.clients.ChangeStatus(ctx, client) +} + +func (svc service) identifyUser(ctx context.Context, token string) (string, error) { + req := &policies.Token{Value: token} + res, err := svc.auth.Identify(ctx, req) + if err != nil { + return "", errors.Wrap(errors.ErrAuthorization, err) + } + return res.GetId(), nil +} + +func (svc service) authorize(ctx context.Context, subject, object string, relation string) error { + // Check if the client is the owner of the thing. 
+ dbThing, err := svc.clients.RetrieveByID(ctx, object) + if err != nil { + return err + } + if dbThing.Owner == subject { + return nil + } + req := &policies.AuthorizeReq{ + Sub: subject, + Obj: object, + Act: relation, + EntityType: entityType, + } + res, err := svc.auth.Authorize(ctx, req) + if err != nil { + return errors.Wrap(errors.ErrAuthorization, err) + } + if !res.GetAuthorized() { + return errors.ErrAuthorization + } + return nil +} + +func (svc service) ShareClient(ctx context.Context, token, clientID string, actions, userIDs []string) error { + if err := svc.authorize(ctx, token, clientID, updateRelationKey); err != nil { + return err + } + var errs error + for _, userID := range userIDs { + apr, err := svc.auth.AddPolicy(ctx, &policies.AddPolicyReq{Token: token, Sub: userID, Obj: clientID, Act: actions}) + if err != nil { + errs = errors.Wrap(fmt.Errorf("cannot claim ownership on object '%s' by user '%s': %s", clientID, userID, err), errs) + } + if !apr.GetAuthorized() { + errs = errors.Wrap(fmt.Errorf("cannot claim ownership on object '%s' by user '%s': unauthorized", clientID, userID), errs) + } + } + return errs +} diff --git a/things/clients/service_test.go b/things/clients/service_test.go new file mode 100644 index 0000000000..9e979e0fd5 --- /dev/null +++ b/things/clients/service_test.go @@ -0,0 +1,1131 @@ +package clients_test + +import ( + context "context" + fmt "fmt" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/things/clients" + "github.com/mainflux/mainflux/things/clients/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() + secret = "strongsecret" + validCMetadata = 
mfclients.Metadata{"role": "client"} + ID = testsutil.GenerateUUID(&testing.T{}, idProvider) + client = mfclients.Client{ + ID: ID, + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validCMetadata, + Status: mfclients.EnabledStatus, + } + inValidToken = "invalidToken" + withinDuration = 5 * time.Second + adminEmail = "admin@example.com" + token = "token" +) + +func newService(tokens map[string]string) (clients.Service, *mocks.ClientRepository) { + adminPolicy := mocks.MockSubjectSet{Object: ID, Relation: clients.AdminRelationKey} + auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{adminEmail: {adminPolicy}}) + thingCache := mocks.NewClientCache() + idProvider := uuid.NewMock() + cRepo := new(mocks.ClientRepository) + + return clients.NewService(auth, cRepo, thingCache, idProvider), cRepo +} + +func TestRegisterClient(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + client mfclients.Client + token string + err error + }{ + { + desc: "register new client", + client: client, + token: token, + err: nil, + }, + { + desc: "register existing client", + client: client, + token: token, + err: errors.ErrConflict, + }, + { + desc: "register a new enabled client with name", + client: mfclients.Client{ + Name: "clientWithName", + Credentials: mfclients.Credentials{ + Identity: "newclientwithname@example.com", + Secret: secret, + }, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: token, + }, + { + desc: "register a new disabled client with name", + client: mfclients.Client{ + Name: "clientWithName", + Credentials: mfclients.Credentials{ + Identity: "newclientwithname@example.com", + Secret: secret, + }, + }, + err: nil, + token: token, + }, + { + desc: "register a new enabled client with tags", + client: mfclients.Client{ + Tags: []string{"tag1", "tag2"}, + Credentials: 
mfclients.Credentials{ + Identity: "newclientwithtags@example.com", + Secret: secret, + }, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: token, + }, + { + desc: "register a new disabled client with tags", + client: mfclients.Client{ + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{ + Identity: "newclientwithtags@example.com", + Secret: secret, + }, + Status: mfclients.DisabledStatus, + }, + err: nil, + token: token, + }, + { + desc: "register a new enabled client with metadata", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithmetadata@example.com", + Secret: secret, + }, + Metadata: validCMetadata, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: token, + }, + { + desc: "register a new disabled client with metadata", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithmetadata@example.com", + Secret: secret, + }, + Metadata: validCMetadata, + }, + err: nil, + token: token, + }, + { + desc: "register a new disabled client", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithvalidstatus@example.com", + Secret: secret, + }, + }, + err: nil, + token: token, + }, + { + desc: "register a new client with valid disabled status", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithvalidstatus@example.com", + Secret: secret, + }, + Status: mfclients.DisabledStatus, + }, + err: nil, + token: token, + }, + { + desc: "register a new client with all fields", + client: mfclients.Client{ + Name: "newclientwithallfields", + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{ + Identity: "newclientwithallfields@example.com", + Secret: secret, + }, + Metadata: mfclients.Metadata{ + "name": "newclientwithallfields", + }, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: token, + }, + { + desc: "register a new client with missing identity", + 
client: mfclients.Client{ + Name: "clientWithMissingIdentity", + Credentials: mfclients.Credentials{ + Secret: secret, + }, + }, + err: errors.ErrMalformedEntity, + token: token, + }, + { + desc: "register a new client with invalid owner", + client: mfclients.Client{ + Owner: mocks.WrongID, + Credentials: mfclients.Credentials{ + Identity: "newclientwithinvalidowner@example.com", + Secret: secret, + }, + }, + err: errors.ErrMalformedEntity, + token: token, + }, + { + desc: "register a new client with empty secret", + client: mfclients.Client{ + Owner: testsutil.GenerateUUID(t, idProvider), + Credentials: mfclients.Credentials{ + Identity: "newclientwithemptysecret@example.com", + }, + }, + err: apiutil.ErrMissingSecret, + token: token, + }, + { + desc: "register a new client with invalid status", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithinvalidstatus@example.com", + Secret: secret, + }, + Status: mfclients.AllStatus, + }, + err: apiutil.ErrInvalidStatus, + token: token, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("Save", context.Background(), mock.Anything).Return(&mfclients.Client{}, tc.err) + registerTime := time.Now() + expected, err := svc.CreateThings(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.NotEmpty(t, expected[0].ID, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, expected[0].ID)) + assert.WithinDuration(t, expected[0].CreatedAt, registerTime, withinDuration, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected[0].CreatedAt, registerTime)) + tc.client.ID = expected[0].ID + tc.client.CreatedAt = expected[0].CreatedAt + tc.client.UpdatedAt = expected[0].UpdatedAt + tc.client.Credentials.Secret = expected[0].Credentials.Secret + tc.client.Owner = expected[0].Owner + tc.client.UpdatedBy = expected[0].UpdatedBy + assert.Equal(t, tc.client, 
expected[0], fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.client, expected[0])) + } + repoCall.Unset() + } +} + +func TestViewClient(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + token string + clientID string + response mfclients.Client + err error + }{ + { + desc: "view client successfully", + response: client, + token: token, + clientID: client.ID, + err: nil, + }, + { + desc: "view client with an invalid token", + response: mfclients.Client{}, + token: inValidToken, + clientID: "", + err: errors.ErrAuthorization, + }, + { + desc: "view client with valid token and invalid client id", + response: mfclients.Client{}, + token: token, + clientID: mocks.WrongID, + err: errors.ErrNotFound, + }, + { + desc: "view client with an invalid token and invalid client id", + response: mfclients.Client{}, + token: inValidToken, + clientID: mocks.WrongID, + err: errors.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall1 := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.response, tc.err) + rClient, err := svc.ViewClient(context.Background(), tc.token, tc.clientID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, rClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, rClient)) + repoCall1.Unset() + } +} + +func TestListClients(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + var nClients = uint64(200) + var aClients = []mfclients.Client{} + var OwnerID = testsutil.GenerateUUID(t, idProvider) + for i := uint64(1); i < nClients; i++ { + identity := fmt.Sprintf("TestListClients_%d@example.com", i) + client := mfclients.Client{ + Name: identity, + Credentials: mfclients.Credentials{ + Identity: identity, + Secret: "password", + }, + Tags: []string{"tag1", "tag2"}, + Metadata: mfclients.Metadata{"role": "client"}, + } + 
if i%50 == 0 { + client.Owner = OwnerID + client.Owner = testsutil.GenerateUUID(t, idProvider) + } + aClients = append(aClients, client) + } + + cases := []struct { + desc string + token string + page mfclients.Page + response mfclients.ClientsPage + size uint64 + err error + }{ + { + desc: "list clients with authorized token", + token: token, + + page: mfclients.Page{ + Status: mfclients.AllStatus, + }, + size: 0, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + err: nil, + }, + { + desc: "list clients with an invalid token", + token: inValidToken, + page: mfclients.Page{ + Status: mfclients.AllStatus, + }, + size: 0, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrAuthentication, + }, + { + desc: "list clients that are shared with me", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that are shared with me with a specific name", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Name: "TestListClients3", + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that are shared with me with an invalid name", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Name: "notpresentclient", + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: 
mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + size: 0, + }, + { + desc: "list clients that I own", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own with a specific name", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + Name: "TestListClients3", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own with an invalid name", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + Name: "notpresentclient", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + size: 0, + }, + { + desc: "list clients that I own and are shared with me", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + SharedBy: clients.MyKey, + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own and are shared with me with a specific name", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Owner: clients.MyKey, + Name: "TestListClients3", + Status: mfclients.AllStatus, + 
}, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own and are shared with me with an invalid name", + token: token, + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Owner: clients.MyKey, + Name: "notpresentclient", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + size: 0, + }, + { + desc: "list clients with offset and limit", + token: token, + + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: nClients - 6, + Offset: 0, + Limit: 0, + }, + Clients: aClients[6:nClients], + }, + size: nClients - 6, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, tc.err) + repoCall1 := cRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, tc.err) + page, err := svc.ListClients(context.Background(), tc.token, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClient(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + client1 := client + client2 := client + client1.Name = "Updated client" + client2.Metadata = mfclients.Metadata{"role": "test"} + + cases := []struct { + desc string + client mfclients.Client + response mfclients.Client + token string + err error + }{ + { + desc: "update client name with valid token", + client: client1, + 
response: client1, + token: token, + err: nil, + }, + { + desc: "update client name with invalid token", + client: client1, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthorization, + }, + { + desc: "update client name with invalid ID", + client: mfclients.Client{ + ID: mocks.WrongID, + Name: "Updated Client", + }, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthorization, + }, + { + desc: "update client metadata with valid token", + client: client2, + response: client2, + token: token, + err: nil, + }, + { + desc: "update client metadata with invalid token", + client: client2, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, tc.err) + repoCall1 := cRepo.On("Update", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClient(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestUpdateClientTags(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + client.Tags = []string{"updated"} + + cases := []struct { + desc string + client mfclients.Client + response mfclients.Client + token string + err error + }{ + { + desc: "update client tags with valid token", + client: client, + token: token, + response: client, + err: nil, + }, + { + desc: "update client tags with invalid token", + client: client, + token: "non-existent", + response: mfclients.Client{}, + err: errors.ErrAuthorization, + }, + { + desc: "update client name with invalid ID", + client: mfclients.Client{ + ID: 
mocks.WrongID, + Name: "Updated name", + }, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, tc.err) + repoCall1 := cRepo.On("UpdateTags", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClientTags(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestUpdateClientOwner(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + client.Owner = "newowner@mail.com" + + cases := []struct { + desc string + client mfclients.Client + response mfclients.Client + token string + err error + }{ + { + desc: "update client owner with valid token", + client: client, + token: token, + response: client, + err: nil, + }, + { + desc: "update client owner with invalid token", + client: client, + token: "non-existent", + response: mfclients.Client{}, + err: errors.ErrAuthorization, + }, + { + desc: "update client owner with invalid ID", + client: mfclients.Client{ + ID: mocks.WrongID, + Owner: "updatedowner@mail.com", + }, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, tc.err) + repoCall1 := cRepo.On("UpdateOwner", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClientOwner(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) 
+ assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestUpdateClientSecret(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + id string + newSecret string + token string + response mfclients.Client + err error + }{ + { + desc: "update client secret with valid token", + id: client.ID, + newSecret: "newSecret", + token: token, + response: client, + err: nil, + }, + { + desc: "update client secret with invalid token", + id: client.ID, + newSecret: "newPassword", + token: "non-existent", + response: mfclients.Client{}, + err: errors.ErrAuthorization, + }, + } + + for _, tc := range cases { + repoCall1 := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.response, tc.err) + repoCall2 := cRepo.On("RetrieveByIdentity", context.Background(), mock.Anything).Return(tc.response, tc.err) + repoCall3 := cRepo.On("UpdateSecret", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClientSecret(context.Background(), tc.token, tc.id, tc.newSecret) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + repoCall1.Unset() + repoCall2.Unset() + repoCall3.Unset() + } +} + +func TestEnableClient(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + enabledClient1 := mfclients.Client{ID: ID, Credentials: mfclients.Credentials{Identity: "client1@example.com", Secret: "password"}, Status: mfclients.EnabledStatus} + disabledClient1 := mfclients.Client{ID: ID, Credentials: mfclients.Credentials{Identity: "client3@example.com", Secret: "password"}, Status: mfclients.DisabledStatus} + endisabledClient1 := disabledClient1 + 
endisabledClient1.Status = mfclients.EnabledStatus + + cases := []struct { + desc string + id string + token string + client mfclients.Client + response mfclients.Client + err error + }{ + { + desc: "enable disabled client", + id: disabledClient1.ID, + token: token, + client: disabledClient1, + response: endisabledClient1, + err: nil, + }, + { + desc: "enable enabled client", + id: enabledClient1.ID, + token: token, + client: enabledClient1, + response: enabledClient1, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "enable non-existing client", + id: mocks.WrongID, + token: token, + client: mfclients.Client{}, + response: mfclients.Client{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall1 := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.client, tc.err) + repoCall2 := cRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.EnableClient(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall1.Unset() + repoCall2.Unset() + } + + cases2 := []struct { + desc string + status mfclients.Status + size uint64 + response mfclients.ClientsPage + }{ + { + desc: "list enabled clients", + status: mfclients.EnabledStatus, + size: 2, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1, endisabledClient1}, + }, + }, + { + desc: "list disabled clients", + status: mfclients.DisabledStatus, + size: 1, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{disabledClient1}, + }, + }, + { + desc: "list enabled and disabled clients", + status: mfclients.AllStatus, + size: 3, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Clients: 
[]mfclients.Client{enabledClient1, disabledClient1, endisabledClient1}, + }, + }, + } + + for _, tc := range cases2 { + pm := mfclients.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + } + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, nil) + repoCall1 := cRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListClients(context.Background(), token, pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Clients)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestDisableClient(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + + enabledClient1 := mfclients.Client{ID: ID, Credentials: mfclients.Credentials{Identity: "client1@example.com", Secret: "password"}, Status: mfclients.EnabledStatus} + disabledClient1 := mfclients.Client{ID: ID, Credentials: mfclients.Credentials{Identity: "client3@example.com", Secret: "password"}, Status: mfclients.DisabledStatus} + disenabledClient1 := enabledClient1 + disenabledClient1.Status = mfclients.DisabledStatus + + cases := []struct { + desc string + id string + token string + client mfclients.Client + response mfclients.Client + err error + }{ + { + desc: "disable enabled client", + id: enabledClient1.ID, + token: token, + client: enabledClient1, + response: disenabledClient1, + err: nil, + }, + { + desc: "disable disabled client", + id: disabledClient1.ID, + token: token, + client: disabledClient1, + response: mfclients.Client{}, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "disable non-existing client", + id: mocks.WrongID, + client: mfclients.Client{}, + token: token, + response: mfclients.Client{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + _ = cRepo.On("RetrieveByID", context.Background(), 
mock.Anything).Return(tc.client, tc.err) + repoCall1 := cRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.DisableClient(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall1.Unset() + } + + cases2 := []struct { + desc string + status mfclients.Status + size uint64 + response mfclients.ClientsPage + }{ + { + desc: "list enabled clients", + status: mfclients.EnabledStatus, + size: 1, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1}, + }, + }, + { + desc: "list disabled clients", + status: mfclients.DisabledStatus, + size: 2, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{disenabledClient1, disabledClient1}, + }, + }, + { + desc: "list enabled and disabled clients", + status: mfclients.AllStatus, + size: 3, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1, disabledClient1, disenabledClient1}, + }, + }, + } + + for _, tc := range cases2 { + pm := mfclients.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + } + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, nil) + repoCall1 := cRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListClients(context.Background(), token, pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Clients)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestListMembers(t *testing.T) { + svc, cRepo := newService(map[string]string{token: adminEmail}) + 
+ var nClients = uint64(10) + var aClients = []mfclients.Client{} + for i := uint64(1); i < nClients; i++ { + identity := fmt.Sprintf("member_%d@example.com", i) + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: identity, + Credentials: mfclients.Credentials{ + Identity: identity, + Secret: "password", + }, + Tags: []string{"tag1", "tag2"}, + Metadata: mfclients.Metadata{"role": "client"}, + } + aClients = append(aClients, client) + } + validToken := token + + cases := []struct { + desc string + token string + groupID string + page mfclients.Page + response mfclients.MembersPage + err error + }{ + { + desc: "list clients with authorized token", + token: validToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: mfclients.Page{ + Subject: adminEmail, + Owner: adminEmail, + Action: "g_list", + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Members: []mfclients.Client{}, + }, + err: nil, + }, + { + desc: "list clients with offset and limit", + token: validToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Status: mfclients.AllStatus, + Subject: adminEmail, + Owner: adminEmail, + Action: "g_list", + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: nClients - 6 - 1, + }, + Members: aClients[6 : nClients-1], + }, + }, + { + desc: "list clients with an invalid token", + token: inValidToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: mfclients.Page{ + Subject: adminEmail, + Action: "g_list", + Owner: adminEmail, + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrAuthentication, + }, + { + desc: "list clients with an invalid id", + token: validToken, + groupID: mocks.WrongID, + page: mfclients.Page{ + Subject: adminEmail, + Action: "g_list", + Owner: adminEmail, + }, + response: 
mfclients.MembersPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfclients.Client{}, tc.err) + repoCall1 := cRepo.On("Members", context.Background(), tc.groupID, tc.page).Return(tc.response, tc.err) + page, err := svc.ListClientsByGroup(context.Background(), tc.token, tc.groupID, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + repoCall.Unset() + repoCall1.Unset() + } +} diff --git a/things/standalone/doc.go b/things/clients/standalone/doc.go similarity index 100% rename from things/standalone/doc.go rename to things/clients/standalone/doc.go diff --git a/things/clients/standalone/standalone.go b/things/clients/standalone/standalone.go new file mode 100644 index 0000000000..1e762239cf --- /dev/null +++ b/things/clients/standalone/standalone.go @@ -0,0 +1,66 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package standalone + +import ( + "context" + + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" + "google.golang.org/grpc" +) + +var errUnsupported = errors.New("not supported in standalone mode") + +var _ policies.AuthServiceClient = (*singleUserRepo)(nil) + +type singleUserRepo struct { + id string + token string +} + +// NewAuthService creates single user repository for constrained environments. 
+func NewAuthService(id, token string) policies.AuthServiceClient { + return singleUserRepo{ + id: id, + token: token, + } +} + +func (repo singleUserRepo) Issue(ctx context.Context, req *policies.IssueReq, opts ...grpc.CallOption) (*policies.Token, error) { + return &policies.Token{}, errUnsupported +} + +func (repo singleUserRepo) Identify(ctx context.Context, token *policies.Token, opts ...grpc.CallOption) (*policies.UserIdentity, error) { + if repo.token != token.GetValue() { + return nil, errors.ErrAuthentication + } + + return &policies.UserIdentity{Id: repo.id}, nil +} + +func (repo singleUserRepo) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { + if repo.id != req.GetSub() { + return &policies.AuthorizeRes{}, errors.ErrAuthorization + } + return &policies.AuthorizeRes{Authorized: true}, nil +} + +func (repo singleUserRepo) AddPolicy(ctx context.Context, req *policies.AddPolicyReq, opts ...grpc.CallOption) (*policies.AddPolicyRes, error) { + if repo.token != req.GetToken() { + return &policies.AddPolicyRes{}, errors.ErrAuthorization + } + return &policies.AddPolicyRes{Authorized: true}, nil +} + +func (repo singleUserRepo) DeletePolicy(ctx context.Context, req *policies.DeletePolicyReq, opts ...grpc.CallOption) (*policies.DeletePolicyRes, error) { + if repo.token != req.GetToken() { + return &policies.DeletePolicyRes{}, errors.ErrAuthorization + } + return &policies.DeletePolicyRes{Deleted: true}, nil +} + +func (repo singleUserRepo) ListPolicies(ctx context.Context, in *policies.ListPoliciesReq, opts ...grpc.CallOption) (*policies.ListPoliciesRes, error) { + return &policies.ListPoliciesRes{}, errUnsupported +} diff --git a/things/clients/tracing/tracing.go b/things/clients/tracing/tracing.go new file mode 100644 index 0000000000..091e6d5279 --- /dev/null +++ b/things/clients/tracing/tracing.go @@ -0,0 +1,103 @@ +package tracing + +import ( + "context" + + mfclients 
"github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/things/clients" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ clients.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + svc clients.Service +} + +func TracingMiddleware(svc clients.Service, tracer trace.Tracer) clients.Service { + return &tracingMiddleware{tracer, svc} +} + +func (tm *tracingMiddleware) CreateThings(ctx context.Context, token string, clis ...mfclients.Client) ([]mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_create_client") + defer span.End() + + return tm.svc.CreateThings(ctx, token, clis...) +} + +func (tm *tracingMiddleware) ViewClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_client", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + return tm.svc.ViewClient(ctx, token, id) +} + +func (tm *tracingMiddleware) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_clients") + defer span.End() + return tm.svc.ListClients(ctx, token, pm) +} + +func (tm *tracingMiddleware) UpdateClient(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_name_and_metadata", trace.WithAttributes(attribute.String("name", cli.Name))) + defer span.End() + + return tm.svc.UpdateClient(ctx, token, cli) +} + +func (tm *tracingMiddleware) UpdateClientTags(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_tags", trace.WithAttributes(attribute.StringSlice("tags", cli.Tags))) + defer span.End() + + return tm.svc.UpdateClientTags(ctx, token, cli) +} + +func (tm *tracingMiddleware) UpdateClientSecret(ctx context.Context, token, oldSecret, 
newSecret string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_secret") + defer span.End() + + return tm.svc.UpdateClientSecret(ctx, token, oldSecret, newSecret) + +} + +func (tm *tracingMiddleware) UpdateClientOwner(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_owner", trace.WithAttributes(attribute.StringSlice("tags", cli.Tags))) + defer span.End() + + return tm.svc.UpdateClientOwner(ctx, token, cli) +} + +func (tm *tracingMiddleware) EnableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_enable_client", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.svc.EnableClient(ctx, token, id) +} + +func (tm *tracingMiddleware) DisableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_disable_client", trace.WithAttributes(attribute.String("id", id))) + defer span.End() + + return tm.svc.DisableClient(ctx, token, id) +} + +func (tm *tracingMiddleware) ListClientsByGroup(ctx context.Context, token, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_things_by_channel") + defer span.End() + + return tm.svc.ListClientsByGroup(ctx, token, groupID, pm) + +} + +func (tm *tracingMiddleware) ShareClient(ctx context.Context, token string, thingID string, actions, userIDs []string) error { + ctx, span := tm.tracer.Start(ctx, "svc_view_client", trace.WithAttributes(attribute.String("id", thingID))) + defer span.End() + return tm.svc.ShareClient(ctx, token, thingID, actions, userIDs) +} + +func (tm *tracingMiddleware) Identify(ctx context.Context, key string) (string, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_client", trace.WithAttributes(attribute.String("key", key))) + defer span.End() + return tm.svc.Identify(ctx, 
key) +} diff --git a/things/doc.go b/things/doc.go deleted file mode 100644 index 1206d3c56a..0000000000 --- a/things/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package things contains the domain concept definitions needed to support -// Mainflux things service functionality. -package things diff --git a/things/groups/api/endpoints.go b/things/groups/api/endpoints.go new file mode 100644 index 0000000000..2a4f20385e --- /dev/null +++ b/things/groups/api/endpoints.go @@ -0,0 +1,223 @@ +package api + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" +) + +func createGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createGroupReq) + if err := req.validate(); err != nil { + return createGroupRes{}, err + } + + group, err := svc.CreateGroups(ctx, req.token, req.Group) + if err != nil { + return createGroupRes{}, err + } + + return createGroupRes{created: true, Group: group[0]}, nil + } +} + +func createGroupsEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createGroupsReq) + if err := req.validate(); err != nil { + return groupPageRes{}, err + } + + gs, err := svc.CreateGroups(ctx, req.token, req.Groups...) 
+ if err != nil { + return groupPageRes{}, err + } + return buildGroupsResponse(mfgroups.GroupsPage{Groups: gs}), nil + } +} + +func viewGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(groupReq) + if err := req.validate(); err != nil { + return viewGroupRes{}, err + } + + group, err := svc.ViewGroup(ctx, req.token, req.id) + if err != nil { + return viewGroupRes{}, err + } + + return viewGroupRes{Group: group}, nil + } +} + +func updateGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateGroupReq) + if err := req.validate(); err != nil { + return updateGroupRes{}, err + } + + group := mfgroups.Group{ + ID: req.id, + Name: req.Name, + Description: req.Description, + Metadata: req.Metadata, + } + + group, err := svc.UpdateGroup(ctx, req.token, group) + if err != nil { + return updateGroupRes{}, err + } + + return updateGroupRes{Group: group}, nil + } +} + +func enableGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeGroupStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + group, err := svc.EnableGroup(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return changeStatusRes{Group: group}, nil + } +} + +func disableGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeGroupStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + group, err := svc.DisableGroup(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return changeStatusRes{Group: group}, nil + } +} + +func listGroupsEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) 
(interface{}, error) { + req := request.(listGroupsReq) + if err := req.validate(); err != nil { + return groupPageRes{}, err + } + page, err := svc.ListGroups(ctx, req.token, req.GroupsPage) + if err != nil { + return groupPageRes{}, err + } + + if req.tree { + return buildGroupsResponseTree(page), nil + } + + return buildGroupsResponse(page), nil + } +} + +func listMembershipsEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listMembershipReq) + if err := req.validate(); err != nil { + return membershipPageRes{}, err + } + + page, err := svc.ListMemberships(ctx, req.token, req.clientID, req.GroupsPage) + if err != nil { + return membershipPageRes{}, err + } + + res := membershipPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + Memberships: []viewMembershipRes{}, + } + for _, g := range page.Memberships { + res.Memberships = append(res.Memberships, viewMembershipRes{Group: g}) + } + + return res, nil + } +} + +func buildGroupsResponseTree(page mfgroups.GroupsPage) groupPageRes { + groupsMap := map[string]*mfgroups.Group{} + // Parents' map keeps its array of children. 
+ parentsMap := map[string][]*mfgroups.Group{} + for i := range page.Groups { + if _, ok := groupsMap[page.Groups[i].ID]; !ok { + groupsMap[page.Groups[i].ID] = &page.Groups[i] + parentsMap[page.Groups[i].ID] = make([]*mfgroups.Group, 0) + } + } + + for _, group := range groupsMap { + if children, ok := parentsMap[group.Parent]; ok { + children = append(children, group) + parentsMap[group.Parent] = children + } + } + + res := groupPageRes{ + pageRes: pageRes{ + Limit: page.Limit, + Offset: page.Offset, + Total: page.Total, + }, + Groups: []viewGroupRes{}, + } + + for _, group := range groupsMap { + if children, ok := parentsMap[group.ID]; ok { + group.Children = children + } + + } + + for _, group := range groupsMap { + view := toViewGroupRes(*group) + if children, ok := parentsMap[group.Parent]; len(children) == 0 || !ok { + res.Groups = append(res.Groups, view) + } + } + + return res +} + +func toViewGroupRes(group mfgroups.Group) viewGroupRes { + view := viewGroupRes{ + Group: group, + } + return view +} + +func buildGroupsResponse(gp mfgroups.GroupsPage) groupPageRes { + res := groupPageRes{ + pageRes: pageRes{ + Total: gp.Total, + }, + Groups: []viewGroupRes{}, + } + + for _, group := range gp.Groups { + view := viewGroupRes{ + Group: group, + } + res.Groups = append(res.Groups, view) + } + + return res +} diff --git a/things/groups/api/logging.go b/things/groups/api/logging.go new file mode 100644 index 0000000000..9f87177526 --- /dev/null +++ b/things/groups/api/logging.go @@ -0,0 +1,106 @@ +package api + +import ( + "context" + "fmt" + "time" + + mflog "github.com/mainflux/mainflux/logger" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" +) + +var _ groups.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger mflog.Logger + svc groups.Service +} + +func LoggingMiddleware(svc groups.Service, logger mflog.Logger) groups.Service { + return &loggingMiddleware{logger, svc} +} + +func 
(lm *loggingMiddleware) CreateGroups(ctx context.Context, token string, group ...mfgroups.Group) (rGroup []mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method create_channel for %d channels using token %s took %s to complete", len(group), token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.CreateGroups(ctx, token, group...) +} + +func (lm *loggingMiddleware) UpdateGroup(ctx context.Context, token string, group mfgroups.Group) (rGroup mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_channel for channel with id %s using token %s took %s to complete", group.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateGroup(ctx, token, group) +} + +func (lm *loggingMiddleware) ViewGroup(ctx context.Context, token, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method view_channel for channel with id %s using token %s took %s to complete", g.Name, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ViewGroup(ctx, token, id) +} + +func (lm *loggingMiddleware) ListGroups(ctx context.Context, token string, gp mfgroups.GroupsPage) (cg mfgroups.GroupsPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_channels using token %s took %s to complete", token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without 
errors.", message)) + }(time.Now()) + return lm.svc.ListGroups(ctx, token, gp) +} + +func (lm *loggingMiddleware) EnableGroup(ctx context.Context, token string, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method enable_channel for channel with id %s using token %s took %s to complete", id, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.EnableGroup(ctx, token, id) +} + +func (lm *loggingMiddleware) DisableGroup(ctx context.Context, token string, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method disable_channel for channel with id %s using token %s took %s to complete", id, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.DisableGroup(ctx, token, id) +} + +func (lm *loggingMiddleware) ListMemberships(ctx context.Context, token, thingID string, cp mfgroups.GroupsPage) (mp mfgroups.MembershipsPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_channels_by_thing for thing with id %s using token %s took %s to complete", thingID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListMemberships(ctx, token, thingID, cp) +} diff --git a/things/groups/api/metrics.go b/things/groups/api/metrics.go new file mode 100644 index 0000000000..6ac9640595 --- /dev/null +++ b/things/groups/api/metrics.go @@ -0,0 +1,83 @@ +package api + +import ( + "context" + "time" + + "github.com/go-kit/kit/metrics" + mfgroups 
"github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" +) + +var _ groups.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc groups.Service +} + +// MetricsMiddleware returns a new metrics middleware wrapper. +func MetricsMiddleware(svc groups.Service, counter metrics.Counter, latency metrics.Histogram) groups.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) CreateGroups(ctx context.Context, token string, g ...mfgroups.Group) ([]mfgroups.Group, error) { + defer func(begin time.Time) { + ms.counter.With("method", "create_channel").Add(1) + ms.latency.With("method", "create_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.CreateGroups(ctx, token, g...) +} + +func (ms *metricsMiddleware) UpdateGroup(ctx context.Context, token string, group mfgroups.Group) (rGroup mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_channel").Add(1) + ms.latency.With("method", "update_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateGroup(ctx, token, group) +} + +func (ms *metricsMiddleware) ViewGroup(ctx context.Context, token, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_channel").Add(1) + ms.latency.With("method", "view_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewGroup(ctx, token, id) +} + +func (ms *metricsMiddleware) ListGroups(ctx context.Context, token string, gp mfgroups.GroupsPage) (cg mfgroups.GroupsPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_channels").Add(1) + ms.latency.With("method", "list_channels").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListGroups(ctx, token, gp) +} + +func (ms *metricsMiddleware) 
EnableGroup(ctx context.Context, token string, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "enable_channel").Add(1) + ms.latency.With("method", "enable_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.EnableGroup(ctx, token, id) +} + +func (ms *metricsMiddleware) DisableGroup(ctx context.Context, token string, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "disable_channel").Add(1) + ms.latency.With("method", "disable_channel").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DisableGroup(ctx, token, id) +} + +func (ms *metricsMiddleware) ListMemberships(ctx context.Context, token, clientID string, gp mfgroups.GroupsPage) (mp mfgroups.MembershipsPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_channels_by_thing").Add(1) + ms.latency.With("method", "list_channels_by_thing").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListMemberships(ctx, token, clientID, gp) +} diff --git a/things/groups/api/requests.go b/things/groups/api/requests.go new file mode 100644 index 0000000000..867f36fa68 --- /dev/null +++ b/things/groups/api/requests.go @@ -0,0 +1,148 @@ +package api + +import ( + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +type createGroupReq struct { + mfgroups.Group + token string +} + +func (req createGroupReq) validate() error { + if len(req.Name) > api.MaxNameSize || req.Name == "" { + return apiutil.ErrNameSize + } + if len(req.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + + // Do the validation only if request contains ID + if req.ID != "" { + return api.ValidateUUID(req.ID) + } + return nil +} + +type createGroupsReq struct { + token string + Groups []mfgroups.Group +} + +func (req createGroupsReq) validate() error { 
+ if req.token == "" { + return apiutil.ErrBearerToken + } + + if len(req.Groups) <= 0 { + return apiutil.ErrEmptyList + } + + for _, channel := range req.Groups { + if channel.ID != "" { + if err := api.ValidateUUID(channel.ID); err != nil { + return err + } + } + if len(channel.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + } + + return nil +} + +type updateGroupReq struct { + token string + id string + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +func (req updateGroupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.id == "" { + return apiutil.ErrMissingID + } + + if len(req.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + return nil +} + +type listGroupsReq struct { + mfgroups.GroupsPage + token string + // - `true` - result is JSON tree representing groups hierarchy, + // - `false` - result is JSON array of groups. 
+ tree bool +} + +func (req listGroupsReq) validate() error { + if req.Level < mfgroups.MinLevel || req.Level > mfgroups.MaxLevel { + return apiutil.ErrInvalidLevel + } + + return nil +} + +type listMembershipReq struct { + mfgroups.GroupsPage + token string + clientID string +} + +func (req listMembershipReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.clientID == "" { + return apiutil.ErrMissingID + } + + if req.Limit > api.MaxLimitSize || req.Limit < 1 { + return apiutil.ErrLimitSize + } + + if len(req.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + + return nil +} + +type groupReq struct { + token string + id string +} + +func (req groupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type changeGroupStatusReq struct { + token string + id string +} + +func (req changeGroupStatusReq) validate() error { + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} diff --git a/things/groups/api/responses.go b/things/groups/api/responses.go new file mode 100644 index 0000000000..91953dde99 --- /dev/null +++ b/things/groups/api/responses.go @@ -0,0 +1,150 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/mainflux/mainflux" + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +var ( + _ mainflux.Response = (*viewMembershipRes)(nil) + _ mainflux.Response = (*membershipPageRes)(nil) + _ mainflux.Response = (*createGroupRes)(nil) + _ mainflux.Response = (*groupPageRes)(nil) + _ mainflux.Response = (*changeStatusRes)(nil) + _ mainflux.Response = (*viewGroupRes)(nil) + _ mainflux.Response = (*updateGroupRes)(nil) +) + +type viewMembershipRes struct { + mfgroups.Group `json:",inline"` +} + +func (res viewMembershipRes) Code() int { + return http.StatusOK +} + +func (res viewMembershipRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewMembershipRes) 
Empty() bool { + return false +} + +type membershipPageRes struct { + pageRes + Memberships []viewMembershipRes `json:"channels"` +} + +func (res membershipPageRes) Code() int { + return http.StatusOK +} + +func (res membershipPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res membershipPageRes) Empty() bool { + return false +} + +type viewGroupRes struct { + mfgroups.Group `json:",inline"` +} + +func (res viewGroupRes) Code() int { + return http.StatusOK +} + +func (res viewGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewGroupRes) Empty() bool { + return false +} + +type createGroupRes struct { + mfgroups.Group `json:",inline"` + created bool +} + +func (res createGroupRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res createGroupRes) Headers() map[string]string { + if res.created { + return map[string]string{ + "Location": fmt.Sprintf("/channels/%s", res.ID), + } + } + + return map[string]string{} +} + +func (res createGroupRes) Empty() bool { + return false +} + +type groupPageRes struct { + pageRes + Groups []viewGroupRes `json:"channels"` +} + +type pageRes struct { + Limit uint64 `json:"limit,omitempty"` + Offset uint64 `json:"offset,omitempty"` + Total uint64 `json:"total,omitempty"` +} + +func (res groupPageRes) Code() int { + return http.StatusOK +} + +func (res groupPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res groupPageRes) Empty() bool { + return false +} + +type updateGroupRes struct { + mfgroups.Group `json:",inline"` +} + +func (res updateGroupRes) Code() int { + return http.StatusOK +} + +func (res updateGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res updateGroupRes) Empty() bool { + return false +} + +type changeStatusRes struct { + mfgroups.Group `json:",inline"` +} + +func (res changeStatusRes) Code() int { + return http.StatusOK +} + +func 
(res changeStatusRes) Headers() map[string]string { + return map[string]string{} +} + +func (res changeStatusRes) Empty() bool { + return false +} diff --git a/things/groups/api/transport.go b/things/groups/api/transport.go new file mode 100644 index 0000000000..aa0ecb7cac --- /dev/null +++ b/things/groups/api/transport.go @@ -0,0 +1,268 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + kithttp "github.com/go-kit/kit/transport/http" + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" +) + +// MakeHandler returns a HTTP handler for API endpoints. +func MakeHandler(svc groups.Service, mux *bone.Mux, logger logger.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + mux.Post("/channels", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("create_channel"))(createGroupEndpoint(svc)), + decodeGroupCreate, + api.EncodeResponse, + opts..., + )) + + mux.Post("/channels/bulk", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("create_channels"))(createGroupsEndpoint(svc)), + decodeGroupsCreate, + api.EncodeResponse, + opts..., + )) + + mux.Get("/channels/:chanID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("view_channel"))(viewGroupEndpoint(svc)), + decodeGroupRequest, + api.EncodeResponse, + opts..., + )) + + mux.Put("/channels/:chanID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_channel"))(updateGroupEndpoint(svc)), + decodeGroupUpdate, + api.EncodeResponse, + 
opts..., + )) + + mux.Get("/things/:thingID/channels", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_channels_by_thing"))(listMembershipsEndpoint(svc)), + decodeListMembershipRequest, + api.EncodeResponse, + opts..., + )) + + mux.Get("/channels", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_channels"))(listGroupsEndpoint(svc)), + decodeListGroupsRequest, + api.EncodeResponse, + opts..., + )) + + mux.Post("/channels/:chanID/enable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("enable_channel"))(enableGroupEndpoint(svc)), + decodeChangeGroupStatus, + api.EncodeResponse, + opts..., + )) + + mux.Post("/channels/:chanID/disable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("disable_channel"))(disableGroupEndpoint(svc)), + decodeChangeGroupStatus, + api.EncodeResponse, + opts..., + )) + return mux +} + +func decodeListMembershipRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return nil, err + } + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, err + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + parentID, err := apiutil.ReadStringQuery(r, api.ParentKey, "") + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + dir, err := apiutil.ReadNumQuery[int64](r, api.DirKey, -1) + if err != nil { + return nil, err + } + st, 
err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listMembershipReq{ + token: apiutil.ExtractBearerToken(r), + clientID: bone.GetValue(r, "thingID"), + GroupsPage: mfgroups.GroupsPage{ + Level: level, + ID: parentID, + Page: mfgroups.Page{ + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Name: name, + Metadata: meta, + Status: st, + }, + Direction: dir, + }, + } + return req, nil + +} + +func decodeListGroupsRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return nil, err + } + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, err + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + parentID, err := apiutil.ReadStringQuery(r, api.ParentKey, "") + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, err + } + dir, err := apiutil.ReadNumQuery[int64](r, api.DirKey, -1) + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + GroupsPage: mfgroups.GroupsPage{ + Level: level, + ID: parentID, + Page: mfgroups.Page{ + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Name: name, + Metadata: meta, + Status: st, + }, + Direction: dir, + }, + } + return req, nil +} + +func 
decodeGroupCreate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + var g mfgroups.Group + if err := json.NewDecoder(r.Body).Decode(&g); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + req := createGroupReq{ + Group: g, + token: apiutil.ExtractBearerToken(r), + } + + return req, nil +} + +func decodeGroupsCreate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := createGroupsReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req.Groups); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeGroupUpdate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateGroupReq{ + id: bone.GetValue(r, "chanID"), + token: apiutil.ExtractBearerToken(r), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + return req, nil +} + +func decodeGroupRequest(_ context.Context, r *http.Request) (interface{}, error) { + req := groupReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "chanID"), + } + return req, nil +} + +func decodeChangeGroupStatus(_ context.Context, r *http.Request) (interface{}, error) { + req := changeGroupStatusReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "chanID"), + } + + return req, nil +} diff --git a/things/groups/groups.go b/things/groups/groups.go new file mode 100644 index 0000000000..9bcbec9310 --- /dev/null +++ b/things/groups/groups.go @@ -0,0 +1,32 @@ +package groups + +import ( + "context" + + 
"github.com/mainflux/mainflux/pkg/groups" +) + +// Service specifies an API that must be fulfilled by the domain service +// implementation, and all of its decorators (e.g. logging & metrics). +type Service interface { + // CreateGroup creates new group. + CreateGroups(ctx context.Context, token string, gs ...groups.Group) ([]groups.Group, error) + + // UpdateGroup updates the group identified by the provided ID. + UpdateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) + + // ViewGroup retrieves data about the group identified by ID. + ViewGroup(ctx context.Context, token, id string) (groups.Group, error) + + // ListGroups retrieves groups. + ListGroups(ctx context.Context, token string, gm groups.GroupsPage) (groups.GroupsPage, error) + + // ListMemberships retrieves everything that is assigned to a group identified by clientID. + ListMemberships(ctx context.Context, token, clientID string, gm groups.GroupsPage) (groups.MembershipsPage, error) + + // EnableGroup logically enables the group identified with the provided ID. + EnableGroup(ctx context.Context, token, id string) (groups.Group, error) + + // DisableGroup logically disables the group identified with the provided ID. 
+ DisableGroup(ctx context.Context, token, id string) (groups.Group, error) +} diff --git a/things/groups/mocks/groups.go b/things/groups/mocks/groups.go new file mode 100644 index 0000000000..2ac974a984 --- /dev/null +++ b/things/groups/mocks/groups.go @@ -0,0 +1,77 @@ +package mocks + +import ( + "context" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/stretchr/testify/mock" +) + +const WrongID = "wrongID" + +var _ mfgroups.Repository = (*GroupRepository)(nil) + +type GroupRepository struct { + mock.Mock +} + +func (m *GroupRepository) ChangeStatus(ctx context.Context, group mfgroups.Group) (mfgroups.Group, error) { + ret := m.Called(ctx, group) + + if group.ID == WrongID { + return mfgroups.Group{}, errors.ErrNotFound + } + if group.Status != mfclients.EnabledStatus && group.Status != mfclients.DisabledStatus { + return mfgroups.Group{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfgroups.Group), ret.Error(1) +} + +func (m *GroupRepository) Memberships(ctx context.Context, clientID string, gm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + ret := m.Called(ctx, clientID, gm) + + if clientID == WrongID { + return mfgroups.MembershipsPage{}, errors.ErrNotFound + } + + return ret.Get(0).(mfgroups.MembershipsPage), ret.Error(1) +} + +func (m *GroupRepository) RetrieveAll(ctx context.Context, gm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + ret := m.Called(ctx, gm) + + return ret.Get(0).(mfgroups.GroupsPage), ret.Error(1) +} + +func (m *GroupRepository) RetrieveByID(ctx context.Context, id string) (mfgroups.Group, error) { + ret := m.Called(ctx, id) + if id == WrongID { + return mfgroups.Group{}, errors.ErrNotFound + } + + return ret.Get(0).(mfgroups.Group), ret.Error(1) +} + +func (m *GroupRepository) Save(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + ret := m.Called(ctx, g) + if g.Parent == WrongID 
{ + return mfgroups.Group{}, errors.ErrCreateEntity + } + if g.Owner == WrongID { + return mfgroups.Group{}, errors.ErrCreateEntity + } + + return g, ret.Error(1) +} + +func (m *GroupRepository) Update(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + ret := m.Called(ctx, g) + if g.ID == WrongID { + return mfgroups.Group{}, errors.ErrNotFound + } + + return ret.Get(0).(mfgroups.Group), ret.Error(1) +} diff --git a/things/groups/postgres/doc.go b/things/groups/postgres/doc.go new file mode 100644 index 0000000000..bf560bea28 --- /dev/null +++ b/things/groups/postgres/doc.go @@ -0,0 +1 @@ +package postgres diff --git a/things/groups/postgres/groups.go b/things/groups/postgres/groups.go new file mode 100644 index 0000000000..5603f81d10 --- /dev/null +++ b/things/groups/postgres/groups.go @@ -0,0 +1,435 @@ +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/jmoiron/sqlx" + "github.com/mainflux/mainflux/internal/postgres" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +var _ mfgroups.Repository = (*grepo)(nil) + +type grepo struct { + db postgres.Database +} + +// NewRepository instantiates a PostgreSQL implementation of group +// repository. +func NewRepository(db postgres.Database) mfgroups.Repository { + return &grepo{ + db: db, + } +} + +// TODO - check parent group write access. 
+func (repo grepo) Save(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + q := `INSERT INTO groups (name, description, id, owner_id, metadata, created_at, updated_at, updated_by, status) + VALUES (:name, :description, :id, :owner_id, :metadata, :created_at, :updated_at, :updated_by, :status) + RETURNING id, name, description, owner_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status;` + if g.Parent != "" { + q = `INSERT INTO groups (name, description, id, owner_id, parent_id, metadata, created_at, updated_at, updated_by, status) + VALUES (:name, :description, :id, :owner_id, :parent_id, :metadata, :created_at, :updated_at, :updated_by, :status) + RETURNING id, name, description, owner_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status;` + } + dbg, err := toDBGroup(g) + if err != nil { + return mfgroups.Group{}, err + } + row, err := repo.db.NamedQueryContext(ctx, q, dbg) + if err != nil { + return mfgroups.Group{}, postgres.HandleError(err, errors.ErrCreateEntity) + } + + defer row.Close() + row.Next() + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mfgroups.Group{}, err + } + + return toGroup(dbg) +} + +func (repo grepo) RetrieveByID(ctx context.Context, id string) (mfgroups.Group, error) { + dbu := dbGroup{ + ID: id, + } + q := `SELECT id, name, owner_id, COALESCE(parent_id, '') AS parent_id, description, metadata, created_at, updated_at, updated_by, status FROM groups + WHERE id = $1` + if err := repo.db.QueryRowxContext(ctx, q, dbu.ID).StructScan(&dbu); err != nil { + if err == sql.ErrNoRows { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, err) + + } + return mfgroups.Group{}, errors.Wrap(errors.ErrViewEntity, err) + } + return toGroup(dbu) +} + +func (repo grepo) RetrieveAll(ctx context.Context, gm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + var q string + query, err := buildQuery(gm) + if err != nil { + return 
mfgroups.GroupsPage{}, err + } + + if gm.ID != "" { + q = buildHierachy(gm) + } + if gm.ID == "" { + q = `SELECT DISTINCT g.id, g.owner_id, COALESCE(g.parent_id, '') AS parent_id, g.name, g.description, + g.metadata, g.created_at, g.updated_at, g.updated_by, g.status FROM groups g` + } + q = fmt.Sprintf("%s %s ORDER BY g.updated_at LIMIT :limit OFFSET :offset;", q, query) + + dbPage, err := toDBGroupPage(gm) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + defer rows.Close() + + items, err := repo.processRows(rows) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + + cq := "SELECT COUNT(*) FROM groups g" + if query != "" { + cq = fmt.Sprintf(" %s %s", cq, query) + } + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + + page := gm + page.Groups = items + page.Total = total + + return page, nil +} + +func (repo grepo) Memberships(ctx context.Context, clientID string, gm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + var q string + query, err := buildQuery(gm) + if err != nil { + return mfgroups.MembershipsPage{}, err + } + if gm.ID != "" { + q = buildHierachy(gm) + } + if gm.ID == "" { + q = `SELECT g.id, g.owner_id, COALESCE(g.parent_id, '') AS parent_id, g.name, g.description, + g.metadata, g.created_at, g.updated_at, g.updated_by, g.status FROM groups g` + } + aq := "" + // If not admin, the client needs to have a g_list action on the group + if gm.Subject != "" { + aq = "AND policies.object IN (SELECT object FROM policies WHERE subject = :subject AND :action=ANY(actions))" + } + q = fmt.Sprintf(`%s INNER JOIN policies ON g.id=policies.object %s AND policies.subject = 
:client_id %s + ORDER BY g.updated_at LIMIT :limit OFFSET :offset;`, q, query, aq) + + dbPage, err := toDBGroupPage(gm) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + dbPage.ClientID = clientID + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + defer rows.Close() + + var items []mfgroups.Group + for rows.Next() { + dbg := dbGroup{} + if err := rows.StructScan(&dbg); err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + group, err := toGroup(dbg) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + items = append(items, group) + } + + cq := fmt.Sprintf(`SELECT COUNT(*) FROM groups g INNER JOIN policies + ON g.id=policies.object %s AND policies.subject = :client_id`, query) + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + page := mfgroups.MembershipsPage{ + Memberships: items, + Page: mfgroups.Page{ + Total: total, + }, + } + + return page, nil +} + +func (repo grepo) Update(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + var query []string + var upq string + if g.Name != "" { + query = append(query, "name = :name,") + } + if g.Description != "" { + query = append(query, "description = :description,") + } + if g.Metadata != nil { + query = append(query, "metadata = :metadata,") + } + if len(query) > 0 { + upq = strings.Join(query, " ") + } + g.Status = mfclients.EnabledStatus + q := fmt.Sprintf(`UPDATE groups SET %s updated_at = :updated_at, updated_by = :updated_by + WHERE owner_id = :owner_id AND id = :id AND status = :status + RETURNING id, name, description, owner_id, COALESCE(parent_id, '') AS parent_id, 
metadata, created_at, updated_at, updated_by, status`, upq) + + dbu, err := toDBGroup(g) + if err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbu) + if err != nil { + return mfgroups.Group{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbu = dbGroup{} + if err := row.StructScan(&dbu); err != nil { + return mfgroups.Group{}, errors.Wrap(err, errors.ErrUpdateEntity) + } + return toGroup(dbu) +} + +func (repo grepo) ChangeStatus(ctx context.Context, group mfgroups.Group) (mfgroups.Group, error) { + qc := `UPDATE groups SET status = :status WHERE id = :id RETURNING id, name, description, owner_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status` + + dbg, err := toDBGroup(group) + if err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + + row, err := repo.db.NamedQueryContext(ctx, qc, dbg) + if err != nil { + return mfgroups.Group{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mfgroups.Group{}, errors.Wrap(err, errors.ErrUpdateEntity) + } + + return toGroup(dbg) + +} + +func buildHierachy(gm mfgroups.GroupsPage) string { + query := "" + switch { + case gm.Direction >= 0: // ancestors + query = `WITH RECURSIVE groups_cte as ( + SELECT id, COALESCE(parent_id, '') AS parent_id, owner_id, name, description, metadata, created_at, updated_at, updated_by, status, 0 as level from groups WHERE id = :id + UNION SELECT x.id, COALESCE(x.parent_id, '') AS parent_id, x.owner_id, x.name, x.description, x.metadata, x.created_at, x.updated_at, x.updated_by, x.status, level - 1 from 
groups x + INNER JOIN groups_cte a ON a.parent_id = x.id + ) SELECT * FROM groups_cte g` + + case gm.Direction < 0: // descendants + query = `WITH RECURSIVE groups_cte as ( + SELECT id, COALESCE(parent_id, '') AS parent_id, owner_id, name, description, metadata, created_at, updated_at, updated_by, status, 0 as level, CONCAT('', '', id) as path from groups WHERE id = :id + UNION SELECT x.id, COALESCE(x.parent_id, '') AS parent_id, x.owner_id, x.name, x.description, x.metadata, x.created_at, x.updated_at, x.updated_by, x.status, level + 1, CONCAT(path, '.', x.id) as path from groups x + INNER JOIN groups_cte d ON d.id = x.parent_id + ) SELECT * FROM groups_cte g` + } + return query +} +func buildQuery(gm mfgroups.GroupsPage) (string, error) { + queries := []string{} + + if gm.Name != "" { + queries = append(queries, "g.name = :name") + } + if gm.Status != mfclients.AllStatus { + queries = append(queries, "g.status = :status") + } + + if gm.Subject != "" { + queries = append(queries, "(g.owner_id = :owner_id OR id IN (SELECT object as id FROM policies WHERE subject = :subject AND :action=ANY(actions)))") + } + if len(gm.Metadata) > 0 { + queries = append(queries, "'g.metadata @> :metadata'") + } + if len(queries) > 0 { + return fmt.Sprintf("WHERE %s", strings.Join(queries, " AND ")), nil + } + return "", nil +} + +type dbGroup struct { + ID string `db:"id"` + Parent string `db:"parent_id"` + Owner string `db:"owner_id"` + Name string `db:"name"` + Description string `db:"description"` + Level int `db:"level"` + Path string `db:"path,omitempty"` + Metadata []byte `db:"metadata"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy *string `db:"updated_by,omitempty"` + Status mfclients.Status `db:"status"` +} + +func toDBGroup(g mfgroups.Group) (dbGroup, error) { + data := []byte("{}") + if len(g.Metadata) > 0 { + b, err := json.Marshal(g.Metadata) + if err != nil { + return dbGroup{}, 
errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + var updatedAt sql.NullTime + if !g.UpdatedAt.IsZero() { + updatedAt = sql.NullTime{Time: g.UpdatedAt, Valid: true} + } + var updatedBy *string + if g.UpdatedBy != "" { + updatedBy = &g.UpdatedBy + } + return dbGroup{ + ID: g.ID, + Name: g.Name, + Parent: g.Parent, + Owner: g.Owner, + Description: g.Description, + Metadata: data, + Path: g.Path, + CreatedAt: g.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + Status: g.Status, + }, nil +} + +func toGroup(g dbGroup) (mfgroups.Group, error) { + var metadata mfclients.Metadata + if g.Metadata != nil { + if err := json.Unmarshal([]byte(g.Metadata), &metadata); err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + } + var updatedAt time.Time + if g.UpdatedAt.Valid { + updatedAt = g.UpdatedAt.Time + } + var updatedBy string + if g.UpdatedBy != nil { + updatedBy = *g.UpdatedBy + } + + return mfgroups.Group{ + ID: g.ID, + Name: g.Name, + Parent: g.Parent, + Owner: g.Owner, + Description: g.Description, + Metadata: metadata, + Level: g.Level, + Path: g.Path, + UpdatedAt: updatedAt, + CreatedAt: g.CreatedAt, + UpdatedBy: updatedBy, + Status: g.Status, + }, nil +} + +func (gr grepo) processRows(rows *sqlx.Rows) ([]mfgroups.Group, error) { + var items []mfgroups.Group + for rows.Next() { + dbg := dbGroup{} + if err := rows.StructScan(&dbg); err != nil { + return items, err + } + group, err := toGroup(dbg) + if err != nil { + return items, err + } + items = append(items, group) + } + return items, nil +} + +func toDBGroupPage(pm mfgroups.GroupsPage) (dbGroupPage, error) { + level := mfgroups.MaxLevel + if pm.Level < mfgroups.MaxLevel { + level = pm.Level + } + data := []byte("{}") + if len(pm.Metadata) > 0 { + b, err := json.Marshal(pm.Metadata) + if err != nil { + return dbGroupPage{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + return dbGroupPage{ + ID: pm.ID, + Name: pm.Name, + Metadata: data, + 
Path: pm.Path, + Level: level, + Total: pm.Total, + Offset: pm.Offset, + Limit: pm.Limit, + ParentID: pm.ID, + Owner: pm.OwnerID, + Subject: pm.Subject, + Action: pm.Action, + Status: pm.Status, + }, nil +} + +type dbGroupPage struct { + ClientID string `db:"client_id"` + ID string `db:"id"` + Name string `db:"name"` + ParentID string `db:"parent_id"` + Owner string `db:"owner_id"` + Metadata []byte `db:"metadata"` + Path string `db:"path"` + Level uint64 `db:"level"` + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` + Subject string `db:"subject"` + Action string `db:"action"` + Status mfclients.Status `db:"status"` +} diff --git a/things/groups/postgres/groups_test.go b/things/groups/postgres/groups_test.go new file mode 100644 index 0000000000..a7bcc2005b --- /dev/null +++ b/things/groups/postgres/groups_test.go @@ -0,0 +1,600 @@ +package postgres_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/postgres" + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + cpostgres "github.com/mainflux/mainflux/things/clients/postgres" + gpostgres "github.com/mainflux/mainflux/things/groups/postgres" + "github.com/mainflux/mainflux/things/policies" + ppostgres "github.com/mainflux/mainflux/things/policies/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + maxNameSize = 1024 + maxDescSize = 1024 + maxLevel = uint64(5) + groupName = "group" + description = "description" +) + +var ( + wrongID = "wrong-id" + invalidName = strings.Repeat("m", maxNameSize+10) + validDesc = strings.Repeat("m", 100) + invalidDesc = strings.Repeat("m", maxDescSize+1) + metadata = mfclients.Metadata{ + "admin": "true", + } + idProvider = uuid.New() +) + +func 
TestGroupSave(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewRepository(database) + + usrID := testsutil.GenerateUUID(t, idProvider) + grpID := testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + group mfgroups.Group + err error + }{ + { + desc: "create new group successfully", + group: mfgroups.Group{ + ID: grpID, + Name: groupName, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create a new group with an existing name", + group: mfgroups.Group{ + ID: grpID, + Name: groupName, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrConflict, + }, + { + desc: "create group with an invalid name", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: invalidName, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create a group with invalid ID", + group: mfgroups.Group{ + ID: usrID, + Name: "withInvalidDescription", + Description: invalidDesc, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create group with description", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "withDescription", + Description: validDesc, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create group with invalid description", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "withInvalidDescription", + Description: invalidDesc, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create group with parent", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Parent: grpID, + Name: "withParent", + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create a group with an invalid parent", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Parent: invalidName, + Name: "withInvalidParent", + Status: 
mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create a group with an owner", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: usrID, + Name: "withOwner", + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create a group with an invalid owner", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: invalidName, + Name: "withInvalidOwner", + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create a group with metadata", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "withMetadata", + Metadata: metadata, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + } + + for _, tc := range cases { + _, err := groupRepo.Save(context.Background(), tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + +} + +func TestGroupRetrieveByID(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewRepository(database) + + uid := testsutil.GenerateUUID(t, idProvider) + group1 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: groupName + "TestGroupRetrieveByID1", + Owner: uid, + Status: mfclients.EnabledStatus, + } + + _, err := groupRepo.Save(context.Background(), group1) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + retrieved, err := groupRepo.RetrieveByID(context.Background(), group1.ID) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + assert.True(t, retrieved.ID == group1.ID, fmt.Sprintf("Save group, ID: expected %s got %s\n", group1.ID, retrieved.ID)) + + // Round to milliseconds as otherwise saving and retrieving from DB + // adds rounding error. 
+ creationTime := time.Now().UTC().Round(time.Millisecond) + group2 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: groupName + "TestGroupRetrieveByID", + Owner: uid, + Parent: group1.ID, + CreatedAt: creationTime, + UpdatedAt: creationTime, + Description: description, + Metadata: metadata, + Status: mfclients.EnabledStatus, + } + + _, err = groupRepo.Save(context.Background(), group2) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + retrieved, err = groupRepo.RetrieveByID(context.Background(), group2.ID) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + assert.True(t, retrieved.ID == group2.ID, fmt.Sprintf("Save group, ID: expected %s got %s\n", group2.ID, retrieved.ID)) + assert.True(t, retrieved.CreatedAt.Equal(creationTime), fmt.Sprintf("Save group, CreatedAt: expected %s got %s\n", creationTime, retrieved.CreatedAt)) + assert.True(t, retrieved.UpdatedAt.Equal(creationTime), fmt.Sprintf("Save group, UpdatedAt: expected %s got %s\n", creationTime, retrieved.UpdatedAt)) + assert.True(t, retrieved.Parent == group1.ID, fmt.Sprintf("Save group, Level: expected %s got %s\n", group1.ID, retrieved.Parent)) + assert.True(t, retrieved.Description == description, fmt.Sprintf("Save group, Description: expected %v got %v\n", retrieved.Description, description)) + + retrieved, err = groupRepo.RetrieveByID(context.Background(), testsutil.GenerateUUID(t, idProvider)) + assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("Retrieve group: expected %s got %s\n", errors.ErrNotFound, err)) +} + +func TestGroupRetrieveAll(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewRepository(database) + + var nGroups = uint64(200) + var ownerID = testsutil.GenerateUUID(t, idProvider) + var parentID string + for i := uint64(0); i < nGroups; i++ { + creationTime := time.Now().UTC() + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: 
fmt.Sprintf("%s-%d", groupName, i), + Description: fmt.Sprintf("%s-description-%d", groupName, i), + CreatedAt: creationTime, + UpdatedAt: creationTime, + Status: mfclients.EnabledStatus, + } + if i == 1 { + parentID = group.ID + } + if i%10 == 0 { + group.Owner = ownerID + group.Parent = parentID + } + if i%50 == 0 { + group.Status = mfclients.DisabledStatus + } + _, err := groupRepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + parentID = group.ID + } + + cases := map[string]struct { + Size uint64 + Metadata mfgroups.GroupsPage + }{ + "retrieve all groups": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: nGroups, + }, + "retrieve all groups with offset": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 50, + Limit: nGroups, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: nGroups - 50, + }, + "retrieve all groups with limit": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 0, + Limit: 50, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 50, + }, + "retrieve all groups with offset and limit": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 50, + Limit: 50, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 50, + }, + "retrieve all groups with offset greater than limit": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 250, + Limit: nGroups, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 0, + }, + "retrieve all groups with owner id": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Limit: nGroups, + Subject: ownerID, + OwnerID: ownerID, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 20, + }, + } + + for desc, tc := range cases 
{ + page, err := groupRepo.RetrieveAll(context.Background(), tc.Metadata) + size := len(page.Groups) + assert.Equal(t, tc.Size, uint64(size), fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.Size, size)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) + } +} + +func TestGroupUpdate(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewRepository(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + creationTime := time.Now().UTC() + updateTime := time.Now().UTC() + groupID := testsutil.GenerateUUID(t, idProvider) + + group := mfgroups.Group{ + ID: groupID, + Name: groupName + "TestGroupUpdate", + Owner: uid, + CreatedAt: creationTime, + UpdatedAt: creationTime, + Description: description, + Metadata: metadata, + Status: mfclients.EnabledStatus, + } + updatedName := groupName + "Updated" + updatedMetadata := mfclients.Metadata{"admin": "false"} + updatedDescription := description + "updated" + _, err := groupRepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) + + retrieved, err := groupRepo.RetrieveByID(context.Background(), group.ID) + require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) + + cases := []struct { + desc string + groupUpdate mfgroups.Group + groupExpected mfgroups.Group + err error + }{ + { + desc: "update group name for existing id", + groupUpdate: mfgroups.Group{ + ID: group.ID, + Name: updatedName, + UpdatedAt: updateTime, + Owner: uid, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + Metadata: retrieved.Metadata, + Description: retrieved.Description, + }, + err: nil, + }, + { + desc: "update group metadata for existing id", + groupUpdate: mfgroups.Group{ + ID: group.ID, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + Owner: uid, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + Description: 
retrieved.Description, + }, + err: nil, + }, + { + desc: "update group description for existing id", + groupUpdate: mfgroups.Group{ + ID: group.ID, + UpdatedAt: updateTime, + Description: updatedDescription, + Owner: uid, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + Description: updatedDescription, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + }, + err: nil, + }, + { + desc: "update group name and metadata for existing id", + groupUpdate: mfgroups.Group{ + ID: group.ID, + Name: updatedName, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + Owner: uid, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + Description: updatedDescription, + }, + err: nil, + }, + { + desc: "update group for invalid name", + groupUpdate: mfgroups.Group{ + ID: group.ID, + Owner: uid, + Name: invalidName, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "update group for invalid description", + groupUpdate: mfgroups.Group{ + ID: group.ID, + Owner: uid, + Description: invalidDesc, + }, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + updated, err := groupRepo.Update(context.Background(), tc.groupUpdate) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.True(t, updated.Name == tc.groupExpected.Name, fmt.Sprintf("%s:Name: expected %s got %s\n", tc.desc, tc.groupExpected.Name, updated.Name)) + assert.True(t, updated.Description == tc.groupExpected.Description, fmt.Sprintf("%s:Description: expected %s got %s\n", tc.desc, tc.groupExpected.Description, updated.Description)) + assert.True(t, updated.Metadata["admin"] == tc.groupExpected.Metadata["admin"], fmt.Sprintf("%s:Metadata: expected %d got %d\n", tc.desc, tc.groupExpected.Metadata["admin"], updated.Metadata["admin"])) + } + } +} + +func TestClientsMemberships(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + 
postgres.NewDatabase(db, tracer) + crepo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + prepo := ppostgres.NewRepository(database) + + clientA := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "client-memberships", + Credentials: mfclients.Credentials{ + Identity: "client-memberships1@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + clientB := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "client-memberships", + Credentials: mfclients.Credentials{ + Identity: "client-memberships2@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "group-membership", + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + + policyA := policies.Policy{ + Subject: clientA.ID, + Object: group.ID, + Actions: []string{"g_list"}, + } + policyB := policies.Policy{ + Subject: clientB.ID, + Object: group.ID, + Actions: []string{"g_list"}, + } + + _, err := crepo.Save(context.Background(), clientA) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save client: expected %v got %s\n", nil, err)) + _, err = crepo.Save(context.Background(), clientB) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save client: expected %v got %s\n", nil, err)) + _, err = grepo.Save(context.Background(), group) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save group: expected %v got %s\n", nil, err)) + _, err = prepo.Save(context.Background(), policyA) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save policy: expected %v got %s\n", nil, err)) + _, err = prepo.Save(context.Background(), policyB) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save policy: expected %v got %s\n", nil, err)) + + cases := 
map[string]struct { + ID string + err error + }{ + "retrieve membership for existing client": {clientA.ID, nil}, + "retrieve membership for non-existing client": {wrongID, nil}, + } + + for desc, tc := range cases { + mp, err := grepo.Memberships(context.Background(), tc.ID, mfgroups.GroupsPage{Page: mfgroups.Page{Total: 10, Offset: 0, Limit: 10, Status: mfclients.AllStatus, Subject: clientB.ID, Action: "g_list"}}) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + if tc.ID == clientA.ID { + assert.ElementsMatch(t, mp.Memberships, []mfgroups.Group{group}, fmt.Sprintf("%s: expected %v got %v\n", desc, []mfgroups.Group{group}, mp.Memberships)) + } + } +} + +func TestGroupChangeStatus(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + dbMiddleware := postgres.NewDatabase(db, tracer) + repo := gpostgres.NewRepository(dbMiddleware) + + group1 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "active-group", + Status: mfclients.EnabledStatus, + } + group2 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "inactive-group", + Status: mfclients.DisabledStatus, + } + + group1, err := repo.Save(context.Background(), group1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new group: expected %v got %s\n", nil, err)) + group2, err = repo.Save(context.Background(), group2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled group: expected %v got %s\n", nil, err)) + + cases := []struct { + desc string + group mfgroups.Group + err error + }{ + { + desc: "change group status for an active group", + group: mfgroups.Group{ + ID: group1.ID, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "change group status for a inactive group", + group: mfgroups.Group{ + ID: group2.ID, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "change group status for an invalid group", + group: mfgroups.Group{ + 
ID: "invalid", + Status: mfclients.DisabledStatus, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + expected, err := repo.ChangeStatus(context.Background(), tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.group.Status, expected.Status, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.group.Status, expected.Status)) + } + } +} diff --git a/things/groups/postgres/setup_test.go b/things/groups/postgres/setup_test.go new file mode 100644 index 0000000000..bbace8e6b7 --- /dev/null +++ b/things/groups/postgres/setup_test.go @@ -0,0 +1,95 @@ +// Package postgres_test contains tests for PostgreSQL repository +// implementations. +package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/jmoiron/sqlx" + pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/postgres" + gpostgres "github.com/mainflux/mainflux/things/postgres" + dockertest "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.1-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be 
ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgClient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + if db, err = pgClient.SetupDB(dbConfig, *gpostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + if db, err = pgClient.Connect(dbConfig); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + database = postgres.NewDatabase(db, tracer) + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/things/groups/redis/doc.go b/things/groups/redis/doc.go new file mode 100644 index 0000000000..3b7b7486ae --- /dev/null +++ b/things/groups/redis/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Package redis contains cache implementations using Redis as +// the underlying database. +package redis diff --git a/things/groups/redis/events.go b/things/groups/redis/events.go new file mode 100644 index 0000000000..257c5e7cc9 --- /dev/null +++ b/things/groups/redis/events.go @@ -0,0 +1,274 @@ +package redis + +import ( + "encoding/json" + "time" + + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +const ( + groupPrefix = "channel." 
+ groupCreate = groupPrefix + "create" + groupUpdate = groupPrefix + "update" + groupRemove = groupPrefix + "remove" + groupView = groupPrefix + "view" + groupList = groupPrefix + "list" + groupListMemberships = groupPrefix + "list_by_group" +) + +type event interface { + Encode() (map[string]interface{}, error) +} + +var ( + _ event = (*createGroupEvent)(nil) + _ event = (*updateGroupEvent)(nil) + _ event = (*removeGroupEvent)(nil) + _ event = (*viewGroupEvent)(nil) + _ event = (*listGroupEvent)(nil) + _ event = (*listGroupMembershipEvent)(nil) +) + +type createGroupEvent struct { + mfgroups.Group +} + +func (cce createGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupCreate, + "id": cce.ID, + "status": cce.Status.String(), + "created_at": cce.CreatedAt, + } + + if cce.Owner != "" { + val["owner"] = cce.Owner + } + if cce.Parent != "" { + val["parent"] = cce.Parent + } + if cce.Name != "" { + val["name"] = cce.Name + } + if cce.Description != "" { + val["description"] = cce.Description + } + if cce.Metadata != nil { + metadata, err := json.Marshal(cce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if cce.Status.String() != "" { + val["status"] = cce.Status.String() + } + if !cce.CreatedAt.IsZero() { + val["created_at"] = cce.CreatedAt + } + return val, nil +} + +type updateGroupEvent struct { + mfgroups.Group +} + +func (uce updateGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupUpdate, + "updated_at": uce.UpdatedAt, + "updated_by": uce.UpdatedBy, + } + + if uce.ID != "" { + val["id"] = uce.ID + } + if uce.Owner != "" { + val["owner"] = uce.Owner + } + if uce.Parent != "" { + val["parent"] = uce.Parent + } + if uce.Name != "" { + val["name"] = uce.Name + } + if uce.Description != "" { + val["description"] = uce.Description + } + if uce.Metadata != nil { + metadata, err := 
json.Marshal(uce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if !uce.CreatedAt.IsZero() { + val["created_at"] = uce.CreatedAt + } + if !uce.UpdatedAt.IsZero() { + val["updated_at"] = uce.UpdatedAt + } + if uce.Status.String() != "" { + val["status"] = uce.Status.String() + } + + return val, nil +} + +type removeGroupEvent struct { + id string + status string + updatedAt time.Time + updatedBy string +} + +func (rce removeGroupEvent) Encode() (map[string]interface{}, error) { + return map[string]interface{}{ + "operation": groupRemove, + "id": rce.id, + "status": rce.status, + "updated_at": rce.updatedAt, + "updated_by": rce.updatedBy, + }, nil +} + +type viewGroupEvent struct { + mfgroups.Group +} + +func (vce viewGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupView, + "id": vce.ID, + } + + if vce.Owner != "" { + val["owner"] = vce.Owner + } + if vce.Parent != "" { + val["parent"] = vce.Parent + } + if vce.Name != "" { + val["name"] = vce.Name + } + if vce.Description != "" { + val["description"] = vce.Description + } + if vce.Metadata != nil { + metadata, err := json.Marshal(vce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if !vce.CreatedAt.IsZero() { + val["created_at"] = vce.CreatedAt + } + if !vce.UpdatedAt.IsZero() { + val["updated_at"] = vce.UpdatedAt + } + if vce.UpdatedBy != "" { + val["updated_by"] = vce.UpdatedBy + } + if vce.Status.String() != "" { + val["status"] = vce.Status.String() + } + + return val, nil +} + +type listGroupEvent struct { + mfgroups.GroupsPage +} + +func (lce listGroupEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupList, + "total": lce.Total, + "offset": lce.Offset, + "limit": lce.Limit, + } + + if lce.Name != "" { + val["name"] = lce.Name + } + if lce.OwnerID != "" { + val["owner_id"] = 
lce.OwnerID + } + if lce.Tag != "" { + val["tag"] = lce.Tag + } + if lce.Metadata != nil { + metadata, err := json.Marshal(lce.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if lce.SharedBy != "" { + val["sharedBy"] = lce.SharedBy + } + if lce.Status.String() != "" { + val["status"] = lce.Status.String() + } + if lce.Action != "" { + val["action"] = lce.Action + } + if lce.Subject != "" { + val["subject"] = lce.Subject + } + + return val, nil +} + +type listGroupMembershipEvent struct { + mfgroups.GroupsPage + channelID string +} + +func (lcge listGroupMembershipEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": groupListMemberships, + "total": lcge.Total, + "offset": lcge.Offset, + "limit": lcge.Limit, + "channel_id": lcge.channelID, + } + + if lcge.Name != "" { + val["name"] = lcge.Name + } + if lcge.OwnerID != "" { + val["owner_id"] = lcge.OwnerID + } + if lcge.Tag != "" { + val["tag"] = lcge.Tag + } + if lcge.Metadata != nil { + metadata, err := json.Marshal(lcge.Metadata) + if err != nil { + return map[string]interface{}{}, err + } + + val["metadata"] = metadata + } + if lcge.SharedBy != "" { + val["shared_by"] = lcge.SharedBy + } + if lcge.Status.String() != "" { + val["status"] = lcge.Status.String() + } + if lcge.Action != "" { + val["action"] = lcge.Action + } + if lcge.Subject != "" { + val["subject"] = lcge.Subject + } + + return val, nil +} diff --git a/things/groups/redis/streams.go b/things/groups/redis/streams.go new file mode 100644 index 0000000000..de00e5185b --- /dev/null +++ b/things/groups/redis/streams.go @@ -0,0 +1,197 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package redis + +import ( + "context" + + "github.com/go-redis/redis/v8" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" +) + +const ( + streamID = "mainflux.things" + streamLen = 1000 +) + +var _ 
groups.Service = (*eventStore)(nil) + +type eventStore struct { + svc groups.Service + client *redis.Client +} + +// NewEventStoreMiddleware returns wrapper around things service that sends +// events to event store. +func NewEventStoreMiddleware(svc groups.Service, client *redis.Client) groups.Service { + return eventStore{ + svc: svc, + client: client, + } +} + +func (es eventStore) CreateGroups(ctx context.Context, token string, groups ...mfgroups.Group) ([]mfgroups.Group, error) { + gs, err := es.svc.CreateGroups(ctx, token, groups...) + if err != nil { + return gs, err + } + + for _, group := range gs { + event := createGroupEvent{ + group, + } + values, err := event.Encode() + if err != nil { + return gs, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, /* approximate trimming (O(1) in Redis), consistent with all other handlers in this file */ + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return gs, err + } + } + return gs, nil +} + +func (es eventStore) UpdateGroup(ctx context.Context, token string, group mfgroups.Group) (mfgroups.Group, error) { + group, err := es.svc.UpdateGroup(ctx, token, group) + if err != nil { + return mfgroups.Group{}, err + } + + event := updateGroupEvent{ + group, + } + values, err := event.Encode() + if err != nil { + return group, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return group, err + } + + return group, nil +} + +func (es eventStore) ViewGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + group, err := es.svc.ViewGroup(ctx, token, id) + if err != nil { + return mfgroups.Group{}, err + } + event := viewGroupEvent{ + group, + } + values, err := event.Encode() + if err != nil { + return group, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return group, err + } + + return group, nil +} 
+ +func (es eventStore) ListGroups(ctx context.Context, token string, pm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + gp, err := es.svc.ListGroups(ctx, token, pm) + if err != nil { + return mfgroups.GroupsPage{}, err + } + event := listGroupEvent{ + pm, + } + values, err := event.Encode() + if err != nil { + return gp, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return gp, err + } + + return gp, nil +} + +func (es eventStore) ListMemberships(ctx context.Context, token, clientID string, pm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + mp, err := es.svc.ListMemberships(ctx, token, clientID, pm) + if err != nil { + return mfgroups.MembershipsPage{}, err + } + event := listGroupMembershipEvent{ + pm, clientID, + } + values, err := event.Encode() + if err != nil { + return mp, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return mp, err + } + + return mp, nil +} + +func (es eventStore) EnableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + cli, err := es.svc.EnableGroup(ctx, token, id) + if err != nil { + return mfgroups.Group{}, err + } + + return es.delete(ctx, cli) +} + +func (es eventStore) DisableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + cli, err := es.svc.DisableGroup(ctx, token, id) + if err != nil { + return mfgroups.Group{}, err + } + + return es.delete(ctx, cli) +} + +func (es eventStore) delete(ctx context.Context, group mfgroups.Group) (mfgroups.Group, error) { + event := removeGroupEvent{ + id: group.ID, + updatedAt: group.UpdatedAt, + updatedBy: group.UpdatedBy, + status: group.Status.String(), + } + values, err := event.Encode() + if err != nil { + return group, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: 
streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return group, err + } + + return group, nil +} diff --git a/things/groups/service.go b/things/groups/service.go new file mode 100644 index 0000000000..5e925fac2b --- /dev/null +++ b/things/groups/service.go @@ -0,0 +1,216 @@ +package groups + +import ( + "context" + "time" + + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/apiutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/groups" + upolicies "github.com/mainflux/mainflux/users/policies" +) + +// Possible token types are access and refresh tokens. +const ( + thingsObjectKey = "things" + updateRelationKey = "g_update" + listRelationKey = "g_list" + deleteRelationKey = "g_delete" + entityType = "group" +) + +type service struct { + auth upolicies.AuthServiceClient + groups groups.Repository + idProvider mainflux.IDProvider +} + +// NewService returns a new Clients service implementation. 
+func NewService(auth upolicies.AuthServiceClient, g groups.Repository, idp mainflux.IDProvider) Service { + return service{ + auth: auth, + groups: g, + idProvider: idp, + } +} + +func (svc service) CreateGroups(ctx context.Context, token string, gs ...groups.Group) ([]groups.Group, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return []groups.Group{}, err + } + + var grps []groups.Group + for _, g := range gs { + if g.ID == "" { + groupID, err := svc.idProvider.ID() + if err != nil { + return []groups.Group{}, err + } + g.ID = groupID + } + if g.Owner == "" { + g.Owner = userID + } + + if g.Status != mfclients.EnabledStatus && g.Status != mfclients.DisabledStatus { + return []groups.Group{}, apiutil.ErrInvalidStatus + } + + g.CreatedAt = time.Now() + g.UpdatedAt = g.CreatedAt + g.UpdatedBy = g.Owner + grp, err := svc.groups.Save(ctx, g) + if err != nil { + return []groups.Group{}, err + } + grps = append(grps, grp) + } + return grps, nil +} + +func (svc service) ViewGroup(ctx context.Context, token string, id string) (groups.Group, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return groups.Group{}, err + } + if err := svc.authorize(ctx, userID, id, listRelationKey); err != nil { + return groups.Group{}, errors.Wrap(errors.ErrNotFound, err) + } + return svc.groups.RetrieveByID(ctx, id) +} + +func (svc service) ListGroups(ctx context.Context, token string, gm groups.GroupsPage) (groups.GroupsPage, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return groups.GroupsPage{}, err + } + + // If the user is admin, fetch all channels from the database. 
+ if err := svc.authorize(ctx, userID, thingsObjectKey, listRelationKey); err == nil { /* subject must be the resolved user ID, not the raw token: authorize compares it against dbGroup.Owner and sends it as the policy Sub */ + page, err := svc.groups.RetrieveAll(ctx, gm) + if err != nil { + return groups.GroupsPage{}, err + } + return page, err + } + + gm.Subject = userID + gm.OwnerID = userID + gm.Action = listRelationKey + return svc.groups.RetrieveAll(ctx, gm) +} + +func (svc service) ListMemberships(ctx context.Context, token, clientID string, gm groups.GroupsPage) (groups.MembershipsPage, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return groups.MembershipsPage{}, err + } + + // If the user is admin, fetch all channels from the database. + if err := svc.authorize(ctx, userID, thingsObjectKey, listRelationKey); err == nil { + return svc.groups.Memberships(ctx, clientID, gm) + } + + gm.OwnerID = userID + return svc.groups.Memberships(ctx, clientID, gm) +} + +func (svc service) UpdateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return groups.Group{}, err + } + + if err := svc.authorize(ctx, userID, g.ID, updateRelationKey); err != nil { + return groups.Group{}, errors.Wrap(errors.ErrNotFound, err) + } + + g.Owner = userID + g.UpdatedAt = time.Now() + g.UpdatedBy = userID + + return svc.groups.Update(ctx, g) +} + +func (svc service) EnableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group := groups.Group{ + ID: id, + Status: mfclients.EnabledStatus, + UpdatedAt: time.Now(), + } + group, err := svc.changeGroupStatus(ctx, token, group) + if err != nil { + return groups.Group{}, errors.Wrap(groups.ErrEnableGroup, err) + } + return group, nil +} + +func (svc service) DisableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group := groups.Group{ + ID: id, + Status: mfclients.DisabledStatus, + UpdatedAt: time.Now(), + } + group, err := svc.changeGroupStatus(ctx, token, group) + if err != nil { + return groups.Group{}, 
errors.Wrap(groups.ErrDisableGroup, err) + } + return group, nil +} + +func (svc service) changeGroupStatus(ctx context.Context, token string, group groups.Group) (groups.Group, error) { + userID, err := svc.identifyUser(ctx, token) + if err != nil { + return groups.Group{}, err + } + if err := svc.authorize(ctx, userID, group.ID, deleteRelationKey); err != nil { + return groups.Group{}, errors.Wrap(errors.ErrNotFound, err) + } + dbGroup, err := svc.groups.RetrieveByID(ctx, group.ID) + if err != nil { + return groups.Group{}, err + } + + if dbGroup.Status == group.Status { + return groups.Group{}, mfclients.ErrStatusAlreadyAssigned + } + group.UpdatedBy = userID + return svc.groups.ChangeStatus(ctx, group) +} + +func (svc service) identifyUser(ctx context.Context, token string) (string, error) { + req := &upolicies.Token{Value: token} + res, err := svc.auth.Identify(ctx, req) + if err != nil { + return "", errors.Wrap(errors.ErrAuthorization, err) + } + return res.GetId(), nil +} + +func (svc service) authorize(ctx context.Context, subject, object string, relation string) error { + // Check if the client is the owner of the group. 
+ dbGroup, err := svc.groups.RetrieveByID(ctx, object) + if err != nil { + return err + } + if dbGroup.Owner == subject { + return nil + } + req := &upolicies.AuthorizeReq{ + Sub: subject, + Obj: object, + Act: relation, + EntityType: entityType, + } + res, err := svc.auth.Authorize(ctx, req) + if err != nil { + return errors.Wrap(errors.ErrAuthorization, err) + } + if !res.GetAuthorized() { + return errors.ErrAuthorization + } + return nil +} diff --git a/things/groups/service_test.go b/things/groups/service_test.go new file mode 100644 index 0000000000..f91975e318 --- /dev/null +++ b/things/groups/service_test.go @@ -0,0 +1,733 @@ +package groups_test + +import ( + context "context" + fmt "fmt" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/things/clients/mocks" + "github.com/mainflux/mainflux/things/groups" + gmocks "github.com/mainflux/mainflux/things/groups/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() + validGMetadata = mfclients.Metadata{"role": "client"} + inValidToken = "invalidToken" + description = "shortdescription" + gName = "groupname" + ID = testsutil.GenerateUUID(&testing.T{}, idProvider) + group = mfgroups.Group{ + ID: ID, + Name: gName, + Description: description, + Metadata: validGMetadata, + Status: mfclients.EnabledStatus, + } + withinDuration = 5 * time.Second + adminEmail = "admin@example.com" + token = "token" +) + +func newService(tokens map[string]string) (groups.Service, *gmocks.GroupRepository) { + adminPolicy := mocks.MockSubjectSet{Object: ID, Relation: []string{"g_add", "g_update", "g_list", "g_delete"}} + auth := mocks.NewAuthService(tokens, 
map[string][]mocks.MockSubjectSet{adminEmail: {adminPolicy}}) + idProvider := uuid.NewMock() + gRepo := new(gmocks.GroupRepository) + + return groups.NewService(auth, gRepo, idProvider), gRepo +} + +func TestCreateGroup(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + group mfgroups.Group + err error + }{ + { + desc: "create new group", + group: group, + err: nil, + }, + { + desc: "create group with existing name", + group: group, + err: nil, + }, + { + desc: "create group with parent", + group: mfgroups.Group{ + Name: gName, + Parent: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create group with invalid parent", + group: mfgroups.Group{ + Name: gName, + Parent: mocks.WrongID, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "create group with invalid owner", + group: mfgroups.Group{ + Name: gName, + Owner: mocks.WrongID, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "create group with missing name", + group: mfgroups.Group{}, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + repoCall1 := gRepo.On("Save", context.Background(), mock.Anything).Return(tc.group, tc.err) + createdAt := time.Now() + expected, err := svc.CreateGroups(context.Background(), token, tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.NotEmpty(t, expected[0].ID, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, expected[0].ID)) + assert.WithinDuration(t, expected[0].CreatedAt, createdAt, withinDuration, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected[0].CreatedAt, createdAt)) + tc.group.ID = expected[0].ID + tc.group.CreatedAt = expected[0].CreatedAt + tc.group.UpdatedAt = expected[0].UpdatedAt + tc.group.UpdatedBy = expected[0].UpdatedBy + tc.group.Owner = expected[0].Owner + assert.Equal(t, tc.group, 
expected[0], fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group, expected[0])) + } + repoCall1.Unset() + } +} + +func TestUpdateGroup(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + token string + group mfgroups.Group + response mfgroups.Group + err error + }{ + { + desc: "update group name", + group: mfgroups.Group{ + ID: group.ID, + Name: "NewName", + }, + response: mfgroups.Group{ + ID: group.ID, + Name: "NewName", + }, + token: token, + err: nil, + }, + { + desc: "update group description", + group: mfgroups.Group{ + ID: group.ID, + Description: "NewDescription", + }, + response: mfgroups.Group{ + ID: group.ID, + Description: "NewDescription", + }, + token: token, + err: nil, + }, + { + desc: "update group metadata", + group: mfgroups.Group{ + ID: group.ID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + response: mfgroups.Group{ + ID: group.ID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + token: token, + err: nil, + }, + { + desc: "update group name with invalid group id", + group: mfgroups.Group{ + ID: mocks.WrongID, + Name: "NewName", + }, + response: mfgroups.Group{}, + token: token, + err: errors.ErrNotFound, + }, + { + desc: "update group description with invalid group id", + group: mfgroups.Group{ + ID: mocks.WrongID, + Description: "NewDescription", + }, + response: mfgroups.Group{}, + token: token, + err: errors.ErrNotFound, + }, + { + desc: "update group metadata with invalid group id", + group: mfgroups.Group{ + ID: mocks.WrongID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + response: mfgroups.Group{}, + token: token, + err: errors.ErrNotFound, + }, + { + desc: "update group name with invalid token", + group: mfgroups.Group{ + ID: group.ID, + Name: "NewName", + }, + response: mfgroups.Group{}, + token: inValidToken, + err: errors.ErrAuthentication, + }, + { + desc: "update group description with invalid token", + 
group: mfgroups.Group{ + ID: group.ID, + Description: "NewDescription", + }, + response: mfgroups.Group{}, + token: inValidToken, + err: errors.ErrAuthentication, + }, + { + desc: "update group metadata with invalid token", + group: mfgroups.Group{ + ID: group.ID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + response: mfgroups.Group{}, + token: inValidToken, + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfgroups.Group{}, tc.err) + repoCall1 := gRepo.On("Update", context.Background(), mock.Anything).Return(tc.response, tc.err) + expectedGroup, err := svc.UpdateGroup(context.Background(), tc.token, tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, expectedGroup, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, expectedGroup)) + repoCall1.Unset() + repoCall.Unset() + } + +} + +func TestViewGroup(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + token string + groupID string + response mfgroups.Group + err error + }{ + { + + desc: "view group", + token: token, + groupID: group.ID, + response: group, + err: nil, + }, + { + desc: "view group with invalid token", + token: "wrongtoken", + groupID: group.ID, + response: mfgroups.Group{}, + err: errors.ErrAuthorization, + }, + { + desc: "view group for wrong id", + token: token, + groupID: mocks.WrongID, + response: mfgroups.Group{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall1 := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.response, tc.err) + expected, err := svc.ViewGroup(context.Background(), tc.token, tc.groupID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, expected, 
tc.response, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected, tc.response)) + repoCall1.Unset() + } +} + +func TestListGroups(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + nGroups := uint64(200) + parentID := "" + var aGroups = []mfgroups.Group{} + for i := uint64(0); i < nGroups; i++ { + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: fmt.Sprintf("Group_%d", i), + Description: description, + Metadata: mfclients.Metadata{ + "field": "value", + }, + Parent: parentID, + } + parentID = group.ID + aGroups = append(aGroups, group) + } + + cases := []struct { + desc string + token string + size uint64 + response mfgroups.GroupsPage + page mfgroups.GroupsPage + err error + }{ + { + desc: "list all groups", + token: token, + size: nGroups, + err: nil, + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Total: nGroups, + Limit: nGroups, + }, + }, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Total: nGroups, + Limit: nGroups, + }, + Groups: aGroups, + }, + }, + { + desc: "list groups with an offset", + token: token, + size: 150, + err: nil, + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 50, + Total: nGroups, + Limit: nGroups, + }, + }, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Total: 150, + Limit: nGroups, + }, + Groups: aGroups[50:nGroups], + }, + }, + } + + for _, tc := range cases { + repoCall := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfgroups.Group{}, tc.err) + repoCall1 := gRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, tc.err) + page, err := svc.ListGroups(context.Background(), tc.token, tc.page) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall1.Unset() + 
repoCall.Unset() + } + +} + +func TestEnableGroup(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + enabledGroup1 := mfgroups.Group{ID: ID, Name: "group1", Status: mfclients.EnabledStatus} + disabledGroup := mfgroups.Group{ID: ID, Name: "group2", Status: mfclients.DisabledStatus} + disabledGroup1 := disabledGroup + disabledGroup1.Status = mfclients.EnabledStatus + + casesEnabled := []struct { + desc string + id string + token string + group mfgroups.Group + response mfgroups.Group + err error + }{ + { + desc: "enable disabled group", + id: disabledGroup.ID, + token: token, + group: disabledGroup, + response: disabledGroup1, + err: nil, + }, + { + desc: "enable enabled group", + id: enabledGroup1.ID, + token: token, + group: enabledGroup1, + response: enabledGroup1, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "enable non-existing group", + id: mocks.WrongID, + token: token, + group: mfgroups.Group{}, + response: mfgroups.Group{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range casesEnabled { + repoCall1 := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.group, tc.err) + repoCall2 := gRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.EnableGroup(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall1.Unset() + repoCall2.Unset() + } + + casesDisabled := []struct { + desc string + status mfclients.Status + size uint64 + response mfgroups.GroupsPage + }{ + { + desc: "list activated groups", + status: mfclients.EnabledStatus, + size: 2, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1, disabledGroup1}, + }, + }, + { + desc: "list deactivated groups", + status: mfclients.DisabledStatus, + size: 1, + response: mfgroups.GroupsPage{ + Page: 
mfgroups.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{disabledGroup}, + }, + }, + { + desc: "list activated and deactivated groups", + status: mfclients.AllStatus, + size: 3, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1, disabledGroup, disabledGroup1}, + }, + }, + } + + for _, tc := range casesDisabled { + pm := mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + }, + } + repoCall := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfgroups.Group{}, nil) + repoCall1 := gRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListGroups(context.Background(), token, pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Groups)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestDisableGroup(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + enabledGroup1 := mfgroups.Group{ID: ID, Name: "group1", Status: mfclients.EnabledStatus} + disabledGroup := mfgroups.Group{ID: ID, Name: "group2", Status: mfclients.DisabledStatus} + disabledGroup1 := enabledGroup1 + disabledGroup1.Status = mfclients.DisabledStatus + + casesDisabled := []struct { + desc string + id string + token string + group mfgroups.Group + response mfgroups.Group + err error + }{ + { + desc: "disable enabled group", + id: enabledGroup1.ID, + token: token, + group: enabledGroup1, + response: disabledGroup1, + err: nil, + }, + { + desc: "disable disabled group", + id: disabledGroup.ID, + token: token, + group: disabledGroup, + response: mfgroups.Group{}, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "disable non-existing group", + id: mocks.WrongID, + group: mfgroups.Group{}, + 
token: token, + response: mfgroups.Group{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range casesDisabled { + repoCall1 := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.group, tc.err) + repoCall2 := gRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.DisableGroup(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall1.Unset() + repoCall2.Unset() + } + + casesEnabled := []struct { + desc string + status mfclients.Status + size uint64 + response mfgroups.GroupsPage + }{ + { + desc: "list activated groups", + status: mfclients.EnabledStatus, + size: 1, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1}, + }, + }, + { + desc: "list deactivated groups", + status: mfclients.DisabledStatus, + size: 2, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{disabledGroup1, disabledGroup}, + }, + }, + { + desc: "list activated and deactivated groups", + status: mfclients.AllStatus, + size: 3, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1, disabledGroup, disabledGroup1}, + }, + }, + } + + for _, tc := range casesEnabled { + pm := mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + }, + } + repoCall := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfgroups.Group{}, nil) + repoCall1 := gRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListGroups(context.Background(), token, pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Groups)) + assert.Equal(t, tc.size, size, 
fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestListMemberships(t *testing.T) { + + svc, gRepo := newService(map[string]string{token: adminEmail}) + + var nGroups = uint64(100) + var aGroups = []mfgroups.Group{} + for i := uint64(1); i < nGroups; i++ { + group := mfgroups.Group{ + Name: fmt.Sprintf("membership_%d@example.com", i), + Metadata: mfclients.Metadata{"role": "group"}, + } + aGroups = append(aGroups, group) + } + + cases := []struct { + desc string + token string + clientID string + page mfgroups.GroupsPage + response mfgroups.MembershipsPage + err error + }{ + { + desc: "list clients with authorized token", + token: token, + clientID: testsutil.GenerateUUID(t, idProvider), + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Action: "g_list", + Subject: adminEmail, + OwnerID: adminEmail, + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 0, + Limit: 0, + }, + Memberships: aGroups, + }, + err: nil, + }, + { + desc: "list clients with offset and limit", + token: token, + clientID: testsutil.GenerateUUID(t, idProvider), + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 6, + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus, + Subject: adminEmail, + OwnerID: adminEmail, + Action: "g_list", + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: nGroups - 6, + }, + Memberships: aGroups[6:nGroups], + }, + }, + { + desc: "list clients with an invalid token", + token: inValidToken, + clientID: testsutil.GenerateUUID(t, idProvider), + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Action: "g_list", + Subject: adminEmail, + OwnerID: adminEmail, + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrAuthentication, + }, + { + desc: "list clients with an invalid id", + token: token, + clientID: 
mocks.WrongID, + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Action: "g_list", + Subject: adminEmail, + OwnerID: adminEmail, + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := gRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(mfgroups.Group{}, tc.err) + repoCall1 := gRepo.On("Memberships", context.Background(), tc.clientID, tc.page).Return(tc.response, tc.err) + page, err := svc.ListMemberships(context.Background(), tc.token, tc.clientID, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + repoCall.Unset() + repoCall1.Unset() + } +} diff --git a/things/groups/tracing/tracing.go b/things/groups/tracing/tracing.go new file mode 100644 index 0000000000..75bbcc24ba --- /dev/null +++ b/things/groups/tracing/tracing.go @@ -0,0 +1,73 @@ +package tracing + +import ( + "context" + + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/things/groups" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ groups.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + gsvc groups.Service +} + +func TracingMiddleware(gsvc groups.Service, tracer trace.Tracer) groups.Service { + return &tracingMiddleware{tracer, gsvc} +} + +func (tm *tracingMiddleware) ListMemberships(ctx context.Context, token, clientID string, gm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_memberships") + defer span.End() + return tm.gsvc.ListMemberships(ctx, token, clientID, gm) +} + +func (tm *tracingMiddleware) CreateGroups(ctx context.Context, token string, g ...mfgroups.Group) ([]mfgroups.Group, error) { + 
ctx, span := tm.tracer.Start(ctx, "svc_create_group", trace.WithAttributes(attribute.String("Name", g[0].Name))) + defer span.End() + + return tm.gsvc.CreateGroups(ctx, token, g...) + +} + +func (tm *tracingMiddleware) ViewGroup(ctx context.Context, token string, id string) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_group", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.gsvc.ViewGroup(ctx, token, id) + +} + +func (tm *tracingMiddleware) ListGroups(ctx context.Context, token string, gm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_groups") + defer span.End() + + return tm.gsvc.ListGroups(ctx, token, gm) + +} + +func (tm *tracingMiddleware) UpdateGroup(ctx context.Context, token string, g mfgroups.Group) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_group", trace.WithAttributes(attribute.String("Name", g.Name))) + defer span.End() + + return tm.gsvc.UpdateGroup(ctx, token, g) + +} + +func (tm *tracingMiddleware) EnableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_enable_group", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.gsvc.EnableGroup(ctx, token, id) +} + +func (tm *tracingMiddleware) DisableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_disable_group", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.gsvc.DisableGroup(ctx, token, id) +} diff --git a/things/mocks/auth.go b/things/mocks/auth.go deleted file mode 100644 index 258652c2bc..0000000000 --- a/things/mocks/auth.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" - 
"github.com/mainflux/mainflux/pkg/errors" - "google.golang.org/grpc" -) - -var _ mainflux.AuthServiceClient = (*authServiceMock)(nil) - -type MockSubjectSet struct { - Object string - Relation string -} - -type authServiceMock struct { - users map[string]string - policies map[string][]MockSubjectSet -} - -func (svc authServiceMock) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { - res := mainflux.ListPoliciesRes{} - for key := range svc.policies { - res.Policies = append(res.Policies, key) - } - return &res, nil -} - -// NewAuthService creates mock of users service. -func NewAuthService(users map[string]string, policies map[string][]MockSubjectSet) mainflux.AuthServiceClient { - return &authServiceMock{users, policies} -} - -func (svc authServiceMock) Identify(ctx context.Context, in *mainflux.Token, opts ...grpc.CallOption) (*mainflux.UserIdentity, error) { - if id, ok := svc.users[in.Value]; ok { - return &mainflux.UserIdentity{Id: id, Email: id}, nil - } - return nil, errors.ErrAuthentication -} - -func (svc authServiceMock) Issue(ctx context.Context, in *mainflux.IssueReq, opts ...grpc.CallOption) (*mainflux.Token, error) { - if id, ok := svc.users[in.GetEmail()]; ok { - switch in.Type { - default: - return &mainflux.Token{Value: id}, nil - } - } - return nil, errors.ErrAuthentication -} - -func (svc authServiceMock) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { - for _, policy := range svc.policies[req.GetSub()] { - if policy.Relation == req.GetAct() && policy.Object == req.GetObj() { - return &mainflux.AuthorizeRes{Authorized: true}, nil - } - } - return nil, errors.ErrAuthorization -} - -func (svc authServiceMock) AddPolicy(ctx context.Context, in *mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { - if in.GetAct() == "" || in.GetObj() == "" || in.GetSub() == "" { - return 
&mainflux.AddPolicyRes{}, errors.ErrMalformedEntity - } - - obj := in.GetObj() - svc.policies[in.GetSub()] = append(svc.policies[in.GetSub()], MockSubjectSet{Object: obj, Relation: in.GetAct()}) - return &mainflux.AddPolicyRes{Authorized: true}, nil -} - -func (svc authServiceMock) DeletePolicy(ctx context.Context, in *mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { - // Not implemented yet - return &mainflux.DeletePolicyRes{Deleted: true}, nil -} - -func (svc authServiceMock) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - panic("not implemented") -} - -func (svc authServiceMock) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, err error) { - panic("not implemented") -} diff --git a/things/mocks/channels.go b/things/mocks/channels.go deleted file mode 100644 index 4b646ae2c2..0000000000 --- a/things/mocks/channels.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - "fmt" - "strings" - "sync" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -// Connection represents connection between channel and thing that is used for -// testing purposes. -type Connection struct { - chanID string - thing things.Thing - connected bool -} - -var _ things.ChannelRepository = (*channelRepositoryMock)(nil) - -type channelRepositoryMock struct { - mu sync.Mutex - counter uint64 - channels map[string]things.Channel - tconns chan Connection // used for synchronization with thing repo - cconns map[string]map[string]things.Channel // used to track connections - things things.ThingRepository -} - -// NewChannelRepository creates in-memory channel repository. 
-func NewChannelRepository(repo things.ThingRepository, tconns chan Connection) things.ChannelRepository { - return &channelRepositoryMock{ - channels: make(map[string]things.Channel), - tconns: tconns, - cconns: make(map[string]map[string]things.Channel), - things: repo, - } -} - -func (crm *channelRepositoryMock) Save(_ context.Context, channels ...things.Channel) ([]things.Channel, error) { - crm.mu.Lock() - defer crm.mu.Unlock() - - for i := range channels { - crm.counter++ - if channels[i].ID == "" { - channels[i].ID = fmt.Sprintf("%03d", crm.counter) - } - crm.channels[key(channels[i].Owner, channels[i].ID)] = channels[i] - } - - return channels, nil -} - -func (crm *channelRepositoryMock) Update(_ context.Context, channel things.Channel) error { - crm.mu.Lock() - defer crm.mu.Unlock() - - dbKey := key(channel.Owner, channel.ID) - - if _, ok := crm.channels[dbKey]; !ok { - return errors.ErrNotFound - } - - crm.channels[dbKey] = channel - return nil -} - -func (crm *channelRepositoryMock) RetrieveByID(_ context.Context, owner, id string) (things.Channel, error) { - if c, ok := crm.channels[key(owner, id)]; ok { - return c, nil - } - - return things.Channel{}, errors.ErrNotFound -} - -func (crm *channelRepositoryMock) RetrieveAll(_ context.Context, owner string, pm things.PageMetadata) (things.ChannelsPage, error) { - if pm.Limit == 0 { - pm.Limit = 10 - } - - first := int(pm.Offset) - last := first + int(pm.Limit) - - var chs []things.Channel - - // This obscure way to examine map keys is enforced by the key structure - // itself (see mocks/commons.go). 
- prefix := fmt.Sprintf("%s-", owner) - for k, v := range crm.channels { - if strings.HasPrefix(k, prefix) { - chs = append(chs, v) - } - } - - // Sort Channels list - chs = sortChannels(pm, chs) - - if last > len(chs) { - last = len(chs) - } - - if first > last { - return things.ChannelsPage{}, nil - } - - page := things.ChannelsPage{ - Channels: chs[first:last], - PageMetadata: things.PageMetadata{ - Total: crm.counter, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (crm *channelRepositoryMock) RetrieveByThing(_ context.Context, owner, thID string, pm things.PageMetadata) (things.ChannelsPage, error) { - if pm.Limit <= 0 { - return things.ChannelsPage{}, nil - } - - first := uint64(pm.Offset) + 1 - last := first + uint64(pm.Limit) - - var chs []things.Channel - - // Append connected or not connected channels - switch pm.Disconnected { - case false: - for _, co := range crm.cconns[thID] { - id := parseID(co.ID) - if id >= first && id < last { - chs = append(chs, co) - } - } - default: - for _, ch := range crm.channels { - conn := false - id := parseID(ch.ID) - if id >= first && id < last { - for _, co := range crm.cconns[thID] { - if ch.ID == co.ID { - conn = true - } - } - - // Append if not found in connections list - if !conn { - chs = append(chs, ch) - } - } - } - } - - // Sort Channels by Thing list - chs = sortChannels(pm, chs) - - page := things.ChannelsPage{ - Channels: chs, - PageMetadata: things.PageMetadata{ - Total: crm.counter, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (crm *channelRepositoryMock) Remove(_ context.Context, owner, id string) error { - delete(crm.channels, key(owner, id)) - // delete channel from any thing list - for thk := range crm.cconns { - delete(crm.cconns[thk], key(owner, id)) - } - crm.tconns <- Connection{ - chanID: id, - connected: false, - } - return nil -} - -func (crm *channelRepositoryMock) Connect(_ context.Context, owner string, chIDs, thIDs 
[]string) error { - for _, chID := range chIDs { - ch, err := crm.RetrieveByID(context.Background(), owner, chID) - if err != nil { - return err - } - - for _, thID := range thIDs { - th, err := crm.things.RetrieveByID(context.Background(), owner, thID) - if err != nil { - return err - } - - crm.tconns <- Connection{ - chanID: chID, - thing: th, - connected: true, - } - if _, ok := crm.cconns[thID]; !ok { - crm.cconns[thID] = make(map[string]things.Channel) - } - crm.cconns[thID][chID] = ch - } - } - - return nil -} - -func (crm *channelRepositoryMock) Disconnect(_ context.Context, owner string, chIDs, thIDs []string) error { - for _, chID := range chIDs { - for _, thID := range thIDs { - if _, ok := crm.cconns[thID]; !ok { - return errors.ErrNotFound - } - - if _, ok := crm.cconns[thID][chID]; !ok { - return errors.ErrNotFound - } - - crm.tconns <- Connection{ - chanID: chID, - thing: things.Thing{ID: thID, Owner: owner}, - connected: false, - } - delete(crm.cconns[thID], chID) - } - } - - return nil -} - -func (crm *channelRepositoryMock) HasThing(_ context.Context, chanID, token string) (string, error) { - tid, err := crm.things.RetrieveByKey(context.Background(), token) - if err != nil { - return "", err - } - - chans, ok := crm.cconns[tid] - if !ok { - return "", errors.ErrAuthorization - } - - if _, ok := chans[chanID]; !ok { - return "", errors.ErrAuthorization - } - - return tid, nil -} - -func (crm *channelRepositoryMock) HasThingByID(_ context.Context, chanID, thingID string) error { - chans, ok := crm.cconns[thingID] - if !ok { - return errors.ErrAuthorization - } - - if _, ok := chans[chanID]; !ok { - return errors.ErrAuthorization - } - - return nil -} - -type channelCacheMock struct { - mu sync.Mutex - channels map[string]string -} - -// NewChannelCache returns mock cache instance. 
-func NewChannelCache() things.ChannelCache { - return &channelCacheMock{ - channels: make(map[string]string), - } -} - -func (ccm *channelCacheMock) Connect(_ context.Context, chanID, thingID string) error { - ccm.mu.Lock() - defer ccm.mu.Unlock() - - ccm.channels[chanID] = thingID - return nil -} - -func (ccm *channelCacheMock) HasThing(_ context.Context, chanID, thingID string) bool { - ccm.mu.Lock() - defer ccm.mu.Unlock() - - return ccm.channels[chanID] == thingID -} - -func (ccm *channelCacheMock) Disconnect(_ context.Context, chanID, thingID string) error { - ccm.mu.Lock() - defer ccm.mu.Unlock() - - delete(ccm.channels, chanID) - return nil -} - -func (ccm *channelCacheMock) Remove(_ context.Context, chanID string) error { - ccm.mu.Lock() - defer ccm.mu.Unlock() - - delete(ccm.channels, chanID) - return nil -} diff --git a/things/mocks/commons.go b/things/mocks/commons.go deleted file mode 100644 index e498f12254..0000000000 --- a/things/mocks/commons.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "fmt" - "sort" - "strconv" - - "github.com/mainflux/mainflux/things" -) - -const uuidLen = 36 -const asc = "asc" -const desc = "desc" - -// Since mocks will store data in map, and they need to resemble the real -// identifiers as much as possible, a key will be created as combination of -// owner and their own identifiers. This will allow searching either by -// prefix or suffix. 
-func key(owner string, id string) string { - return fmt.Sprintf("%s-%s", owner, id) -} - -func sortThings(pm things.PageMetadata, ths []things.Thing) []things.Thing { - switch pm.Order { - case "name": - if pm.Dir == asc { - sort.SliceStable(ths, func(i, j int) bool { - return ths[i].Name < ths[j].Name - }) - } - if pm.Dir == desc { - sort.SliceStable(ths, func(i, j int) bool { - return ths[i].Name > ths[j].Name - }) - } - case "id": - if pm.Dir == asc { - sort.SliceStable(ths, func(i, j int) bool { - return ths[i].ID < ths[j].ID - }) - } - if pm.Dir == desc { - sort.SliceStable(ths, func(i, j int) bool { - return ths[i].ID > ths[j].ID - }) - } - default: - sort.SliceStable(ths, func(i, j int) bool { - return ths[i].ID < ths[j].ID - }) - } - - return ths -} - -func sortChannels(pm things.PageMetadata, chs []things.Channel) []things.Channel { - switch pm.Order { - case "name": - if pm.Dir == asc { - sort.SliceStable(chs, func(i, j int) bool { - return chs[i].Name < chs[j].Name - }) - } - if pm.Dir == desc { - sort.SliceStable(chs, func(i, j int) bool { - return chs[i].Name > chs[j].Name - }) - } - case "id": - if pm.Dir == asc { - sort.SliceStable(chs, func(i, j int) bool { - return chs[i].ID < chs[j].ID - }) - } - if pm.Dir == desc { - sort.SliceStable(chs, func(i, j int) bool { - return chs[i].ID > chs[j].ID - }) - } - default: - sort.SliceStable(chs, func(i, j int) bool { - return chs[i].ID < chs[j].ID - }) - } - - return chs -} - -func parseID(ID string) (id uint64) { - var serialNum string - - if len(ID) == uuidLen { - serialNum = ID[len(ID)-6:] - } - id, _ = strconv.ParseUint(serialNum, 10, 64) - - return -} diff --git a/things/mocks/things.go b/things/mocks/things.go deleted file mode 100644 index 42d94932fd..0000000000 --- a/things/mocks/things.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - "fmt" - "strings" - "sync" - - 
"github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -var _ things.ThingRepository = (*thingRepositoryMock)(nil) - -type thingRepositoryMock struct { - mu sync.Mutex - counter uint64 - conns chan Connection - tconns map[string]map[string]things.Thing - things map[string]things.Thing -} - -// NewThingRepository creates in-memory thing repository. -func NewThingRepository(conns chan Connection) things.ThingRepository { - repo := &thingRepositoryMock{ - conns: conns, - things: make(map[string]things.Thing), - tconns: make(map[string]map[string]things.Thing), - } - go func(conns chan Connection, repo *thingRepositoryMock) { - for conn := range conns { - if !conn.connected { - repo.disconnect(conn) - continue - } - repo.connect(conn) - } - }(conns, repo) - - return repo -} - -func (trm *thingRepositoryMock) Save(_ context.Context, ths ...things.Thing) ([]things.Thing, error) { - trm.mu.Lock() - defer trm.mu.Unlock() - - for i := range ths { - for _, th := range trm.things { - if th.Key == ths[i].Key { - return []things.Thing{}, errors.ErrConflict - } - } - - trm.counter++ - if ths[i].ID == "" { - ths[i].ID = fmt.Sprintf("%03d", trm.counter) - } - trm.things[key(ths[i].Owner, ths[i].ID)] = ths[i] - } - - return ths, nil -} - -func (trm *thingRepositoryMock) Update(_ context.Context, thing things.Thing) error { - trm.mu.Lock() - defer trm.mu.Unlock() - - dbKey := key(thing.Owner, thing.ID) - - if _, ok := trm.things[dbKey]; !ok { - return errors.ErrNotFound - } - - trm.things[dbKey] = thing - - return nil -} - -func (trm *thingRepositoryMock) UpdateKey(_ context.Context, owner, id, val string) error { - trm.mu.Lock() - defer trm.mu.Unlock() - - for _, th := range trm.things { - if th.Key == val { - return errors.ErrConflict - } - } - - dbKey := key(owner, id) - - th, ok := trm.things[dbKey] - if !ok { - return errors.ErrNotFound - } - - th.Key = val - trm.things[dbKey] = th - - return nil -} - -func (trm *thingRepositoryMock) 
RetrieveByID(_ context.Context, owner, id string) (things.Thing, error) { - trm.mu.Lock() - defer trm.mu.Unlock() - - if c, ok := trm.things[key(owner, id)]; ok { - return c, nil - } - - return things.Thing{}, errors.ErrNotFound -} - -func (trm *thingRepositoryMock) RetrieveAll(_ context.Context, owner string, pm things.PageMetadata) (things.Page, error) { - trm.mu.Lock() - defer trm.mu.Unlock() - - first := uint64(pm.Offset) + 1 - last := first + uint64(pm.Limit) - - var ths []things.Thing - - // This obscure way to examine map keys is enforced by the key structure - // itself (see mocks/commons.go). - prefix := fmt.Sprintf("%s-", owner) - for k, v := range trm.things { - id := parseID(v.ID) - if strings.HasPrefix(k, prefix) && id >= first && id < last { - ths = append(ths, v) - } - } - - // Sort Things list - ths = sortThings(pm, ths) - - page := things.Page{ - Things: ths, - PageMetadata: things.PageMetadata{ - Total: trm.counter, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (trm *thingRepositoryMock) RetrieveByIDs(_ context.Context, thingIDs []string, pm things.PageMetadata) (things.Page, error) { - trm.mu.Lock() - defer trm.mu.Unlock() - - items := make([]things.Thing, 0) - - if pm.Limit == 0 { - return things.Page{}, nil - } - - first := uint64(pm.Offset) + 1 - last := first + uint64(pm.Limit) - - // This obscure way to examine map keys is enforced by the key structure - // itself (see mocks/commons.go). 
- for _, id := range thingIDs { - suffix := fmt.Sprintf("-%s", id) - for k, v := range trm.things { - id := parseID(v.ID) - if strings.HasSuffix(k, suffix) && id >= first && id < last { - items = append(items, v) - } - } - } - - items = sortThings(pm, items) - - page := things.Page{ - Things: items, - PageMetadata: things.PageMetadata{ - Total: trm.counter, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (trm *thingRepositoryMock) RetrieveByChannel(_ context.Context, owner, chID string, pm things.PageMetadata) (things.Page, error) { - trm.mu.Lock() - defer trm.mu.Unlock() - - if pm.Limit <= 0 { - return things.Page{}, nil - } - - first := uint64(pm.Offset) + 1 - last := first + uint64(pm.Limit) - - var ths []things.Thing - - // Append connected or not connected channels - switch pm.Disconnected { - case false: - for _, co := range trm.tconns[chID] { - id := parseID(co.ID) - if id >= first && id < last { - ths = append(ths, co) - } - } - default: - for _, th := range trm.things { - conn := false - id := parseID(th.ID) - if id >= first && id < last { - for _, co := range trm.tconns[chID] { - if th.ID == co.ID { - conn = true - } - } - - // Append if not found in connections list - if !conn { - ths = append(ths, th) - } - } - } - } - - // Sort Things by Channel list - ths = sortThings(pm, ths) - - page := things.Page{ - Things: ths, - PageMetadata: things.PageMetadata{ - Total: trm.counter, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (trm *thingRepositoryMock) Remove(_ context.Context, owner, id string) error { - trm.mu.Lock() - defer trm.mu.Unlock() - delete(trm.things, key(owner, id)) - return nil -} - -func (trm *thingRepositoryMock) RetrieveByKey(_ context.Context, key string) (string, error) { - trm.mu.Lock() - defer trm.mu.Unlock() - - for _, thing := range trm.things { - if thing.Key == key { - return thing.ID, nil - } - } - - return "", errors.ErrNotFound -} - -func (trm 
*thingRepositoryMock) connect(conn Connection) { - trm.mu.Lock() - defer trm.mu.Unlock() - - if _, ok := trm.tconns[conn.chanID]; !ok { - trm.tconns[conn.chanID] = make(map[string]things.Thing) - } - trm.tconns[conn.chanID][conn.thing.ID] = conn.thing -} - -func (trm *thingRepositoryMock) disconnect(conn Connection) { - trm.mu.Lock() - defer trm.mu.Unlock() - - if conn.thing.ID == "" { - delete(trm.tconns, conn.chanID) - return - } - delete(trm.tconns[conn.chanID], conn.thing.ID) -} - -type thingCacheMock struct { - mu sync.Mutex - things map[string]string -} - -// NewThingCache returns mock cache instance. -func NewThingCache() things.ThingCache { - return &thingCacheMock{ - things: make(map[string]string), - } -} - -func (tcm *thingCacheMock) Save(_ context.Context, key, id string) error { - tcm.mu.Lock() - defer tcm.mu.Unlock() - - tcm.things[key] = id - return nil -} - -func (tcm *thingCacheMock) ID(_ context.Context, key string) (string, error) { - tcm.mu.Lock() - defer tcm.mu.Unlock() - - id, ok := tcm.things[key] - if !ok { - return "", errors.ErrNotFound - } - - return id, nil -} - -func (tcm *thingCacheMock) Remove(_ context.Context, id string) error { - tcm.mu.Lock() - defer tcm.mu.Unlock() - - for key, val := range tcm.things { - if val == id { - delete(tcm.things, key) - return nil - } - } - - return nil -} diff --git a/things/policies/api/grpc/client.go b/things/policies/api/grpc/client.go new file mode 100644 index 0000000000..17529cb931 --- /dev/null +++ b/things/policies/api/grpc/client.go @@ -0,0 +1,98 @@ +package grpc + +import ( + "context" + "time" + + "github.com/go-kit/kit/endpoint" + kitgrpc "github.com/go-kit/kit/transport/grpc" + "github.com/mainflux/mainflux/things/policies" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + "google.golang.org/grpc" +) + +const svcName = "mainflux.things.policies.ThingsService" + +var _ policies.ThingsServiceClient = (*grpcClient)(nil) + +type grpcClient struct { + authorize 
endpoint.Endpoint + identify endpoint.Endpoint + timeout time.Duration +} + +// NewClient returns new gRPC client instance. +func NewClient(conn *grpc.ClientConn, timeout time.Duration) policies.ThingsServiceClient { + return &grpcClient{ + authorize: otelkit.EndpointMiddleware(otelkit.WithOperation("authorize"))(kitgrpc.NewClient( + conn, + svcName, + "Authorize", + encodeAuthorizeRequest, + decodeAuthorizeResponse, + policies.AuthorizeRes{}, + ).Endpoint()), + identify: otelkit.EndpointMiddleware(otelkit.WithOperation("identify"))(kitgrpc.NewClient( + conn, + svcName, + "Identify", + encodeIdentifyRequest, + decodeIdentityResponse, + policies.ClientID{}, + ).Endpoint()), + + timeout: timeout, + } +} + +func (client grpcClient) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (*policies.AuthorizeRes, error) { + ctx, cancel := context.WithTimeout(ctx, client.timeout) + defer cancel() + + areq := authorizeReq{ + entityType: req.GetEntityType(), + clientID: req.GetSub(), + groupID: req.GetObj(), + action: req.GetAct(), + } + res, err := client.authorize(ctx, areq) + if err != nil { + return nil, err + } + + ares := res.(authorizeRes) + return &policies.AuthorizeRes{ThingID: ares.thingID, Authorized: ares.authorized}, nil +} + +func (client grpcClient) Identify(ctx context.Context, req *policies.Key, _ ...grpc.CallOption) (*policies.ClientID, error) { + ctx, cancel := context.WithTimeout(ctx, client.timeout) + defer cancel() + + res, err := client.identify(ctx, identifyReq{key: req.GetValue()}) + if err != nil { + return nil, err + } + + ir := res.(identityRes) + return &policies.ClientID{Value: ir.id}, nil +} + +func encodeAuthorizeRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(authorizeReq) + return &policies.AuthorizeReq{Sub: req.clientID, Obj: req.groupID, Act: req.action, EntityType: req.entityType}, nil +} + +func encodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, 
error) { + req := grpcReq.(identifyReq) + return &policies.Key{Value: req.key}, nil +} + +func decodeIdentityResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.ClientID) + return identityRes{id: res.GetValue()}, nil +} + +func decodeAuthorizeResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.AuthorizeRes) + return authorizeRes{thingID: res.GetThingID(), authorized: res.GetAuthorized()}, nil +} diff --git a/things/api/auth/grpc/doc.go b/things/policies/api/grpc/doc.go similarity index 100% rename from things/api/auth/grpc/doc.go rename to things/policies/api/grpc/doc.go diff --git a/things/policies/api/grpc/endpoint.go b/things/policies/api/grpc/endpoint.go new file mode 100644 index 0000000000..3cd2086cb4 --- /dev/null +++ b/things/policies/api/grpc/endpoint.go @@ -0,0 +1,43 @@ +package grpc + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + "github.com/mainflux/mainflux/things/clients" + "github.com/mainflux/mainflux/things/policies" +) + +func authorizeEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(authorizeReq) + if err := req.validate(); err != nil { + return nil, err + } + ar := policies.AccessRequest{ + Subject: req.clientID, + Object: req.groupID, + Action: req.action, + } + thingID, err := svc.Authorize(ctx, ar, req.entityType) + if err != nil { + return authorizeRes{authorized: false}, err + } + + return authorizeRes{authorized: true, thingID: thingID}, nil + } +} + +func identifyEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(identifyReq) + if err := req.validate(); err != nil { + return nil, err + } + id, err := svc.Identify(ctx, req.key) + if err != nil { + return identityRes{}, err + } + return identityRes{id: id}, nil + } +} diff --git 
a/things/policies/api/grpc/requests.go b/things/policies/api/grpc/requests.go new file mode 100644 index 0000000000..c9752f683a --- /dev/null +++ b/things/policies/api/grpc/requests.go @@ -0,0 +1,39 @@ +package grpc + +import ( + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/things/policies" +) + +type authorizeReq struct { + entityType string + clientID string + groupID string + action string +} + +func (req authorizeReq) validate() error { + if req.clientID == "" { + return apiutil.ErrMissingPolicySub + } + if req.groupID == "" { + return apiutil.ErrMissingPolicyObj + } + if ok := policies.ValidateAction(req.action); !ok { + return apiutil.ErrMalformedPolicyAct + } + + return nil +} + +type identifyReq struct { + key string +} + +func (req identifyReq) validate() error { + if req.key == "" { + return apiutil.ErrBearerKey + } + + return nil +} diff --git a/things/policies/api/grpc/responses.go b/things/policies/api/grpc/responses.go new file mode 100644 index 0000000000..1ba151a2ef --- /dev/null +++ b/things/policies/api/grpc/responses.go @@ -0,0 +1,10 @@ +package grpc + +type identityRes struct { + id string +} + +type authorizeRes struct { + thingID string + authorized bool +} diff --git a/things/policies/api/grpc/transport.go b/things/policies/api/grpc/transport.go new file mode 100644 index 0000000000..3ac53d789f --- /dev/null +++ b/things/policies/api/grpc/transport.go @@ -0,0 +1,103 @@ +package grpc + +import ( + "context" + + kitgrpc "github.com/go-kit/kit/transport/grpc" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/clients" + "github.com/mainflux/mainflux/things/policies" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ policies.ThingsServiceServer = (*grpcServer)(nil) + +type grpcServer struct { + authorize kitgrpc.Handler + 
identify kitgrpc.Handler + policies.UnimplementedThingsServiceServer +} + +// NewServer returns new ThingsServiceServer instance. +func NewServer(csvc clients.Service, psvc policies.Service) policies.ThingsServiceServer { + return &grpcServer{ + authorize: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("can_access_by_id"))(authorizeEndpoint(psvc)), + decodeAuthorizeRequest, + encodeAuthorizeResponse, + ), + identify: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("identify"))(identifyEndpoint(csvc)), + decodeIdentifyRequest, + encodeIdentityResponse, + ), + } +} + +func (gs *grpcServer) Authorize(ctx context.Context, req *policies.AuthorizeReq) (*policies.AuthorizeRes, error) { + _, res, err := gs.authorize.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + + return res.(*policies.AuthorizeRes), nil +} + +func (gs *grpcServer) Identify(ctx context.Context, req *policies.Key) (*policies.ClientID, error) { + _, res, err := gs.identify.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + + return res.(*policies.ClientID), nil +} + +func decodeAuthorizeRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.AuthorizeReq) + return authorizeReq{entityType: req.GetEntityType(), clientID: req.GetSub(), groupID: req.GetObj(), action: req.GetAct()}, nil +} + +func decodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.Key) + return identifyReq{key: req.GetValue()}, nil +} + +func encodeIdentityResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(identityRes) + return &policies.ClientID{Value: res.id}, nil +} + +func encodeAuthorizeResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(authorizeRes) + return &policies.AuthorizeRes{ThingID: res.thingID, Authorized: res.authorized}, nil +} + +func encodeError(err 
error) error { + switch { + case errors.Contains(err, nil): + return nil + case errors.Contains(err, errors.ErrMalformedEntity), + err == apiutil.ErrInvalidAuthKey, + err == apiutil.ErrMissingID, + err == apiutil.ErrMissingPolicySub, + err == apiutil.ErrMissingPolicyObj, + err == apiutil.ErrMalformedPolicyAct, + err == apiutil.ErrMalformedPolicy, + err == apiutil.ErrMissingPolicyOwner, + err == apiutil.ErrBearerKey, + err == apiutil.ErrHigherPolicyRank: + return status.Error(codes.InvalidArgument, err.Error()) + case errors.Contains(err, errors.ErrAuthentication), + err == apiutil.ErrBearerToken: + return status.Error(codes.Unauthenticated, err.Error()) + case errors.Contains(err, errors.ErrAuthorization): + return status.Error(codes.PermissionDenied, err.Error()) + case errors.Contains(err, errors.ErrNotFound): + return status.Error(codes.NotFound, "entity does not exist") + default: + return status.Error(codes.Internal, "internal server error") + } +} diff --git a/things/policies/api/http/doc.go b/things/policies/api/http/doc.go new file mode 100644 index 0000000000..1fa6fe5fe8 --- /dev/null +++ b/things/policies/api/http/doc.go @@ -0,0 +1,5 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Package api contains implementation of things policies HTTP API. 
+package api diff --git a/things/policies/api/http/endpoints.go b/things/policies/api/http/endpoints.go new file mode 100644 index 0000000000..e75115d619 --- /dev/null +++ b/things/policies/api/http/endpoints.go @@ -0,0 +1,188 @@ +package api + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + "github.com/mainflux/mainflux/things/clients" + "github.com/mainflux/mainflux/things/policies" +) + +func identifyEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(identifyReq) + if err := req.validate(); err != nil { + return nil, err + } + + id, err := svc.Identify(ctx, req.Token) + if err != nil { + return nil, err + } + + return identityRes{ID: id}, nil + } +} + +func authorizeEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(authorizeReq) + if err := req.validate(); err != nil { + return nil, err + } + ar := policies.AccessRequest{ + Subject: req.ClientSecret, + Object: req.GroupID, + Action: req.Action, + } + id, err := svc.Authorize(ctx, ar, req.EntityType) + if err != nil { + return authorizeRes{Authorized: false}, err + } + + return authorizeRes{ThingID: id, Authorized: true}, nil + } +} + +func connectEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + cr := request.(createPolicyReq) + + if err := cr.validate(); err != nil { + return nil, err + } + if len(cr.Actions) == 0 { + cr.Actions = policies.PolicyTypes + } + policy := policies.Policy{ + Subject: cr.ClientID, + Object: cr.GroupID, + Actions: cr.Actions, + } + policy, err := svc.AddPolicy(ctx, cr.token, policy) + if err != nil { + return nil, err + } + + return policyRes{[]policies.Policy{policy}, true}, nil + } +} + +func connectThingsEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request 
interface{}) (interface{}, error) { + cr := request.(createPoliciesReq) + + if err := cr.validate(); err != nil { + return nil, err + } + ps := []policies.Policy{} + for _, tid := range cr.ClientIDs { + for _, cid := range cr.GroupIDs { + if len(cr.Actions) == 0 { + cr.Actions = policies.PolicyTypes + } + policy := policies.Policy{ + Subject: tid, + Object: cid, + Actions: cr.Actions, + } + if _, err := svc.AddPolicy(ctx, cr.token, policy); err != nil { + return nil, err + } + ps = append(ps, policy) + } + } + + return policyRes{created: true, Policies: ps}, nil + } +} + +func updatePolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + cr := request.(policyReq) + + if err := cr.validate(); err != nil { + return nil, err + } + policy := policies.Policy{ + Subject: cr.ClientID, + Object: cr.GroupID, + Actions: policies.PolicyTypes, + } + policy, err := svc.UpdatePolicy(ctx, cr.token, policy) + if err != nil { + return nil, err + } + + return policyRes{[]policies.Policy{policy}, true}, nil + } +} + +func listPoliciesEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + lpr := request.(listPoliciesReq) + + if err := lpr.validate(); err != nil { + return nil, err + } + policy := policies.Page{ + Limit: lpr.limit, + Offset: lpr.offset, + Subject: lpr.client, + Object: lpr.group, + Action: lpr.action, + OwnerID: lpr.owner, + } + policyPage, err := svc.ListPolicies(ctx, lpr.token, policy) + if err != nil { + return nil, err + } + + return listPolicyRes{policyPage}, nil + } +} + +func disconnectEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + cr := request.(createPolicyReq) + if err := cr.validate(); err != nil { + return nil, err + } + + if len(cr.Actions) == 0 { + cr.Actions = policies.PolicyTypes + } + policy := policies.Policy{ + 
Subject: cr.ClientID, + Object: cr.GroupID, + Actions: cr.Actions, + } + if err := svc.DeletePolicy(ctx, cr.token, policy); err != nil { + return nil, err + } + + return deletePolicyRes{}, nil + } +} + +func disconnectThingsEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createPoliciesReq) + if err := req.validate(); err != nil { + return nil, err + } + for _, tid := range req.ClientIDs { + for _, cid := range req.GroupIDs { + policy := policies.Policy{ + Subject: tid, + Object: cid, + } + if err := svc.DeletePolicy(ctx, req.token, policy); err != nil { + return nil, err + } + } + } + + return deletePolicyRes{}, nil + } +} diff --git a/things/policies/api/http/logging.go b/things/policies/api/http/logging.go new file mode 100644 index 0000000000..661956d115 --- /dev/null +++ b/things/policies/api/http/logging.go @@ -0,0 +1,82 @@ +package api + +import ( + "context" + "fmt" + "time" + + mflog "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/things/policies" +) + +var _ policies.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger mflog.Logger + svc policies.Service +} + +// LoggingMiddleware returns a new logging middleware. 
+func LoggingMiddleware(svc policies.Service, logger mflog.Logger) policies.Service { + return &loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) Authorize(ctx context.Context, ar policies.AccessRequest, entityType string) (id string, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method authorize for channel with id %s by client with id %s took %s to complete", ar.Object, ar.Subject, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.Authorize(ctx, ar, entityType) +} + +func (lm *loggingMiddleware) AddPolicy(ctx context.Context, token string, p policies.Policy) (policy policies.Policy, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method add_policy for client with id %s using token %s took %s to complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.AddPolicy(ctx, token, p) +} + +func (lm *loggingMiddleware) UpdatePolicy(ctx context.Context, token string, p policies.Policy) (policy policies.Policy, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_policy for client with id %s using token %s took %s to complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdatePolicy(ctx, token, p) +} + +func (lm *loggingMiddleware) ListPolicies(ctx context.Context, token string, p policies.Page) (policypage policies.PolicyPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_policies for client with id %s using token %s took %s to 
complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListPolicies(ctx, token, p) +} + +func (lm *loggingMiddleware) DeletePolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method delete_policy for client with id %s using token %s took %s to complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.DeletePolicy(ctx, token, p) +} diff --git a/things/policies/api/http/metrics.go b/things/policies/api/http/metrics.go new file mode 100644 index 0000000000..b1b129846f --- /dev/null +++ b/things/policies/api/http/metrics.go @@ -0,0 +1,66 @@ +package api + +import ( + "context" + "time" + + "github.com/go-kit/kit/metrics" + "github.com/mainflux/mainflux/things/policies" +) + +var _ policies.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc policies.Service +} + +// MetricsMiddleware returns a new metrics middleware wrapper. 
+func MetricsMiddleware(svc policies.Service, counter metrics.Counter, latency metrics.Histogram) policies.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) AddPolicy(ctx context.Context, token string, p policies.Policy) (policy policies.Policy, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "add_policy").Add(1) + ms.latency.With("method", "add_policy").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.AddPolicy(ctx, token, p) +} + +func (ms *metricsMiddleware) UpdatePolicy(ctx context.Context, token string, p policies.Policy) (policy policies.Policy, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_policy").Add(1) + ms.latency.With("method", "update_policy").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdatePolicy(ctx, token, p) +} + +func (ms *metricsMiddleware) ListPolicies(ctx context.Context, token string, p policies.Page) (policypage policies.PolicyPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_policies").Add(1) + ms.latency.With("method", "list_policies").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListPolicies(ctx, token, p) +} + +func (ms *metricsMiddleware) DeletePolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "delete_policy").Add(1) + ms.latency.With("method", "delete_policy").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DeletePolicy(ctx, token, p) +} + +func (ms *metricsMiddleware) Authorize(ctx context.Context, ar policies.AccessRequest, entityType string) (id string, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "authorize").Add(1) + ms.latency.With("method", "authorize").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.Authorize(ctx, ar, entityType) +} 
diff --git a/things/policies/api/http/requests.go b/things/policies/api/http/requests.go new file mode 100644 index 0000000000..36d60b26fa --- /dev/null +++ b/things/policies/api/http/requests.go @@ -0,0 +1,120 @@ +package api + +import ( + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" +) + +type createPolicyReq struct { + token string + Owner string `json:"owner,omitempty"` + ClientID string `json:"client,omitempty"` + GroupID string `json:"group,omitempty"` + Actions []string `json:"actions,omitempty"` +} + +func (req createPolicyReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.GroupID == "" || req.ClientID == "" { + return apiutil.ErrMissingID + } + return nil +} + +type createPoliciesReq struct { + token string + Owner string `json:"owner,omitempty"` + ClientIDs []string `json:"client_ids,omitempty"` + GroupIDs []string `json:"group_ids,omitempty"` + Actions []string `json:"actions,omitempty"` +} + +func (req createPoliciesReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if len(req.GroupIDs) == 0 || len(req.ClientIDs) == 0 { + return apiutil.ErrEmptyList + } + + for _, chID := range req.GroupIDs { + if chID == "" { + return apiutil.ErrMissingID + } + } + for _, thingID := range req.ClientIDs { + if thingID == "" { + return apiutil.ErrMissingID + } + } + return nil +} + +type identifyReq struct { + Token string `json:"token"` +} + +func (req identifyReq) validate() error { + if req.Token == "" { + return apiutil.ErrBearerKey + } + + return nil +} + +type authorizeReq struct { + ClientSecret string `json:"secret"` + GroupID string `json:"group_id"` + Action string `json:"action"` + EntityType string `json:"entity_type"` +} + +func (req authorizeReq) validate() error { + if req.GroupID == "" { + return apiutil.ErrMissingID + } + if req.ClientSecret == "" { + return apiutil.ErrMissingSecret + } + + return nil +} + +type policyReq struct { 
+ token string + Owner string `json:"owner,omitempty"` + ClientID string `json:"client,omitempty"` + GroupID string `json:"group,omitempty"` + Action string `json:"action,omitempty"` +} + +func (req policyReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + return nil +} + +type listPoliciesReq struct { + token string + offset uint64 + limit uint64 + client string + group string + action string + owner string +} + +func (req listPoliciesReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.limit > api.MaxLimitSize || req.limit < 1 { + return apiutil.ErrLimitSize + } + + return nil +} diff --git a/things/policies/api/http/responses.go b/things/policies/api/http/responses.go new file mode 100644 index 0000000000..2350079fd6 --- /dev/null +++ b/things/policies/api/http/responses.go @@ -0,0 +1,100 @@ +package api + +import ( + "net/http" + + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/things/policies" +) + +var ( + _ mainflux.Response = (*policyRes)(nil) + _ mainflux.Response = (*listPolicyRes)(nil) + _ mainflux.Response = (*identityRes)(nil) + _ mainflux.Response = (*authorizeRes)(nil) + _ mainflux.Response = (*deletePolicyRes)(nil) +) + +type policyRes struct { + Policies []policies.Policy `json:"policies"` + created bool +} + +func (res policyRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res policyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res policyRes) Empty() bool { + return false +} + +type listPolicyRes struct { + policies.PolicyPage `json:"policies"` +} + +func (res listPolicyRes) Code() int { + return http.StatusOK +} + +func (res listPolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res listPolicyRes) Empty() bool { + return false +} + +type deletePolicyRes struct{} + +func (res deletePolicyRes) Code() int { + return http.StatusNoContent +} + +func 
(res deletePolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res deletePolicyRes) Empty() bool { + return true +} + +type identityRes struct { + ID string `json:"id"` +} + +func (res identityRes) Code() int { + return http.StatusOK +} + +func (res identityRes) Headers() map[string]string { + return map[string]string{} +} + +func (res identityRes) Empty() bool { + return false +} + +type authorizeRes struct { + ThingID string `json:"thing_id"` + Authorized bool `json:"authorized"` +} + +func (res authorizeRes) Code() int { + return http.StatusOK +} + +func (res authorizeRes) Headers() map[string]string { + return map[string]string{} +} + +func (res authorizeRes) Empty() bool { + return true +} diff --git a/things/policies/api/http/transport.go b/things/policies/api/http/transport.go new file mode 100644 index 0000000000..b086d2f99d --- /dev/null +++ b/things/policies/api/http/transport.go @@ -0,0 +1,203 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + kithttp "github.com/go-kit/kit/transport/http" + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/clients" + "github.com/mainflux/mainflux/things/policies" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" +) + +// MakePolicyHandler returns an HTTP handler for API endpoints. 
+func MakePolicyHandler(csvc clients.Service, psvc policies.Service, mux *bone.Mux, logger logger.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + mux.Post("/connect", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("connect"))(connectThingsEndpoint(psvc)), + decodeConnectList, + api.EncodeResponse, + opts..., + )) + + mux.Post("/disconnect", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("disconnect"))(disconnectThingsEndpoint(psvc)), + decodeConnectList, + api.EncodeResponse, + opts..., + )) + + mux.Post("/channels/:chanID/things/:thingID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("connect_thing"))(connectEndpoint(psvc)), + decodeConnectThing, + api.EncodeResponse, + opts..., + )) + + mux.Delete("/channels/:chanID/things/:thingID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("disconnect_thing"))(disconnectEndpoint(psvc)), + decodeDisconnectThing, + api.EncodeResponse, + opts..., + )) + + mux.Post("/identify", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("identify"))(identifyEndpoint(csvc)), + decodeIdentify, + api.EncodeResponse, + opts..., + )) + + mux.Put("/identify", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_policy"))(updatePolicyEndpoint(psvc)), + decodeUpdatePolicy, + api.EncodeResponse, + opts..., + )) + + mux.Get("/identify", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_policies"))(listPoliciesEndpoint(psvc)), + decodeListPolicies, + api.EncodeResponse, + opts..., + )) + + mux.Post("/identify/channels/:chanID/access", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("authorize"))(authorizeEndpoint(psvc)), + decodeCanAccess, + api.EncodeResponse, + opts..., + )) + return mux + +} + +func decodeConnectThing(_ context.Context, r *http.Request) 
(interface{}, error) { + req := createPolicyReq{ + token: apiutil.ExtractBearerToken(r), + GroupID: bone.GetValue(r, "chanID"), + ClientID: bone.GetValue(r, "thingID"), + } + if r.Body != http.NoBody { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + } + + return req, nil +} + +func decodeDisconnectThing(_ context.Context, r *http.Request) (interface{}, error) { + req := createPolicyReq{ + token: apiutil.ExtractBearerToken(r), + GroupID: bone.GetValue(r, "chanID"), + ClientID: bone.GetValue(r, "thingID"), + } + if r.Body != http.NoBody { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + } + + return req, nil +} + +func decodeConnectList(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := createPoliciesReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeIdentify(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + req := identifyReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeCanAccess(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + req := authorizeReq{ + GroupID: bone.GetValue(r, "chanID"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + 
+func decodeUpdatePolicy(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := policyReq{token: apiutil.ExtractBearerToken(r)} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeListPolicies(_ context.Context, r *http.Request) (interface{}, error) { + o, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + l, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + c, err := apiutil.ReadStringQuery(r, api.ClientKey, "") + if err != nil { + return nil, err + } + g, err := apiutil.ReadStringQuery(r, api.GroupKey, "") + if err != nil { + return nil, err + } + a, err := apiutil.ReadStringQuery(r, api.ActionKey, "") + if err != nil { + return nil, err + } + oid, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + + req := listPoliciesReq{ + token: apiutil.ExtractBearerToken(r), + offset: o, + limit: l, + client: c, + group: g, + action: a, + owner: oid, + } + + return req, nil +} diff --git a/things/policies/auth.pb.go b/things/policies/auth.pb.go new file mode 100644 index 0000000000..8ef7d33897 --- /dev/null +++ b/things/policies/auth.pb.go @@ -0,0 +1,387 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: things/policies/auth.proto + +package policies + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthorizeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sub string `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` + Obj string `protobuf:"bytes,2,opt,name=obj,proto3" json:"obj,omitempty"` + Act string `protobuf:"bytes,3,opt,name=act,proto3" json:"act,omitempty"` + EntityType string `protobuf:"bytes,4,opt,name=entityType,proto3" json:"entityType,omitempty"` +} + +func (x *AuthorizeReq) Reset() { + *x = AuthorizeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_things_policies_auth_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthorizeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthorizeReq) ProtoMessage() {} + +func (x *AuthorizeReq) ProtoReflect() protoreflect.Message { + mi := &file_things_policies_auth_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthorizeReq.ProtoReflect.Descriptor instead. 
+func (*AuthorizeReq) Descriptor() ([]byte, []int) { + return file_things_policies_auth_proto_rawDescGZIP(), []int{0} +} + +func (x *AuthorizeReq) GetSub() string { + if x != nil { + return x.Sub + } + return "" +} + +func (x *AuthorizeReq) GetObj() string { + if x != nil { + return x.Obj + } + return "" +} + +func (x *AuthorizeReq) GetAct() string { + if x != nil { + return x.Act + } + return "" +} + +func (x *AuthorizeReq) GetEntityType() string { + if x != nil { + return x.EntityType + } + return "" +} + +type AuthorizeRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ThingID string `protobuf:"bytes,1,opt,name=thingID,proto3" json:"thingID,omitempty"` + Authorized bool `protobuf:"varint,2,opt,name=authorized,proto3" json:"authorized,omitempty"` +} + +func (x *AuthorizeRes) Reset() { + *x = AuthorizeRes{} + if protoimpl.UnsafeEnabled { + mi := &file_things_policies_auth_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthorizeRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthorizeRes) ProtoMessage() {} + +func (x *AuthorizeRes) ProtoReflect() protoreflect.Message { + mi := &file_things_policies_auth_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthorizeRes.ProtoReflect.Descriptor instead. 
+func (*AuthorizeRes) Descriptor() ([]byte, []int) { + return file_things_policies_auth_proto_rawDescGZIP(), []int{1} +} + +func (x *AuthorizeRes) GetThingID() string { + if x != nil { + return x.ThingID + } + return "" +} + +func (x *AuthorizeRes) GetAuthorized() bool { + if x != nil { + return x.Authorized + } + return false +} + +// If a key is not carrying any information itself, the type +// field can be used to determine how to validate the token. +// Also, different tokens can be encoded in different ways. +type Key struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Key) Reset() { + *x = Key{} + if protoimpl.UnsafeEnabled { + mi := &file_things_policies_auth_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Key) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Key) ProtoMessage() {} + +func (x *Key) ProtoReflect() protoreflect.Message { + mi := &file_things_policies_auth_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Key.ProtoReflect.Descriptor instead. 
+func (*Key) Descriptor() ([]byte, []int) { + return file_things_policies_auth_proto_rawDescGZIP(), []int{2} +} + +func (x *Key) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type ClientID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ClientID) Reset() { + *x = ClientID{} + if protoimpl.UnsafeEnabled { + mi := &file_things_policies_auth_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientID) ProtoMessage() {} + +func (x *ClientID) ProtoReflect() protoreflect.Message { + mi := &file_things_policies_auth_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientID.ProtoReflect.Descriptor instead. 
+func (*ClientID) Descriptor() ([]byte, []int) { + return file_things_policies_auth_proto_rawDescGZIP(), []int{3} +} + +func (x *ClientID) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_things_policies_auth_proto protoreflect.FileDescriptor + +var file_things_policies_auth_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x6d, 0x61, + 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x62, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x22, 0x48, 0x0a, 0x0c, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x74, 0x68, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, + 0x68, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x22, 0x1b, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x20, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 
0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0xbf, 0x01, 0x0a, 0x0d, 0x54, 0x68, 0x69, 0x6e, 0x67, 0x73, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, + 0x74, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x26, 0x2e, 0x6d, + 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x74, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x79, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x74, 0x68, + 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x4b, 0x65, + 0x79, 0x1a, 0x22, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x74, 0x68, 0x69, + 0x6e, 0x67, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x49, 0x44, 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_things_policies_auth_proto_rawDescOnce sync.Once + file_things_policies_auth_proto_rawDescData = file_things_policies_auth_proto_rawDesc +) + +func file_things_policies_auth_proto_rawDescGZIP() []byte { + file_things_policies_auth_proto_rawDescOnce.Do(func() { + file_things_policies_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_things_policies_auth_proto_rawDescData) + }) + return file_things_policies_auth_proto_rawDescData +} + +var file_things_policies_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 4) 
+var file_things_policies_auth_proto_goTypes = []interface{}{ + (*AuthorizeReq)(nil), // 0: mainflux.things.policies.AuthorizeReq + (*AuthorizeRes)(nil), // 1: mainflux.things.policies.AuthorizeRes + (*Key)(nil), // 2: mainflux.things.policies.Key + (*ClientID)(nil), // 3: mainflux.things.policies.ClientID +} +var file_things_policies_auth_proto_depIdxs = []int32{ + 0, // 0: mainflux.things.policies.ThingsService.Authorize:input_type -> mainflux.things.policies.AuthorizeReq + 2, // 1: mainflux.things.policies.ThingsService.Identify:input_type -> mainflux.things.policies.Key + 1, // 2: mainflux.things.policies.ThingsService.Authorize:output_type -> mainflux.things.policies.AuthorizeRes + 3, // 3: mainflux.things.policies.ThingsService.Identify:output_type -> mainflux.things.policies.ClientID + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_things_policies_auth_proto_init() } +func file_things_policies_auth_proto_init() { + if File_things_policies_auth_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_things_policies_auth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthorizeReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_things_policies_auth_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthorizeRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_things_policies_auth_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Key); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_things_policies_auth_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_things_policies_auth_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_things_policies_auth_proto_goTypes, + DependencyIndexes: file_things_policies_auth_proto_depIdxs, + MessageInfos: file_things_policies_auth_proto_msgTypes, + }.Build() + File_things_policies_auth_proto = out.File + file_things_policies_auth_proto_rawDesc = nil + file_things_policies_auth_proto_goTypes = nil + file_things_policies_auth_proto_depIdxs = nil +} diff --git a/things/policies/auth.proto b/things/policies/auth.proto new file mode 100644 index 0000000000..25904ada9e --- /dev/null +++ b/things/policies/auth.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; + +package mainflux.things.policies; + +option go_package = "./policies"; + +service ThingsService { + rpc Authorize(AuthorizeReq) returns (AuthorizeRes) {} + rpc Identify(Key) returns (ClientID) {} +} + +message AuthorizeReq { + string sub = 1; + string obj = 2; + string act = 3; + string entityType = 4; +} + +message AuthorizeRes { + string thingID = 1; + bool authorized = 2; +} + +// If a key is not carrying any information itself, the type +// field can be used to determine how to validate the token. +// Also, different tokens can be encoded in different ways. 
+message Key { + string value = 1; +} + +message ClientID { + string value = 1; +} \ No newline at end of file diff --git a/things/policies/auth_grpc.pb.go b/things/policies/auth_grpc.pb.go new file mode 100644 index 0000000000..a374bea875 --- /dev/null +++ b/things/policies/auth_grpc.pb.go @@ -0,0 +1,141 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.21.12 +// source: things/policies/auth.proto + +package policies + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ThingsServiceClient is the client API for ThingsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ThingsServiceClient interface { + Authorize(ctx context.Context, in *AuthorizeReq, opts ...grpc.CallOption) (*AuthorizeRes, error) + Identify(ctx context.Context, in *Key, opts ...grpc.CallOption) (*ClientID, error) +} + +type thingsServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewThingsServiceClient(cc grpc.ClientConnInterface) ThingsServiceClient { + return &thingsServiceClient{cc} +} + +func (c *thingsServiceClient) Authorize(ctx context.Context, in *AuthorizeReq, opts ...grpc.CallOption) (*AuthorizeRes, error) { + out := new(AuthorizeRes) + err := c.cc.Invoke(ctx, "/mainflux.things.policies.ThingsService/Authorize", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *thingsServiceClient) Identify(ctx context.Context, in *Key, opts ...grpc.CallOption) (*ClientID, error) { + out := new(ClientID) + err := c.cc.Invoke(ctx, "/mainflux.things.policies.ThingsService/Identify", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ThingsServiceServer is the server API for ThingsService service. +// All implementations must embed UnimplementedThingsServiceServer +// for forward compatibility +type ThingsServiceServer interface { + Authorize(context.Context, *AuthorizeReq) (*AuthorizeRes, error) + Identify(context.Context, *Key) (*ClientID, error) + mustEmbedUnimplementedThingsServiceServer() +} + +// UnimplementedThingsServiceServer must be embedded to have forward compatible implementations. +type UnimplementedThingsServiceServer struct { +} + +func (UnimplementedThingsServiceServer) Authorize(context.Context, *AuthorizeReq) (*AuthorizeRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method Authorize not implemented") +} +func (UnimplementedThingsServiceServer) Identify(context.Context, *Key) (*ClientID, error) { + return nil, status.Errorf(codes.Unimplemented, "method Identify not implemented") +} +func (UnimplementedThingsServiceServer) mustEmbedUnimplementedThingsServiceServer() {} + +// UnsafeThingsServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ThingsServiceServer will +// result in compilation errors. 
+type UnsafeThingsServiceServer interface { + mustEmbedUnimplementedThingsServiceServer() +} + +func RegisterThingsServiceServer(s grpc.ServiceRegistrar, srv ThingsServiceServer) { + s.RegisterService(&ThingsService_ServiceDesc, srv) +} + +func _ThingsService_Authorize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthorizeReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThingsServiceServer).Authorize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.things.policies.ThingsService/Authorize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThingsServiceServer).Authorize(ctx, req.(*AuthorizeReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _ThingsService_Identify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Key) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ThingsServiceServer).Identify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.things.policies.ThingsService/Identify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ThingsServiceServer).Identify(ctx, req.(*Key)) + } + return interceptor(ctx, in, info, handler) +} + +// ThingsService_ServiceDesc is the grpc.ServiceDesc for ThingsService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ThingsService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "mainflux.things.policies.ThingsService", + HandlerType: (*ThingsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Authorize", + Handler: _ThingsService_Authorize_Handler, + }, + { + MethodName: "Identify", + Handler: _ThingsService_Identify_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "things/policies/auth.proto", +} diff --git a/things/policies/mocks/channels.go b/things/policies/mocks/channels.go new file mode 100644 index 0000000000..758b3d1247 --- /dev/null +++ b/things/policies/mocks/channels.go @@ -0,0 +1,55 @@ +package mocks + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" +) + +type channelCacheMock struct { + mu sync.Mutex + policies map[string]string +} + +// NewChannelCache returns mock cache instance. 
+func NewChannelCache() policies.Cache { + return &channelCacheMock{ + policies: make(map[string]string), + } +} + +func (ccm *channelCacheMock) Put(_ context.Context, policy policies.Policy) error { + ccm.mu.Lock() + defer ccm.mu.Unlock() + + ccm.policies[fmt.Sprintf("%s:%s", policy.Subject, policy.Object)] = strings.Join(policy.Actions, ":") + return nil +} + +func (ccm *channelCacheMock) Get(_ context.Context, policy policies.Policy) (policies.Policy, error) { + ccm.mu.Lock() + defer ccm.mu.Unlock() + actions := ccm.policies[fmt.Sprintf("%s:%s", policy.Subject, policy.Object)] + + if actions != "" { + return policies.Policy{ + Subject: policy.Subject, + Object: policy.Object, + Actions: strings.Split(actions, ":"), + }, nil + } + + return policies.Policy{}, errors.ErrNotFound +} + +func (ccm *channelCacheMock) Remove(_ context.Context, policy policies.Policy) error { + ccm.mu.Lock() + defer ccm.mu.Unlock() + + delete(ccm.policies, fmt.Sprintf("%s:%s", policy.Subject, policy.Object)) + return nil +} diff --git a/things/policies/mocks/policies.go b/things/policies/mocks/policies.go new file mode 100644 index 0000000000..e7b4014d26 --- /dev/null +++ b/things/policies/mocks/policies.go @@ -0,0 +1,48 @@ +package mocks + +import ( + "context" + + "github.com/mainflux/mainflux/things/policies" + "github.com/stretchr/testify/mock" +) + +type PolicyRepository struct { + mock.Mock +} + +func (m *PolicyRepository) Delete(ctx context.Context, p policies.Policy) error { + ret := m.Called(ctx, p) + + return ret.Error(0) +} + +func (m *PolicyRepository) Retrieve(ctx context.Context, pm policies.Page) (policies.PolicyPage, error) { + ret := m.Called(ctx, pm) + + return ret.Get(0).(policies.PolicyPage), ret.Error(1) +} + +func (m *PolicyRepository) Save(ctx context.Context, p policies.Policy) (policies.Policy, error) { + ret := m.Called(ctx, p) + + return ret.Get(0).(policies.Policy), ret.Error(1) +} + +func (m *PolicyRepository) Update(ctx context.Context, p policies.Policy) 
(policies.Policy, error) { + ret := m.Called(ctx, p) + + return ret.Get(0).(policies.Policy), ret.Error(1) +} + +func (m *PolicyRepository) Evaluate(ctx context.Context, entityType string, p policies.Policy) error { + ret := m.Called(ctx, entityType, p) + + return ret.Error(0) +} + +func (m *PolicyRepository) RetrieveOne(ctx context.Context, subject, object string) (policies.Policy, error) { + ret := m.Called(ctx, subject, object) + + return ret.Get(0).(policies.Policy), ret.Error(1) +} diff --git a/things/policies/page.go b/things/policies/page.go new file mode 100644 index 0000000000..f88664ce36 --- /dev/null +++ b/things/policies/page.go @@ -0,0 +1,28 @@ +package policies + +import "github.com/mainflux/mainflux/internal/apiutil" + +// Metadata represents arbitrary JSON. +type Metadata map[string]interface{} + +// Page contains page metadata that helps navigation. +type Page struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` + OwnerID string `json:"owner,omitempty"` + Subject string `json:"subject,omitempty"` + Object string `json:"object,omitempty"` + Action string `json:"action,omitempty"` + Metadata Metadata `json:"metadata,omitempty"` +} + +// Validate check page actions. +func (p Page) Validate() error { + if p.Action != "" { + if ok := ValidateAction(p.Action); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + return nil +} diff --git a/things/policies/policies.go b/things/policies/policies.go new file mode 100644 index 0000000000..97d88282e4 --- /dev/null +++ b/things/policies/policies.go @@ -0,0 +1,132 @@ +package policies + +import ( + "context" + "time" + + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/users/policies" +) + +// PolicyTypes contains a list of the available policy types currently supported +var PolicyTypes = []string{WriteAction, ReadAction} + +// Policy represents an argument struct for making a policy related function calls. 
+type Policy struct { + OwnerID string `json:"owner_id"` + Subject string `json:"subject"` + Object string `json:"object"` + Actions []string `json:"actions"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + UpdatedBy string `json:"updated_by"` +} + +// AccessRequest represents an access control request for Authorization. +type AccessRequest struct { + Subject string `json:"subject"` + Object string `json:"object"` + Action string `json:"action"` +} + +// PolicyPage contains a page of policies. +type PolicyPage struct { + Page + Policies []Policy +} + +// Repository specifies an account persistence API. +type Repository interface { + // Save creates a policy for the given Subject, so that, after + // Save, `Subject` has a `relation` on `group_id`. Returns a non-nil + // error in case of failures. + Save(ctx context.Context, p Policy) (Policy, error) + + // Evaluate is used to evaluate if you have the correct permissions. + // We evaluate if we are in the same group first then evaluate if the + // object has that action over the subject + Evaluate(ctx context.Context, entityType string, p Policy) error + + // RetrieveOne retrieves policy by subject and object. + RetrieveOne(ctx context.Context, subject, object string) (Policy, error) + + // Update updates the policy type. + Update(ctx context.Context, p Policy) (Policy, error) + + // Retrieve retrieves policy for a given input. + Retrieve(ctx context.Context, pm Page) (PolicyPage, error) + + // Delete deletes the policy + Delete(ctx context.Context, p Policy) error +} + +// Service represents a authorization service. It exposes +// functionalities through `auth` to perform authorization. +type Service interface { + // Authorize checks authorization of the given `subject`. + // Authorize verifies that Is `subject` allowed to `relation` on + // `object`. 
Authorize returns a non-nil error if the subject has + // no relation on the object (which simply means the operation is + // denied). + Authorize(ctx context.Context, ar AccessRequest, entity string) (string, error) + + // AddPolicy creates a policy for the given subject, so that, after + // AddPolicy, `subject` has a `relation` on `object`. Returns a non-nil + // error in case of failures. + AddPolicy(ctx context.Context, token string, p Policy) (Policy, error) + + // DeletePolicy removes a policy. + DeletePolicy(ctx context.Context, token string, p Policy) error + + // UpdatePolicy updates an existing policy + UpdatePolicy(ctx context.Context, token string, p Policy) (Policy, error) + + // ListPolicies lists existing policies + ListPolicies(ctx context.Context, token string, p Page) (PolicyPage, error) +} + +// Cache contains channel-thing connection caching interface. +type Cache interface { + // Put connects group to a client with the specified action. + Put(ctx context.Context, policy Policy) error + + // Get checks if a client is connected to group. + Get(ctx context.Context, policy Policy) (Policy, error) + + // Remove deletes a client connection to a group. + Remove(ctx context.Context, policy Policy) error +} + +// Validate returns an error if policy representation is invalid. 
+func (p Policy) Validate() error { + if p.Subject == "" { + return apiutil.ErrMissingPolicySub + } + if p.Object == "" { + return apiutil.ErrMissingPolicyObj + } + if len(p.Actions) == 0 { + return apiutil.ErrMalformedPolicyAct + } + for _, p := range p.Actions { + // Validate things policies first + if ok := ValidateAction(p); !ok { + // Validate users policies for clients connected to a group + if ok := policies.ValidateAction(p); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + } + return nil +} + +// ValidateAction check if the action is in policies +func ValidateAction(act string) bool { + for _, v := range PolicyTypes { + if v == act { + return true + } + } + return false + +} diff --git a/things/policies/postgres/doc.go b/things/policies/postgres/doc.go new file mode 100644 index 0000000000..bf560bea28 --- /dev/null +++ b/things/policies/postgres/doc.go @@ -0,0 +1 @@ +package postgres diff --git a/things/policies/postgres/policies.go b/things/policies/postgres/policies.go new file mode 100644 index 0000000000..6d9ab50534 --- /dev/null +++ b/things/policies/postgres/policies.go @@ -0,0 +1,297 @@ +package postgres + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/jackc/pgtype" + "github.com/mainflux/mainflux/internal/postgres" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" +) + +var _ policies.Repository = (*prepo)(nil) + +var ( + // ErrInvalidEntityType indicates that the entity type is invalid. + ErrInvalidEntityType = errors.New("invalid entity type") +) + +type prepo struct { + db postgres.Database +} + +// NewRepository instantiates a PostgreSQL implementation of policy repository. 
+func NewRepository(db postgres.Database) policies.Repository { + return &prepo{ + db: db, + } +} + +func (pr prepo) Save(ctx context.Context, policy policies.Policy) (policies.Policy, error) { + q := `INSERT INTO policies (owner_id, subject, object, actions, created_at, updated_at, updated_by) + VALUES (:owner_id, :subject, :object, :actions, :created_at, :updated_at, :updated_by) + RETURNING owner_id, subject, object, actions, created_at, updated_at, updated_by;` + + dbp, err := toDBPolicy(policy) + if err != nil { + return policies.Policy{}, errors.Wrap(errors.ErrCreateEntity, err) + } + + row, err := pr.db.NamedQueryContext(ctx, q, dbp) + if err != nil { + return policies.Policy{}, postgres.HandleError(err, errors.ErrCreateEntity) + } + + defer row.Close() + row.Next() + dbp = dbPolicy{} + if err := row.StructScan(&dbp); err != nil { + return policies.Policy{}, err + } + + return toPolicy(dbp) +} + +func (pr prepo) RetrieveOne(ctx context.Context, subject, object string) (policies.Policy, error) { + q := `SELECT subject, object, actions + FROM policies p INNER JOIN clients c ON c.id = p.subject + WHERE c.secret = :subject AND p.object = :object` + params := struct { + Subject string `db:"subject"` + Object string `db:"object"` + }{ + Subject: subject, + Object: object, + } + row, err := pr.db.NamedQueryContext(ctx, q, params) + if err != nil { + return policies.Policy{}, postgres.HandleError(err, errors.ErrAuthorization) + } + + defer row.Close() + + if ok := row.Next(); !ok { + return policies.Policy{}, errors.Wrap(errors.ErrAuthorization, row.Err()) + } + var p dbPolicy + if err := row.StructScan(&p); err != nil { + return policies.Policy{}, err + } + return toPolicy(p) +} + +func (pr prepo) Evaluate(ctx context.Context, entityType string, policy policies.Policy) error { + q := "" + switch entityType { + case "client": + // Evaluates if two clients are connected to the same group and the subject has the specified action + // or subject is the owner of the 
object + q = fmt.Sprintf(`SELECT COALESCE(p.subject, c.id) as subject FROM policies p + JOIN policies p2 ON p.object = p2.object LEFT JOIN clients c ON c.owner_id = :subject AND c.id = :object + WHERE (p.subject = :subject AND p2.subject = :object AND '%s' = ANY(p.actions)) OR (c.id IS NOT NULL) LIMIT 1;`, + policy.Actions[0]) + case "group": + // Evaluates if client is connected to the specified group and has the required action + q = fmt.Sprintf(`SELECT DISTINCT policies.subject FROM policies + LEFT JOIN groups ON groups.owner_id = policies.subject AND groups.id = policies.object + WHERE policies.subject = :subject AND policies.object = :object AND '%s' = ANY(policies.actions) + LIMIT 1`, policy.Actions[0]) + default: + return ErrInvalidEntityType + } + dbu, err := toDBPolicy(policy) + if err != nil { + return errors.Wrap(errors.ErrAuthorization, err) + } + row, err := pr.db.NamedQueryContext(ctx, q, dbu) + if err != nil { + return postgres.HandleError(err, errors.ErrAuthorization) + } + + defer row.Close() + + if ok := row.Next(); !ok { + return errors.Wrap(errors.ErrAuthorization, row.Err()) + } + var rPolicy dbPolicy + if err := row.StructScan(&rPolicy); err != nil { + return err + } + return nil +} + +func (pr prepo) Update(ctx context.Context, policy policies.Policy) (policies.Policy, error) { + q := `UPDATE policies SET actions = :actions, updated_at = :updated_at, updated_by = :updated_by + WHERE subject = :subject AND object = :object + RETURNING owner_id, subject, object, actions, created_at, updated_at;` + + dbp, err := toDBPolicy(policy) + if err != nil { + return policies.Policy{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + + row, err := pr.db.NamedQueryContext(ctx, q, dbp) + if err != nil { + return policies.Policy{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return policies.Policy{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbp = dbPolicy{} + if err := row.StructScan(&dbp); 
err != nil { + return policies.Policy{}, errors.Wrap(err, errors.ErrUpdateEntity) + } + return toPolicy(dbp) +} + +func (pr prepo) Retrieve(ctx context.Context, pm policies.Page) (policies.PolicyPage, error) { + var query []string + var emq string + + if pm.OwnerID != "" { + query = append(query, "owner_id = :owner_id") + } + if pm.Subject != "" { + query = append(query, "subject = :subject") + } + if pm.Object != "" { + query = append(query, "object = :object") + } + if pm.Action != "" { + query = append(query, ":action = ANY (actions)") + } + + if len(query) > 0 { + emq = fmt.Sprintf(" WHERE %s", strings.Join(query, " AND ")) + } + + q := fmt.Sprintf(`SELECT owner_id, subject, object, actions + FROM policies %s ORDER BY updated_at LIMIT :limit OFFSET :offset;`, emq) + + dbPage, err := toDBPoliciesPage(pm) + if err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + rows, err := pr.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + defer rows.Close() + + var items []policies.Policy + for rows.Next() { + dbp := dbPolicy{} + if err := rows.StructScan(&dbp); err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + policy, err := toPolicy(dbp) + if err != nil { + return policies.PolicyPage{}, err + } + + items = append(items, policy) + } + + cq := fmt.Sprintf(`SELECT COUNT(*) FROM policies %s;`, emq) + + total, err := postgres.Total(ctx, pr.db, cq, dbPage) + if err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + page := policies.PolicyPage{ + Policies: items, + Page: policies.Page{ + Total: total, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + + return page, nil +} + +func (pr prepo) Delete(ctx context.Context, p policies.Policy) error { + dbp := dbPolicy{ + Subject: p.Subject, + Object: p.Object, + } + q := `DELETE FROM policies WHERE subject = :subject AND object = :object` 
+ if _, err := pr.db.NamedExecContext(ctx, q, dbp); err != nil { + return errors.Wrap(errors.ErrRemoveEntity, err) + } + return nil +} + +type dbPolicy struct { + OwnerID string `db:"owner_id"` + Subject string `db:"subject"` + Object string `db:"object"` + Actions pgtype.TextArray `db:"actions"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + UpdatedBy string `db:"updated_by"` +} + +func toDBPolicy(p policies.Policy) (dbPolicy, error) { + var ps pgtype.TextArray + if err := ps.Set(p.Actions); err != nil { + return dbPolicy{}, err + } + + return dbPolicy{ + OwnerID: p.OwnerID, + Subject: p.Subject, + Object: p.Object, + Actions: ps, + CreatedAt: p.CreatedAt, + UpdatedAt: p.UpdatedAt, + UpdatedBy: p.UpdatedBy, + }, nil +} + +func toPolicy(dbp dbPolicy) (policies.Policy, error) { + var ps []string + for _, e := range dbp.Actions.Elements { + ps = append(ps, e.String) + } + + return policies.Policy{ + OwnerID: dbp.OwnerID, + Subject: dbp.Subject, + Object: dbp.Object, + Actions: ps, + CreatedAt: dbp.CreatedAt, + UpdatedAt: dbp.UpdatedAt, + UpdatedBy: dbp.UpdatedBy, + }, nil +} + +func toDBPoliciesPage(pm policies.Page) (dbPoliciesPage, error) { + return dbPoliciesPage{ + Total: pm.Total, + Offset: pm.Offset, + Limit: pm.Limit, + OwnerID: pm.OwnerID, + Subject: pm.Subject, + Object: pm.Object, + Action: pm.Action, + }, nil +} + +type dbPoliciesPage struct { + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` + OwnerID string `db:"owner_id"` + Subject string `db:"subject"` + Object string `db:"object"` + Action string `db:"action"` +} diff --git a/things/policies/postgres/policies_test.go b/things/policies/postgres/policies_test.go new file mode 100644 index 0000000000..81acc99406 --- /dev/null +++ b/things/policies/postgres/policies_test.go @@ -0,0 +1,662 @@ +package postgres_test + +import ( + "context" + "fmt" + "testing" + + "github.com/mainflux/mainflux/internal/postgres" + 
"github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + cpostgres "github.com/mainflux/mainflux/things/clients/postgres" + gpostgres "github.com/mainflux/mainflux/things/groups/postgres" + "github.com/mainflux/mainflux/things/policies" + ppostgres "github.com/mainflux/mainflux/things/policies/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() +) + +func TestPoliciesSave(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + group := mfgroups.Group{ + ID: uid, + Name: "policy-save@example.com", + Status: mfclients.EnabledStatus, + } + + _, err := grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + uid = testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + policy policies.Policy + err error + }{ + { + desc: "add new policy successfully", + policy: policies.Policy{ + OwnerID: testsutil.GenerateUUID(t, idProvider), + Subject: uid, + Object: group.ID, + Actions: []string{"c_delete"}, + }, + err: nil, + }, + { + desc: "add policy with duplicate subject, object and action", + policy: policies.Policy{ + OwnerID: testsutil.GenerateUUID(t, idProvider), + Subject: uid, + Object: group.ID, + Actions: []string{"c_delete"}, + }, + err: errors.ErrConflict, + }, + } + + for _, tc := range cases { + _, err := repo.Save(context.Background(), tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestPoliciesEvaluate(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := 
ppostgres.NewRepository(database) + crepo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "connectedclients-clientA@example.com", + Credentials: mfclients.Credentials{ + Identity: "connectedclients-clientA@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "connectedclients-clientB@example.com", + Credentials: mfclients.Credentials{ + Identity: "connectedclients-clientB@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "connecting-group@example.com", + } + + _, err := crepo.Save(context.Background(), client1) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + _, err = crepo.Save(context.Background(), client2) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + group, err = grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policy1 := policies.Policy{ + OwnerID: client1.ID, + Subject: client1.ID, + Object: group.ID, + Actions: []string{"c_update", "g_update"}, + } + policy2 := policies.Policy{ + OwnerID: client2.ID, + Subject: client2.ID, + Object: group.ID, + Actions: []string{"c_update", "g_update"}, + } + _, err = repo.Save(context.Background(), policy1) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + _, err = repo.Save(context.Background(), policy2) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + Subject string + Object string + Action string + Domain string + err error + }{ + "evaluate valid client update": {client1.ID, client2.ID, "c_update", "client", nil}, + "evaluate valid group update": {client1.ID, group.ID, 
"g_update", "group", nil}, + "evaluate valid client list": {client1.ID, client2.ID, "c_list", "client", errors.ErrAuthorization}, + "evaluate valid group list": {client1.ID, group.ID, "g_list", "group", errors.ErrAuthorization}, + "evaluate invalid client delete": {client1.ID, client2.ID, "c_delete", "client", errors.ErrAuthorization}, + "evaluate invalid group delete": {client1.ID, group.ID, "g_delete", "group", errors.ErrAuthorization}, + "evaluate invalid client update": {"unknown", "unknown", "c_update", "client", errors.ErrAuthorization}, + "evaluate invalid group update": {"unknown", "unknown", "c_update", "group", errors.ErrAuthorization}, + } + + for desc, tc := range cases { + p := policies.Policy{ + Subject: tc.Subject, + Object: tc.Object, + Actions: []string{tc.Action}, + } + err := repo.Evaluate(context.Background(), tc.Domain, p) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } +} + +func TestPoliciesRetrieve(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewRepository(database) + crepo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + client := mfclients.Client{ + ID: uid, + Name: "single-policy-retrieval@example.com", + Credentials: mfclients.Credentials{ + Identity: "single-policy-retrieval@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + + _, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policy-save@example.com", + Status: mfclients.EnabledStatus, + } + _, err = grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policy := policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: group.ID, + 
Actions: []string{"c_delete"}, + } + + _, err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + Subject string + Object string + err error + }{ + "retrieve existing policy": {uid, uid, nil}, + "retrieve non-existing policy": {"unknown", "unknown", nil}, + } + + for desc, tc := range cases { + pm := policies.Page{ + Subject: tc.Subject, + Object: tc.Object, + } + _, err := repo.Retrieve(context.Background(), pm) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } +} + +func TestPoliciesUpdate(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewRepository(database) + crepo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policy-update@example.com", + Credentials: mfclients.Credentials{ + Identity: "policy-update@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + + _, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error during saving client: %s", err)) + + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policy-save@example.com", + Status: mfclients.EnabledStatus, + } + _, err = grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policy := policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: group.ID, + Actions: []string{"c_delete"}, + } + _, err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error during saving policy: %s", err)) + + cases := []struct { + desc string + policy policies.Policy + resp policies.Policy + err error + }{ + { + desc: "update policy successfully", + policy: policies.Policy{ + Subject: client.ID, + Object: group.ID, + Actions: 
[]string{"c_update"}, + }, + resp: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: group.ID, + Actions: []string{"c_update"}, + }, + err: nil, + }, + { + desc: "update policy with missing owner id", + policy: policies.Policy{ + OwnerID: "", + Subject: client.ID, + Object: group.ID, + Actions: []string{"c_delete"}, + }, + resp: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: group.ID, + Actions: []string{"c_delete"}, + }, + err: nil, + }, + { + desc: "update policy with missing subject", + policy: policies.Policy{ + OwnerID: client.ID, + Subject: "", + Object: group.ID, + Actions: []string{"c_add"}, + }, + resp: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: group.ID, + Actions: []string{"c_delete"}, + }, + err: errors.ErrNotFound, + }, + { + desc: "update policy with missing object", + policy: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: "", + Actions: []string{"c_add"}, + }, + resp: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: group.ID, + Actions: []string{"c_delete"}, + }, + + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + _, err := repo.Update(context.Background(), tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + policPage, err := repo.Retrieve(context.Background(), policies.Page{ + Offset: uint64(0), + Limit: uint64(10), + Subject: tc.policy.Subject, + }) + if err == nil { + assert.Equal(t, tc.resp, policPage.Policies[0], fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestPoliciesRetrievalAll(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := ppostgres.NewRepository(database) + crepo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + + var nPolicies = uint64(10) + + clientA := mfclients.Client{ + ID: 
testsutil.GenerateUUID(t, idProvider), + Name: "policyA-retrievalall@example.com", + Credentials: mfclients.Credentials{ + Identity: "policyA-retrievalall@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + clientB := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policyB-retrievalall@example.com", + Credentials: mfclients.Credentials{ + Identity: "policyB-retrievalall@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + + _, err := crepo.Save(context.Background(), clientA) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + _, err = crepo.Save(context.Background(), clientB) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + groupID := "" + for i := uint64(0); i < nPolicies; i++ { + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: fmt.Sprintf("TestRetrieveAll%d@example.com", i), + Status: mfclients.EnabledStatus, + } + if i == 0 { + groupID = group.ID + } + _, err = grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + if i%2 == 0 { + policy := policies.Policy{ + OwnerID: clientA.ID, + Subject: clientA.ID, + Object: group.ID, + Actions: []string{"c_delete"}, + } + _, err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + } + policy := policies.Policy{ + Subject: clientB.ID, + Object: group.ID, + Actions: []string{"c_add", "c_update"}, + } + _, err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + } + + cases := map[string]struct { + size uint64 + pm policies.Page + }{ + "retrieve all policies with limit and offset": { + pm: policies.Page{ + Offset: 5, + Limit: nPolicies, + }, + size: 10, + }, + "retrieve all policies by Subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, 
+ Subject: clientA.ID, + }, + size: 5, + }, + "retrieve policies by wrong Subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Subject: "wrongSubject", + }, + size: 0, + }, + + "retrieve all policies by Object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Object: groupID, + }, + size: 2, + }, + "retrieve policies by wrong Object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Object: "TestRetrieveAll45@example.com", + }, + size: 0, + }, + "retrieve all policies by Action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Action: "c_delete", + }, + size: 5, + }, + "retrieve policies by wrong Action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Action: "wrongAction", + }, + size: 0, + }, + "retrieve all policies by owner id and subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Subject: clientA.ID, + }, + size: 5, + }, + "retrieve policies by wrong owner id and correct subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Subject: clientA.ID, + }, + size: 0, + }, + "retrieve policies by correct owner id and wrong subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Subject: "wrongSubject", + }, + size: 0, + }, + "retrieve policies by wrong owner id and wrong subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + }, + size: 0, + }, + "retrieve all policies by owner id and object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Object: groupID, + }, + size: 1, + }, + "retrieve policies by wrong owner id and correct object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + 
Object: groupID, + }, + size: 0, + }, + "retrieve policies by correct owner id and wrong object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Object: "TestRetrieveAll45@example.com", + }, + size: 0, + }, + "retrieve policies by wrong owner id and wrong object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Object: "TestRetrieveAll45@example.com", + }, + size: 0, + }, + "retrieve all policies by owner id and action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Action: "c_delete", + }, + size: 5, + }, + "retrieve policies by wrong owner id and correct action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Action: "c_delete", + }, + size: 0, + }, + "retrieve policies by correct owner id and wrong action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Action: "wrongAction", + }, + size: 0, + }, + "retrieve policies by wrong owner id and wrong action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Action: "wrongAction", + }, + size: 0, + }, + } + for desc, tc := range cases { + page, err := repo.Retrieve(context.Background(), tc.pm) + size := uint64(len(page.Policies)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) + } +} + +func TestPoliciesDelete(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewRepository(database) + crepo := cpostgres.NewRepository(database) + grepo := gpostgres.NewRepository(database) + + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policy-delete@example.com", + Credentials: mfclients.Credentials{ + Identity: 
"policy-delete@example.com", + Secret: testsutil.GenerateUUID(t, idProvider), + }, + Status: mfclients.EnabledStatus, + } + + subject, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policy-save@example.com", + Status: mfclients.EnabledStatus, + } + _, err = grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policy := policies.Policy{ + OwnerID: subject[0].ID, + Subject: subject[0].ID, + Object: group.ID, + Actions: []string{"c_delete"}, + } + + _, err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + Subject string + Object string + err error + }{ + "delete non-existing policy": {"unknown", "unknown", nil}, + "delete non-existing policy with correct subject": {subject[0].ID, "unknown", nil}, + "delete non-existing policy with correct object": {"unknown", group.ID, nil}, + "delete existing policy": {subject[0].ID, group.ID, nil}, + } + + for desc, tc := range cases { + policy := policies.Policy{ + Subject: tc.Subject, + Object: tc.Object, + } + err := repo.Delete(context.Background(), policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } + pm := policies.Page{ + OwnerID: subject[0].ID, + Subject: subject[0].ID, + Object: group.ID, + Action: "c_delete", + } + policyPage, err := repo.Retrieve(context.Background(), pm) + assert.Equal(t, uint64(0), policyPage.Total, fmt.Sprintf("retrieve policies unexpected total %d\n", policyPage.Total)) + require.Nil(t, err, fmt.Sprintf("retrieve policies unexpected error: %s", err)) +} diff --git a/things/policies/postgres/setup_test.go b/things/policies/postgres/setup_test.go new file mode 100644 index 0000000000..890fea7da2 --- /dev/null +++ 
b/things/policies/postgres/setup_test.go @@ -0,0 +1,92 @@ +// Package postgres_test contains tests for PostgreSQL repository +// implementations. +package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/jmoiron/sqlx" + pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/postgres" + tpostgres "github.com/mainflux/mainflux/things/postgres" + dockertest "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.1-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgClient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgClient.SetupDB(dbConfig, 
*tpostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + database = postgres.NewDatabase(db, tracer) + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/things/policies/redis/doc.go b/things/policies/redis/doc.go new file mode 100644 index 0000000000..3b7b7486ae --- /dev/null +++ b/things/policies/redis/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +// Package redis contains cache implementations using Redis as +// the underlying database. +package redis diff --git a/things/policies/redis/events.go b/things/policies/redis/events.go new file mode 100644 index 0000000000..b465c36620 --- /dev/null +++ b/things/policies/redis/events.go @@ -0,0 +1,108 @@ +package redis + +import ( + "fmt" + "strings" + + "github.com/mainflux/mainflux/things/policies" +) + +const ( + policyPrefix = "policies." 
+ authorize = policyPrefix + "authorize" + policyAdd = policyPrefix + "add" + policyUpdate = policyPrefix + "update" + policyList = policyPrefix + "list" + policyDelete = policyPrefix + "delete" +) + +type event interface { + Encode() (map[string]interface{}, error) +} + +var ( + _ event = (*policyEvent)(nil) + _ event = (*authorizeEvent)(nil) + _ event = (*listPoliciesEvent)(nil) +) + +type policyEvent struct { + policies.Policy + operation string +} + +func (pe policyEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": pe.operation, + } + if pe.OwnerID != "" { + val["owner_id"] = pe.OwnerID + } + if pe.Subject != "" { + val["subject"] = pe.Subject + } + if pe.Object != "" { + val["object"] = pe.Object + } + if len(pe.Actions) > 0 { + actions := fmt.Sprintf("[%s]", strings.Join(pe.Actions, ",")) + val["actions"] = actions + } + if !pe.CreatedAt.IsZero() { + val["created_at"] = pe.CreatedAt + } + if !pe.UpdatedAt.IsZero() { + val["updated_at"] = pe.UpdatedAt + } + if pe.UpdatedBy != "" { + val["updated_by"] = pe.UpdatedBy + } + return val, nil +} + +type authorizeEvent struct { + policies.AccessRequest + entityType string +} + +func (ae authorizeEvent) Encode() (map[string]interface{}, error) { + // We don't want to send the key over the stream, so we don't send the subject. 
+ val := map[string]interface{}{ + "operation": authorize, + "entity_type": ae.entityType, + } + + if ae.Object != "" { + val["object"] = ae.Object + } + if ae.Action != "" { + val["actions"] = ae.Action + } + return val, nil +} + +type listPoliciesEvent struct { + policies.Page +} + +func (ae listPoliciesEvent) Encode() (map[string]interface{}, error) { + val := map[string]interface{}{ + "operation": policyList, + "total": ae.Total, + "limit": ae.Limit, + "offset": ae.Offset, + } + if ae.OwnerID != "" { + val["owner_id"] = ae.OwnerID + } + if ae.Subject != "" { + val["subject"] = ae.Subject + } + if ae.Object != "" { + val["object"] = ae.Object + } + if ae.Action != "" { + val["action"] = ae.Action + } + return val, nil +} diff --git a/things/policies/redis/policies.go b/things/policies/redis/policies.go new file mode 100644 index 0000000000..e226fa0495 --- /dev/null +++ b/things/policies/redis/policies.go @@ -0,0 +1,69 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package redis + +import ( + "context" + "fmt" + "strings" + + "github.com/go-redis/redis/v8" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/policies" +) + +const ( + separator = ":" + keyDuration = 0 +) + +var _ policies.Cache = (*pcache)(nil) + +type pcache struct { + client *redis.Client +} + +// NewCache returns redis policy cache implementation. +func NewCache(client *redis.Client) policies.Cache { + return pcache{client: client} +} + +func (cc pcache) Put(ctx context.Context, policy policies.Policy) error { + k, v := kv(policy) + if err := cc.client.Set(ctx, k, v, keyDuration).Err(); err != nil { + return errors.Wrap(errors.ErrCreateEntity, err) + } + return nil +} + +func (cc pcache) Get(ctx context.Context, policy policies.Policy) (policies.Policy, error) { + k, _ := kv(policy) + res := cc.client.Get(ctx, k) + // Nil response indicates non-existent key in Redis client. 
+ if res == nil || res.Err() == redis.Nil { + return policies.Policy{}, errors.ErrNotFound + } + if err := res.Err(); err != nil { + return policies.Policy{}, err + } + actions, err := res.Result() + if err != nil { + return policies.Policy{}, err + } + policy.Actions = strings.Split(actions, separator) + return policy, nil +} + +func (cc pcache) Remove(ctx context.Context, policy policies.Policy) error { + obj, _ := kv(policy) + if err := cc.client.Del(ctx, obj).Err(); err != nil { + return errors.Wrap(errors.ErrRemoveEntity, err) + } + return nil +} + +// Generates key-value pair for Redis client. +func kv(p policies.Policy) (string, string) { + return fmt.Sprintf("%s%s%s", p.Subject, separator, p.Object), strings.Join(p.Actions, separator) +} diff --git a/things/policies/redis/streams.go b/things/policies/redis/streams.go new file mode 100644 index 0000000000..ff4f334868 --- /dev/null +++ b/things/policies/redis/streams.go @@ -0,0 +1,156 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package redis + +import ( + "context" + + "github.com/go-redis/redis/v8" + "github.com/mainflux/mainflux/things/policies" +) + +const ( + streamID = "mainflux.things" + streamLen = 1000 +) + +var _ policies.Service = (*eventStore)(nil) + +type eventStore struct { + svc policies.Service + client *redis.Client +} + +// NewEventStoreMiddleware returns wrapper around policy service that sends +// events to event store. 
+func NewEventStoreMiddleware(svc policies.Service, client *redis.Client) policies.Service { + return eventStore{ + svc: svc, + client: client, + } +} + +func (es eventStore) Authorize(ctx context.Context, ar policies.AccessRequest, entity string) (string, error) { + id, err := es.svc.Authorize(ctx, ar, entity) + if err != nil { + return "", err + } + + event := authorizeEvent{ + ar, entity, + } + values, err := event.Encode() + if err != nil { + return id, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return id, err + } + + return id, nil +} + +func (es eventStore) AddPolicy(ctx context.Context, token string, policy policies.Policy) (policies.Policy, error) { + policy, err := es.svc.AddPolicy(ctx, token, policy) + if err != nil { + return policies.Policy{}, err + } + + event := policyEvent{ + policy, policyAdd, + } + values, err := event.Encode() + if err != nil { + return policy, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return policy, err + } + + return policy, nil +} + +func (es eventStore) UpdatePolicy(ctx context.Context, token string, policy policies.Policy) (policies.Policy, error) { + policy, err := es.svc.UpdatePolicy(ctx, token, policy) + if err != nil { + return policies.Policy{}, err + } + + event := policyEvent{ + policy, policyUpdate, + } + values, err := event.Encode() + if err != nil { + return policy, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return policy, err + } + + return policy, nil +} + +func (es eventStore) ListPolicies(ctx context.Context, token string, page policies.Page) (policies.PolicyPage, error) { + policypage, err := es.svc.ListPolicies(ctx, token, page) + if err != 
nil { + return policies.PolicyPage{}, err + } + + event := listPoliciesEvent{ + page, + } + values, err := event.Encode() + if err != nil { + return policypage, err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return policypage, err + } + + return policypage, nil +} + +func (es eventStore) DeletePolicy(ctx context.Context, token string, policy policies.Policy) error { + if err := es.svc.DeletePolicy(ctx, token, policy); err != nil { + return err + } + + event := policyEvent{ + policy, policyDelete, + } + values, err := event.Encode() + if err != nil { + return err + } + record := &redis.XAddArgs{ + Stream: streamID, + MaxLenApprox: streamLen, + Values: values, + } + if err := es.client.XAdd(ctx, record).Err(); err != nil { + return err + } + + return nil +} diff --git a/things/policies/service.go b/things/policies/service.go new file mode 100644 index 0000000000..b616f56930 --- /dev/null +++ b/things/policies/service.go @@ -0,0 +1,175 @@ +package policies + +import ( + "context" + "time" + + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/things/clients" + upolicies "github.com/mainflux/mainflux/users/policies" +) + +// Possible token types are access and refresh tokens. +const ( + ReadAction = "m_read" + WriteAction = "m_write" + ClientEntityType = "client" + GroupEntityType = "group" +) + +type service struct { + auth upolicies.AuthServiceClient + policies Repository + policyCache Cache + idProvider mainflux.IDProvider +} + +// NewService returns a new Clients service implementation. 
+func NewService(auth upolicies.AuthServiceClient, p Repository, tcache clients.ClientCache, ccache Cache, idp mainflux.IDProvider) Service { + return service{ + auth: auth, + policies: p, + policyCache: ccache, + idProvider: idp, + } +} + +func (svc service) Authorize(ctx context.Context, ar AccessRequest, entity string) (string, error) { + // fetch from cache first + p := Policy{ + Subject: ar.Subject, + Object: ar.Object, + } + policy, err := svc.policyCache.Get(ctx, p) + if err == nil { + for _, action := range policy.Actions { + if action == ar.Action { + return policy.Subject, nil + } + } + return "", errors.ErrAuthorization + } + if !errors.Contains(err, errors.ErrNotFound) { + return "", err + } + // fetch from repo as a fallback if not found in cache + policy, err = svc.policies.RetrieveOne(ctx, p.Subject, p.Object) + if err != nil { + return "", err + } + + // Replace Subject since AccessRequest Subject is Thing Key, + // and Policy subject is Thing ID. + policy.Subject = ar.Subject + + for _, action := range policy.Actions { + if action == ar.Action { + if err := svc.policyCache.Put(ctx, policy); err != nil { + return policy.Subject, err + } + + return policy.Subject, nil + } + } + return "", errors.ErrAuthorization + +} + +func (svc service) AddPolicy(ctx context.Context, token string, p Policy) (Policy, error) { + res, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}) + if err != nil { + return Policy{}, errors.Wrap(errors.ErrAuthentication, err) + } + + if err := p.Validate(); err != nil { + return Policy{}, err + } + + p.OwnerID = res.GetId() + p.CreatedAt = time.Now() + + p, err = svc.policies.Save(ctx, p) + if err != nil { + return Policy{}, err + } + + if err := svc.policyCache.Put(ctx, p); err != nil { + return p, err + } + return p, nil +} + +func (svc service) UpdatePolicy(ctx context.Context, token string, p Policy) (Policy, error) { + res, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}) + if err != nil { + return 
 Policy{}, errors.Wrap(errors.ErrAuthentication, err) + } + if err := p.Validate(); err != nil { + return Policy{}, err + } + if err := svc.checkAction(ctx, res.GetId(), p); err != nil { + return Policy{}, err + } + p.UpdatedAt = time.Now() + p.UpdatedBy = res.GetId() + + return svc.policies.Update(ctx, p) +} + +func (svc service) ListPolicies(ctx context.Context, token string, pm Page) (PolicyPage, error) { + if _, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}); err != nil { + return PolicyPage{}, errors.Wrap(errors.ErrAuthentication, err) + } + if err := pm.Validate(); err != nil { + return PolicyPage{}, err + } + + page, err := svc.policies.Retrieve(ctx, pm) + if err != nil { + return PolicyPage{}, err + } + + return page, err +} + +func (svc service) DeletePolicy(ctx context.Context, token string, p Policy) error { + res, err := svc.auth.Identify(ctx, &upolicies.Token{Value: token}) + if err != nil { + return errors.Wrap(errors.ErrAuthentication, err) + } + if err := svc.checkAction(ctx, res.GetId(), p); err != nil { + return err + } + if err := svc.policyCache.Remove(ctx, p); err != nil { + return err + } + return svc.policies.Delete(ctx, p) +} + +// checkAction checks if the client updating the policy has sufficient privileges. +// If the client is the owner of the policy. +// If the client is the admin.
+func (svc service) checkAction(ctx context.Context, clientID string, p Policy) error { + pm := Page{Subject: p.Subject, Object: p.Object, OwnerID: clientID, Total: 1, Offset: 0} + page, err := svc.policies.Retrieve(ctx, pm) + if err != nil { + return err + } + if len(page.Policies) != 1 { + return errors.ErrAuthorization + } + // If the client is the owner of the policy + if page.Policies[0].OwnerID == clientID { + return nil + } + + // If the client is the admin + req := &upolicies.AuthorizeReq{Sub: clientID, Obj: p.Object, Act: p.Actions[0], EntityType: "client"} + if _, err := svc.auth.Authorize(ctx, req); err == nil { + return nil + } + + return errors.ErrAuthorization + +} diff --git a/things/policies/service_test.go b/things/policies/service_test.go new file mode 100644 index 0000000000..240b259762 --- /dev/null +++ b/things/policies/service_test.go @@ -0,0 +1,341 @@ +package policies_test + +import ( + context "context" + fmt "fmt" + "testing" + + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/testsutil" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/things/clients" + "github.com/mainflux/mainflux/things/clients/mocks" + "github.com/mainflux/mainflux/things/policies" + pmocks "github.com/mainflux/mainflux/things/policies/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() + inValidToken = "invalidToken" + memberActions = []string{"g_list"} + adminEmail = "admin@example.com" + token = "token" +) + +func newService(tokens map[string]string) (policies.Service, *pmocks.PolicyRepository) { + adminPolicy := mocks.MockSubjectSet{Object: "authorities", Relation: clients.AdminRelationKey} + auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{adminEmail: {adminPolicy}}) + idProvider := uuid.NewMock() + thingsCache := 
mocks.NewClientCache() + policiesCache := pmocks.NewChannelCache() + pRepo := new(pmocks.PolicyRepository) + + return policies.NewService(auth, pRepo, thingsCache, policiesCache, idProvider), pRepo +} + +func TestAddPolicy(t *testing.T) { + svc, pRepo := newService(map[string]string{token: adminEmail}) + + policy := policies.Policy{Object: "obj1", Actions: []string{"m_read"}, Subject: "sub1"} + + cases := []struct { + desc string + policy policies.Policy + page policies.PolicyPage + token string + err error + }{ + { + desc: "add new policy", + policy: policy, + page: policies.PolicyPage{}, + token: token, + err: nil, + }, + { + desc: "add existing policy", + policy: policy, + page: policies.PolicyPage{Policies: []policies.Policy{policy}}, + token: token, + err: errors.ErrConflict, + }, + { + desc: "add a new policy with owner", + page: policies.PolicyPage{}, + policy: policies.Policy{ + OwnerID: testsutil.GenerateUUID(t, idProvider), + Object: "objwithowner", + Actions: []string{"m_read"}, + Subject: "subwithowner", + }, + err: nil, + token: token, + }, + { + desc: "add a new policy with more actions", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Object: "obj2", + Actions: []string{"c_delete", "c_update", "c_add", "c_list"}, + Subject: "sub2", + }, + err: nil, + token: token, + }, + { + desc: "add a new policy with wrong action", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Object: "obj3", + Actions: []string{"wrong"}, + Subject: "sub3", + }, + err: apiutil.ErrMalformedPolicyAct, + token: token, + }, + { + desc: "add a new policy with empty object", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Actions: []string{"c_delete"}, + Subject: "sub4", + }, + err: apiutil.ErrMissingPolicyObj, + token: token, + }, + { + desc: "add a new policy with empty subject", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Actions: []string{"c_delete"}, + Object: "obj4", + }, + err: apiutil.ErrMissingPolicySub, + token: token, + }, 
+ { + desc: "add a new policy with empty action", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Subject: "sub5", + Object: "obj5", + }, + err: apiutil.ErrMalformedPolicyAct, + token: token, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("RetrieveOne", context.Background(), mock.Anything, mock.Anything).Return(tc.policy, tc.err) + repoCall1 := pRepo.On("Evaluate", context.Background(), "client", mock.Anything).Return(nil) + repoCall2 := pRepo.On("Update", context.Background(), tc.policy).Return(tc.err) + repoCall3 := pRepo.On("Save", context.Background(), mock.Anything).Return(tc.policy, tc.err) + repoCall4 := pRepo.On("Retrieve", context.Background(), mock.Anything).Return(tc.page, nil) + _, err := svc.AddPolicy(context.Background(), tc.token, tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + tc.policy.Subject = tc.token + areq := policies.AccessRequest{Subject: tc.policy.Subject, Object: tc.policy.Object, Action: tc.policy.Actions[0]} + _, err = svc.Authorize(context.Background(), areq, "client") + require.Nil(t, err, fmt.Sprintf("checking shared %v policy expected to be succeed: %#v", tc.policy, err)) + } + repoCall3.Parent.AssertCalled(t, "Save", context.Background(), mock.Anything) + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + repoCall3.Unset() + repoCall4.Unset() + } + +} + +func TestAuthorize(t *testing.T) { + svc, pRepo := newService(map[string]string{token: adminEmail}) + + cases := []struct { + desc string + policy policies.AccessRequest + domain string + err error + }{ + { + desc: "check valid policy in client domain", + policy: policies.AccessRequest{Object: "client1", Action: "c_update", Subject: token}, + domain: "client", + err: nil, + }, + { + desc: "check valid policy in group domain", + policy: policies.AccessRequest{Object: "client2", Action: "g_update", Subject: token}, + domain: "group", + err: 
errors.ErrConflict, + }, + { + desc: "check invalid policy in client domain", + policy: policies.AccessRequest{Object: "client3", Action: "c_update", Subject: token}, + domain: "client", + err: nil, + }, + { + desc: "check invalid policy in group domain", + policy: policies.AccessRequest{Object: "client4", Action: "g_update", Subject: token}, + domain: "group", + err: nil, + }, + } + + for _, tc := range cases { + policy := policies.Policy{Object: tc.policy.Object, Actions: []string{tc.policy.Action}, Subject: tc.policy.Subject} + repoCall := pRepo.On("RetrieveOne", context.Background(), mock.Anything, mock.Anything).Return(policy, tc.err) + repoCall1 := pRepo.On("Evaluate", context.Background(), tc.domain, mock.Anything).Return(tc.err) + _, err := svc.Authorize(context.Background(), tc.policy, tc.domain) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + repoCall1.Unset() + } + +} + +func TestDeletePolicy(t *testing.T) { + + svc, pRepo := newService(map[string]string{token: adminEmail}) + + pr := policies.Policy{Object: testsutil.GenerateUUID(t, idProvider), Actions: memberActions, Subject: testsutil.GenerateUUID(t, idProvider)} + + repoCall := pRepo.On("Delete", context.Background(), mock.Anything).Return(nil) + repoCall1 := pRepo.On("Retrieve", context.Background(), mock.Anything).Return(policies.PolicyPage{Policies: []policies.Policy{pr}}, nil) + err := svc.DeletePolicy(context.Background(), token, pr) + assert.EqualError(t, err, errors.ErrAuthorization.Error(), fmt.Sprintf("deleting %v policy expected to fail: %s", pr, err)) + repoCall.Unset() + repoCall1.Unset() +} + +func TestListPolicies(t *testing.T) { + + svc, pRepo := newService(map[string]string{token: adminEmail}) + + id := testsutil.GenerateUUID(t, idProvider) + + readPolicy := "m_read" + writePolicy := "m_write" + + var nPolicy = uint64(10) + var aPolicies = []policies.Policy{} + for i := uint64(0); i < nPolicy; i++ { + 
pr := policies.Policy{ + OwnerID: id, + Actions: []string{readPolicy}, + Subject: fmt.Sprintf("thing_%d", i), + Object: fmt.Sprintf("client_%d", i), + } + if i%3 == 0 { + pr.Actions = []string{writePolicy} + } + aPolicies = append(aPolicies, pr) + } + + cases := []struct { + desc string + token string + page policies.Page + response policies.PolicyPage + err error + }{ + { + desc: "list policies with authorized token", + token: token, + err: nil, + response: policies.PolicyPage{ + Page: policies.Page{ + Offset: 0, + Total: nPolicy, + }, + Policies: aPolicies, + }, + }, + { + desc: "list policies with invalid token", + token: inValidToken, + err: errors.ErrAuthentication, + response: policies.PolicyPage{ + Page: policies.Page{ + Offset: 0, + }, + }, + }, + { + desc: "list policies with offset and limit", + token: token, + page: policies.Page{ + Offset: 6, + Limit: nPolicy, + }, + response: policies.PolicyPage{ + Page: policies.Page{ + Offset: 6, + Total: nPolicy, + }, + Policies: aPolicies[6:10], + }, + }, + { + desc: "list policies with wrong action", + token: token, + page: policies.Page{ + Action: "wrong", + }, + response: policies.PolicyPage{}, + err: apiutil.ErrMalformedPolicyAct, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("Retrieve", context.Background(), tc.page).Return(tc.response, tc.err) + page, err := svc.ListPolicies(context.Background(), tc.token, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected size %v got %v\n", tc.desc, tc.response, page)) + repoCall.Unset() + } + +} + +func TestUpdatePolicies(t *testing.T) { + + svc, pRepo := newService(map[string]string{token: adminEmail}) + + policy := policies.Policy{Object: "obj1", Actions: []string{"m_read"}, Subject: "sub1"} + + cases := []struct { + desc string + action []string + token string + err error + }{ + { + desc: "update policy action with invalid 
token", + action: []string{"m_write"}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + { + desc: "update policy action with wrong policy action", + action: []string{"wrong"}, + token: token, + err: apiutil.ErrMalformedPolicyAct, + }, + } + + for _, tc := range cases { + policy.Actions = tc.action + repoCall := pRepo.On("Retrieve", context.Background(), mock.Anything).Return(policies.PolicyPage{Policies: []policies.Policy{policy}}, nil) + repoCall1 := pRepo.On("Update", context.Background(), mock.Anything).Return(policies.Policy{}, tc.err) + _, err := svc.UpdatePolicy(context.Background(), tc.token, policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + repoCall.Unset() + repoCall1.Unset() + } +} diff --git a/things/policies/tracing/tracing.go b/things/policies/tracing/tracing.go new file mode 100644 index 0000000000..0cd4a48282 --- /dev/null +++ b/things/policies/tracing/tracing.go @@ -0,0 +1,56 @@ +package tracing + +import ( + "context" + + "github.com/mainflux/mainflux/things/policies" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ policies.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + psvc policies.Service +} + +// TracingMiddleware enriches policies with traces for improved monitoring. 
+func TracingMiddleware(psvc policies.Service, tracer trace.Tracer) policies.Service { + return &tracingMiddleware{tracer, psvc} +} + +func (tm *tracingMiddleware) Authorize(ctx context.Context, ar policies.AccessRequest, entityType string) (string, error) { + ctx, span := tm.tracer.Start(ctx, "svc_authorize_by_key", trace.WithAttributes(attribute.String("subject", ar.Subject), attribute.String("object", ar.Object), attribute.String("action", ar.Action))) + defer span.End() + + return tm.psvc.Authorize(ctx, ar, entityType) +} + +func (tm *tracingMiddleware) AddPolicy(ctx context.Context, token string, p policies.Policy) (policies.Policy, error) { + ctx, span := tm.tracer.Start(ctx, "svc_connect", trace.WithAttributes(attribute.StringSlice("actions", p.Actions))) + defer span.End() + + return tm.psvc.AddPolicy(ctx, token, p) +} + +func (tm *tracingMiddleware) UpdatePolicy(ctx context.Context, token string, p policies.Policy) (policies.Policy, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_policy", trace.WithAttributes(attribute.StringSlice("actions", p.Actions))) + defer span.End() + + return tm.psvc.UpdatePolicy(ctx, token, p) +} + +func (tm *tracingMiddleware) ListPolicies(ctx context.Context, token string, p policies.Page) (policies.PolicyPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_policies", trace.WithAttributes(attribute.String("actions", p.Action))) + defer span.End() + + return tm.psvc.ListPolicies(ctx, token, p) +} + +func (tm *tracingMiddleware) DeletePolicy(ctx context.Context, token string, p policies.Policy) error { + ctx, span := tm.tracer.Start(ctx, "svc_disconnect", trace.WithAttributes(attribute.String("subject", p.Subject), attribute.String("object", p.Object))) + defer span.End() + + return tm.psvc.DeletePolicy(ctx, token, p) +} diff --git a/things/postgres/channels.go b/things/postgres/channels.go deleted file mode 100644 index a4c2e5853f..0000000000 --- a/things/postgres/channels.go +++ /dev/null @@ -1,563 +0,0 @@ -// 
Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres - -import ( - "context" - "database/sql" - "database/sql/driver" - "encoding/json" - "fmt" - "strings" - - "github.com/gofrs/uuid" - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -var _ things.ChannelRepository = (*channelRepository)(nil) - -const name = "name" - -type channelRepository struct { - db Database -} - -type dbConnection struct { - Channel string `db:"channel"` - Thing string `db:"thing"` - Owner string `db:"owner"` -} - -// NewChannelRepository instantiates a PostgreSQL implementation of channel -// repository. -func NewChannelRepository(db Database) things.ChannelRepository { - return &channelRepository{ - db: db, - } -} - -func (cr channelRepository) Save(ctx context.Context, channels ...things.Channel) ([]things.Channel, error) { - tx, err := cr.db.BeginTxx(ctx, nil) - if err != nil { - return nil, errors.Wrap(errors.ErrCreateEntity, err) - } - - q := `INSERT INTO channels (id, owner, name, metadata) - VALUES (:id, :owner, :name, :metadata);` - - for _, channel := range channels { - dbch := toDBChannel(channel) - - _, err = tx.NamedExecContext(ctx, q, dbch) - if err != nil { - if err := tx.Rollback(); err != nil { - return []things.Channel{}, errors.Wrap(errors.ErrCreateEntity, err) - } - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return []things.Channel{}, errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.UniqueViolation: - return []things.Channel{}, errors.Wrap(errors.ErrConflict, err) - case pgerrcode.StringDataRightTruncationDataException: - return []things.Channel{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return []things.Channel{}, errors.Wrap(errors.ErrCreateEntity, err) - } - } - - if err = tx.Commit(); err != nil { - return []things.Channel{}, 
errors.Wrap(errors.ErrCreateEntity, err) - } - - return channels, nil -} - -func (cr channelRepository) Update(ctx context.Context, channel things.Channel) error { - q := `UPDATE channels SET name = :name, metadata = :metadata WHERE owner = :owner AND id = :id;` - - dbch := toDBChannel(channel) - - res, err := cr.db.NamedExecContext(ctx, q, dbch) - if err != nil { - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.StringDataRightTruncationDataException: - return errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - cnt, err := res.RowsAffected() - if err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - if cnt == 0 { - return errors.ErrNotFound - } - - return nil -} - -func (cr channelRepository) RetrieveByID(ctx context.Context, owner, id string) (things.Channel, error) { - q := `SELECT name, metadata, owner FROM channels WHERE id = $1;` - - dbch := dbChannel{ - ID: id, - } - - if err := cr.db.QueryRowxContext(ctx, q, id).StructScan(&dbch); err != nil { - pgErr, ok := err.(*pgconn.PgError) - // If there is no result or ID is in an invalid format, return ErrNotFound. 
- if err == sql.ErrNoRows || ok && pgerrcode.InvalidTextRepresentation == pgErr.Code { - return things.Channel{}, errors.ErrNotFound - } - return things.Channel{}, errors.Wrap(errors.ErrViewEntity, err) - } - - return toChannel(dbch), nil -} - -func (cr channelRepository) RetrieveAll(ctx context.Context, owner string, pm things.PageMetadata) (things.ChannelsPage, error) { - nq, name := getNameQuery(pm.Name) - oq := getOrderQuery(pm.Order) - dq := getDirQuery(pm.Dir) - ownerQuery := getOwnerQuery(pm.FetchSharedThings) - meta, mq, err := getMetadataQuery(pm.Metadata) - if err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - var whereClause string - var query []string - if mq != "" { - query = append(query, mq) - } - if nq != "" { - query = append(query, nq) - } - if ownerQuery != "" { - query = append(query, ownerQuery) - } - - if len(query) > 0 { - whereClause = fmt.Sprintf(" WHERE %s", strings.Join(query, " AND ")) - } - - q := fmt.Sprintf(`SELECT id, name, metadata FROM channels - %s ORDER BY %s %s LIMIT :limit OFFSET :offset;`, whereClause, oq, dq) - - params := map[string]interface{}{ - "owner": owner, - "limit": pm.Limit, - "offset": pm.Offset, - "name": name, - "metadata": meta, - } - rows, err := cr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - items := []things.Channel{} - for rows.Next() { - dbch := dbChannel{Owner: owner} - if err := rows.StructScan(&dbch); err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - ch := toChannel(dbch) - - items = append(items, ch) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM channels %s;`, whereClause) - - total, err := total(ctx, cr.db, cq, params) - if err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - page := things.ChannelsPage{ - Channels: items, - PageMetadata: things.PageMetadata{ - Total: 
total, - Offset: pm.Offset, - Limit: pm.Limit, - Order: pm.Order, - Dir: pm.Dir, - }, - } - - return page, nil -} - -func (cr channelRepository) RetrieveByThing(ctx context.Context, owner, thID string, pm things.PageMetadata) (things.ChannelsPage, error) { - oq := getConnOrderQuery(pm.Order, "ch") - dq := getDirQuery(pm.Dir) - - // Verify if UUID format is valid to avoid internal Postgres error - if _, err := uuid.FromString(thID); err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrNotFound, err) - } - - var q, qc string - switch pm.Disconnected { - case true: - q = fmt.Sprintf(`SELECT id, name, metadata - FROM channels ch - WHERE ch.owner = :owner AND ch.id NOT IN - (SELECT id FROM channels ch - INNER JOIN connections conn - ON ch.id = conn.channel_id - WHERE ch.owner = :owner AND conn.thing_id = :thing) - ORDER BY %s %s - LIMIT :limit - OFFSET :offset;`, oq, dq) - - qc = `SELECT COUNT(*) - FROM channels ch - WHERE ch.owner = $1 AND ch.id NOT IN - (SELECT id FROM channels ch - INNER JOIN connections conn - ON ch.id = conn.channel_id - WHERE ch.owner = $1 AND conn.thing_id = $2);` - default: - q = fmt.Sprintf(`SELECT id, name, metadata FROM channels ch - INNER JOIN connections conn - ON ch.id = conn.channel_id - WHERE ch.owner = :owner AND conn.thing_id = :thing - ORDER BY %s %s - LIMIT :limit - OFFSET :offset;`, oq, dq) - - qc = `SELECT COUNT(*) - FROM channels ch - INNER JOIN connections conn - ON ch.id = conn.channel_id - WHERE ch.owner = $1 AND conn.thing_id = $2` - } - - params := map[string]interface{}{ - "owner": owner, - "thing": thID, - "limit": pm.Limit, - "offset": pm.Offset, - } - - rows, err := cr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - items := []things.Channel{} - for rows.Next() { - dbch := dbChannel{Owner: owner} - if err := rows.StructScan(&dbch); err != nil { - return things.ChannelsPage{}, 
errors.Wrap(errors.ErrViewEntity, err) - } - - ch := toChannel(dbch) - items = append(items, ch) - } - - var total uint64 - if err := cr.db.GetContext(ctx, &total, qc, owner, thID); err != nil { - return things.ChannelsPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - return things.ChannelsPage{ - Channels: items, - PageMetadata: things.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - }, - }, nil -} - -func (cr channelRepository) Remove(ctx context.Context, owner, id string) error { - dbch := dbChannel{ - ID: id, - Owner: owner, - } - q := `DELETE FROM channels WHERE id = :id AND owner = :owner` - - _, err := cr.db.NamedExecContext(ctx, q, dbch) - return err -} - -func (cr channelRepository) Connect(ctx context.Context, owner string, chIDs, thIDs []string) error { - tx, err := cr.db.BeginTxx(ctx, nil) - if err != nil { - return errors.Wrap(things.ErrConnect, err) - } - - q := `INSERT INTO connections (channel_id, channel_owner, thing_id, thing_owner) - VALUES (:channel, :owner, :thing, :owner);` - - for _, chID := range chIDs { - for _, thID := range thIDs { - dbco := dbConnection{ - Channel: chID, - Thing: thID, - Owner: owner, - } - - _, err := tx.NamedExecContext(ctx, q, dbco) - if err != nil { - if err := tx.Rollback(); err != nil { - return errors.Wrap(things.ErrConnect, err) - } - - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.ForeignKeyViolation: - return errors.ErrNotFound - case pgerrcode.UniqueViolation: - return errors.ErrConflict - } - } - - return errors.Wrap(things.ErrConnect, err) - } - } - } - - if err = tx.Commit(); err != nil { - return errors.Wrap(things.ErrConnect, err) - } - - return nil -} - -func (cr channelRepository) Disconnect(ctx context.Context, owner string, chIDs, thIDs []string) error { - tx, err := cr.db.BeginTxx(ctx, nil) - if err != nil { - return errors.Wrap(things.ErrConnect, err) - } - - q := `DELETE FROM connections - WHERE channel_id = :channel AND 
channel_owner = :owner - AND thing_id = :thing AND thing_owner = :owner` - - for _, chID := range chIDs { - for _, thID := range thIDs { - dbco := dbConnection{ - Channel: chID, - Thing: thID, - Owner: owner, - } - - res, err := tx.NamedExecContext(ctx, q, dbco) - if err != nil { - err = tx.Rollback() - if err != nil { - return errors.Wrap(things.ErrConnect, err) - } - - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.ForeignKeyViolation: - return errors.ErrNotFound - case pgerrcode.UniqueViolation: - return errors.ErrConflict - } - } - return errors.Wrap(things.ErrDisconnect, err) - } - - cnt, err := res.RowsAffected() - if err != nil { - return errors.Wrap(things.ErrDisconnect, err) - } - - if cnt == 0 { - return errors.ErrNotFound - } - } - } - - if err = tx.Commit(); err != nil { - return errors.Wrap(things.ErrConnect, err) - } - - return nil -} - -func (cr channelRepository) HasThing(ctx context.Context, chanID, thingKey string) (string, error) { - var thingID string - q := `SELECT id FROM things WHERE key = $1` - if err := cr.db.QueryRowxContext(ctx, q, thingKey).Scan(&thingID); err != nil { - return "", errors.Wrap(errors.ErrViewEntity, err) - } - - if err := cr.hasThing(ctx, chanID, thingID); err != nil { - return "", err - } - - return thingID, nil -} - -func (cr channelRepository) HasThingByID(ctx context.Context, chanID, thingID string) error { - return cr.hasThing(ctx, chanID, thingID) -} - -func (cr channelRepository) hasThing(ctx context.Context, chanID, thingID string) error { - q := `SELECT EXISTS (SELECT 1 FROM connections WHERE channel_id = $1 AND thing_id = $2);` - exists := false - if err := cr.db.QueryRowxContext(ctx, q, chanID, thingID).Scan(&exists); err != nil { - return errors.Wrap(errors.ErrViewEntity, err) - } - - if !exists { - return errors.ErrNotFound - } - - return nil -} - -// dbMetadata type for handling metadata properly in database/sql. 
-type dbMetadata map[string]interface{} - -// Scan implements the database/sql scanner interface. -// When interface is nil `m` is set to nil. -// If error occurs on casting data then m points to empty metadata. -func (m *dbMetadata) Scan(value interface{}) error { - if value == nil { - *m = dbMetadata{} - return nil - } - - b, ok := value.([]byte) - if !ok { - *m = dbMetadata{} - return errors.ErrScanMetadata - } - - if err := json.Unmarshal(b, m); err != nil { - return err - } - - return nil -} - -// Value implements database/sql valuer interface. -func (m dbMetadata) Value() (driver.Value, error) { - if len(m) == 0 { - return nil, nil - } - - b, err := json.Marshal(m) - if err != nil { - return nil, err - } - return b, err -} - -type dbChannel struct { - ID string `db:"id"` - Owner string `db:"owner"` - Name string `db:"name"` - Metadata dbMetadata `db:"metadata"` -} - -func toDBChannel(ch things.Channel) dbChannel { - return dbChannel{ - ID: ch.ID, - Owner: ch.Owner, - Name: ch.Name, - Metadata: ch.Metadata, - } -} - -func toChannel(ch dbChannel) things.Channel { - return things.Channel{ - ID: ch.ID, - Owner: ch.Owner, - Name: ch.Name, - Metadata: ch.Metadata, - } -} - -func getNameQuery(name string) (string, string) { - if name == "" { - return "", "" - } - name = fmt.Sprintf(`%%%s%%`, strings.ToLower(name)) - nq := `LOWER(name) LIKE :name` - return nq, name -} - -func getOrderQuery(order string) string { - switch order { - case name: - return name - default: - return "id" - } -} - -func getConnOrderQuery(order string, level string) string { - switch order { - case name: - return level + ".name" - default: - return level + ".id" - } -} - -func getDirQuery(dir string) string { - switch dir { - case "asc": - return "ASC" - default: - return "DESC" - } -} - -func getMetadataQuery(m things.Metadata) ([]byte, string, error) { - mq := "" - mb := []byte("{}") - if len(m) > 0 { - mq = `metadata @> :metadata` - - b, err := json.Marshal(m) - if err != nil { - return 
nil, "", err - } - mb = b - } - return mb, mq, nil -} - -func total(ctx context.Context, db Database, query string, params interface{}) (uint64, error) { - rows, err := db.NamedQueryContext(ctx, query, params) - if err != nil { - return 0, err - } - defer rows.Close() - total := uint64(0) - if rows.Next() { - if err := rows.Scan(&total); err != nil { - return 0, err - } - } - return total, nil -} diff --git a/things/postgres/channels_test.go b/things/postgres/channels_test.go deleted file mode 100644 index b88b53689b..0000000000 --- a/things/postgres/channels_test.go +++ /dev/null @@ -1,936 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres_test - -import ( - "context" - "fmt" - "testing" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" - "github.com/mainflux/mainflux/things/postgres" - "github.com/stretchr/testify/assert" -) - -func TestChannelsSave(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - channelRepo := postgres.NewChannelRepository(dbMiddleware) - - email := "channel-save@example.com" - - chs := []things.Channel{} - for i := 1; i <= 5; i++ { - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - ch := things.Channel{ - ID: id, - Owner: email, - } - chs = append(chs, ch) - } - id := chs[0].ID - - cases := []struct { - desc string - channels []things.Channel - response []things.Channel - err error - }{ - { - desc: "create new channels", - channels: chs, - response: chs, - err: nil, - }, - { - desc: "create channels that already exist", - channels: chs, - response: []things.Channel{}, - err: errors.ErrConflict, - }, - { - desc: "create channel with invalid ID", - channels: []things.Channel{ - {ID: "invalid", Owner: email}, - }, - response: []things.Channel{}, - err: errors.ErrMalformedEntity, - }, - { - desc: "create channel with invalid name", - channels: []things.Channel{ - {ID: id, Owner: email, Name: invalidName}, - 
}, - response: []things.Channel{}, - err: errors.ErrMalformedEntity, - }, - { - desc: "create channel with invalid name", - channels: []things.Channel{ - {ID: id, Owner: email, Name: invalidName}, - }, - response: []things.Channel{}, - err: errors.ErrMalformedEntity, - }, - } - - for _, tc := range cases { - _, err := channelRepo.Save(context.Background(), tc.channels...) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestChannelUpdate(t *testing.T) { - email := "channel-update@example.com" - dbMiddleware := postgres.NewDatabase(db) - chanRepo := postgres.NewChannelRepository(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - ch := things.Channel{ - ID: id, - Owner: email, - } - - chs, err := chanRepo.Save(context.Background(), ch) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch.ID = chs[0].ID - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - channel things.Channel - err error - }{ - { - desc: "update existing channel", - channel: ch, - err: nil, - }, - { - desc: "update non-existing channel with existing user", - channel: things.Channel{ - ID: nonexistentChanID, - Owner: email, - }, - err: errors.ErrNotFound, - }, - { - desc: "update existing channel ID with non-existing user", - channel: things.Channel{ - ID: ch.ID, - Owner: wrongValue, - }, - err: errors.ErrNotFound, - }, - { - desc: "update non-existing channel with non-existing user", - channel: things.Channel{ - ID: nonexistentChanID, - Owner: wrongValue, - }, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := chanRepo.Update(context.Background(), tc.channel) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestSingleChannelRetrieval(t *testing.T) { 
- email := "channel-single-retrieval@example.com" - dbMiddleware := postgres.NewDatabase(db) - chanRepo := postgres.NewChannelRepository(dbMiddleware) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - } - ths, _ := thingRepo.Save(context.Background(), th) - th.ID = ths[0].ID - - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - ch := things.Channel{ - ID: chID, - Owner: email, - Metadata: make(map[string]interface{}), - } - chs, _ := chanRepo.Save(context.Background(), ch) - ch.ID = chs[0].ID - - err = chanRepo.Connect(context.Background(), email, []string{ch.ID}, []string{th.ID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - ID string - response things.Channel - err error - }{ - { - desc: "retrieve channel with existing user", - owner: ch.Owner, - ID: ch.ID, - response: ch, - err: nil, - }, - { - desc: "retrieve channel with existing user, non-existing channel", - owner: ch.Owner, - ID: nonexistentChanID, - response: things.Channel{}, - err: errors.ErrNotFound, - }, - { - desc: "retrieve channel with malformed ID", - owner: ch.Owner, - ID: wrongValue, - response: things.Channel{}, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - resp, err := chanRepo.RetrieveByID(context.Background(), tc.owner, tc.ID) - assert.Equal(t, tc.response, resp, fmt.Sprintf("%s: got incorrect channel from RetrieveByID()", tc.desc)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - 
} -} - -func TestMultiChannelRetrieval(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - chanRepo := postgres.NewChannelRepository(dbMiddleware) - - email := "channel-multi-retrieval@example.com" - name := "channel_name" - metadata := things.Metadata{ - "field": "value", - } - wrongMeta := things.Metadata{ - "wrong": "wrong", - } - - offset := uint64(1) - nameNum := uint64(3) - metaNum := uint64(3) - nameMetaNum := uint64(2) - - n := uint64(10) - for i := uint64(0); i < n; i++ { - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - ch := things.Channel{ - ID: chID, - Owner: email, - } - - // Create Channels with name. - if i < nameNum { - ch.Name = fmt.Sprintf("%s-%d", name, i) - } - // Create Channels with metadata. - if i >= nameNum && i < nameNum+metaNum { - ch.Metadata = metadata - } - // Create Channels with name and metadata. - if i >= n-nameMetaNum { - ch.Metadata = metadata - ch.Name = name - } - - _, err = chanRepo.Save(context.Background(), ch) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while saving channels: %s", err)) - } - - cases := []struct { - desc string - owner string - size uint64 - pageMetadata things.PageMetadata - }{ - { - desc: "retrieve all channels with existing owner", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - }, - size: n, - }, - { - desc: "retrieve subset of channels with existing owner", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - Total: n, - }, - size: n / 2, - }, - { - desc: "retrieve channels with non-existing owner", - owner: wrongValue, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - Total: 0, - }, - size: 0, - }, - { - desc: "retrieve channels with existing name", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: offset, - Limit: n, - Name: name, - Total: nameNum + nameMetaNum, - }, - size: nameNum + nameMetaNum - offset, - }, - { - desc: 
"retrieve all channels with non-existing name", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Name: "wrong", - Total: 0, - }, - size: 0, - }, - { - desc: "retrieve all channels with existing metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Metadata: metadata, - Total: metaNum + nameMetaNum, - }, - size: metaNum + nameMetaNum, - }, - { - desc: "retrieve all channels with non-existing metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Metadata: wrongMeta, - Total: 0, - }, - }, - { - desc: "retrieve all channels with existing name and metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Name: name, - Metadata: metadata, - Total: nameMetaNum, - }, - size: nameMetaNum, - }, - { - desc: "retrieve channels sorted by name ascendent", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - Order: "name", - Dir: "asc", - }, - size: n, - }, - { - desc: "retrieve channels sorted by name descendent", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - Order: "name", - Dir: "desc", - }, - size: n, - }, - } - - for _, tc := range cases { - page, err := chanRepo.RetrieveAll(context.Background(), tc.owner, tc.pageMetadata) - size := uint64(len(page.Channels)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) - assert.Equal(t, tc.pageMetadata.Total, page.Total, fmt.Sprintf("%s: expected total %d got %d\n", tc.desc, tc.pageMetadata.Total, page.Total)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", tc.desc, err)) - - // Check if Channels list have been sorted properly - testSortChannels(t, tc.pageMetadata, page.Channels) - } -} - -func TestRetrieveByThing(t *testing.T) { - email := "channel-multi-retrieval-by-thing@example.com" - dbMiddleware := postgres.NewDatabase(db) - chanRepo := 
postgres.NewChannelRepository(dbMiddleware) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - ths, err := thingRepo.Save(context.Background(), things.Thing{ - ID: thID, - Owner: email, - }) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - thID = ths[0].ID - - n := uint64(10) - chsDisconNum := uint64(1) - - for i := uint64(0); i < n; i++ { - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - ch := things.Channel{ - ID: chID, - Owner: email, - } - schs, err := chanRepo.Save(context.Background(), ch) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - cid := schs[0].ID - - // Don't connect last Channel - if i == n-chsDisconNum { - break - } - - err = chanRepo.Connect(context.Background(), email, []string{cid}, []string{thID}) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - } - - nonexistentThingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - thID string - pageMetadata things.PageMetadata - size uint64 - err error - }{ - { - desc: "retrieve all channels by thing with existing owner", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: n - chsDisconNum, - }, - { - desc: "retrieve subset of channels by thing with existing owner", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - }, - size: (n / 2) - chsDisconNum, - }, - { - desc: "retrieve channels by thing with non-existing owner", - owner: wrongValue, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - }, - size: 0, - }, - { - desc: "retrieve channels by non-existent thing", - owner: email, - thID: nonexistentThingID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 
0, - }, - { - desc: "retrieve channels with malformed UUID", - owner: email, - thID: wrongValue, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 0, - err: errors.ErrNotFound, - }, - { - desc: "retrieve all non connected channels by thing with existing owner", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - }, - size: chsDisconNum, - }, - { - desc: "retrieve all channels by thing sorted by name ascendent", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "asc", - }, - size: n - chsDisconNum, - }, - { - desc: "retrieve all non-connected channels by thing sorted by name ascendent", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "asc", - }, - size: chsDisconNum, - }, - { - desc: "retrieve all channels by thing sorted by name descendent", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "desc", - }, - size: n - chsDisconNum, - }, - { - desc: "retrieve all non-connected channels by thing sorted by name descendent", - owner: email, - thID: thID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "desc", - }, - size: chsDisconNum, - }, - } - - for _, tc := range cases { - page, err := chanRepo.RetrieveByThing(context.Background(), tc.owner, tc.thID, tc.pageMetadata) - size := uint64(len(page.Channels)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected no error got %d\n", tc.desc, err)) - - // Check if Channels by Thing list have been sorted properly - testSortChannels(t, tc.pageMetadata, page.Channels) - } -} - -func TestChannelRemoval(t *testing.T) { - email := 
"channel-removal@example.com" - dbMiddleware := postgres.NewDatabase(db) - chanRepo := postgres.NewChannelRepository(dbMiddleware) - - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - chs, err := chanRepo.Save(context.Background(), things.Channel{ - ID: chID, - Owner: email, - }) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - chID = chs[0].ID - - // show that the removal works the same for both existing and non-existing (removed) channel - for i := 0; i < 2; i++ { - err := chanRepo.Remove(context.Background(), email, chID) - assert.Nil(t, err, fmt.Sprintf("#%d: failed to remove channel due to: %s", i, err)) - - resp, err := chanRepo.RetrieveByID(context.Background(), email, chID) - assert.Equal(t, things.Channel{}, resp) - assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("#%d: expected %s got %s", i, errors.ErrNotFound, err)) - } -} - -func TestConnect(t *testing.T) { - email := "channel-connect@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - th := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - Metadata: things.Metadata{}, - } - ths, err := thingRepo.Save(context.Background(), th) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - thID = ths[0].ID - - chanRepo := postgres.NewChannelRepository(dbMiddleware) - - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - chs, err := chanRepo.Save(context.Background(), things.Channel{ - ID: chID, - Owner: email, - }) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - chID = chs[0].ID - - nonexistentThingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: 
%s", err)) - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - chID string - thID string - err error - }{ - { - desc: "connect existing user, channel and thing", - owner: email, - chID: chID, - thID: thID, - err: nil, - }, - { - desc: "connect connected channel and thing", - owner: email, - chID: chID, - thID: thID, - err: errors.ErrConflict, - }, - { - desc: "connect with non-existing user", - owner: wrongValue, - chID: chID, - thID: thID, - err: errors.ErrNotFound, - }, - { - desc: "connect non-existing channel", - owner: email, - chID: nonexistentChanID, - thID: thID, - err: errors.ErrNotFound, - }, - { - desc: "connect non-existing thing", - owner: email, - chID: chID, - thID: nonexistentThingID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := chanRepo.Connect(context.Background(), tc.owner, []string{tc.chID}, []string{tc.thID}) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestDisconnect(t *testing.T) { - email := "channel-disconnect@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - Metadata: map[string]interface{}{}, - } - ths, err := thingRepo.Save(context.Background(), th) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - thID = ths[0].ID - - chanRepo := postgres.NewChannelRepository(dbMiddleware) - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - chs, err := chanRepo.Save(context.Background(), things.Channel{ - ID: chID, - Owner: email, - }) - 
assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - chID = chs[0].ID - - err = chanRepo.Connect(context.Background(), email, []string{chID}, []string{thID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - nonexistentThingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - chID string - thID string - err error - }{ - { - desc: "disconnect connected thing", - owner: email, - chID: chID, - thID: thID, - err: nil, - }, - { - desc: "disconnect non-connected thing", - owner: email, - chID: chID, - thID: thID, - err: errors.ErrNotFound, - }, - { - desc: "disconnect non-existing user", - owner: wrongValue, - chID: chID, - thID: thID, - err: errors.ErrNotFound, - }, - { - desc: "disconnect non-existing channel", - owner: email, - chID: nonexistentChanID, - thID: thID, - err: errors.ErrNotFound, - }, - { - desc: "disconnect non-existing thing", - owner: email, - chID: chID, - thID: nonexistentThingID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := chanRepo.Disconnect(context.Background(), tc.owner, []string{tc.chID}, []string{tc.thID}) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestHasThing(t *testing.T) { - email := "channel-access-check@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - th := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - } - ths, err := thingRepo.Save(context.Background(), th) - assert.Nil(t, err, 
fmt.Sprintf("unexpected error: %s\n", err)) - thID = ths[0].ID - - chanRepo := postgres.NewChannelRepository(dbMiddleware) - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - chs, err := chanRepo.Save(context.Background(), things.Channel{ - ID: chID, - Owner: email, - }) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - chID = chs[0].ID - - err = chanRepo.Connect(context.Background(), email, []string{chID}, []string{thID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - chID string - key string - hasAccess bool - }{ - { - desc: "access check for thing that has access", - chID: chID, - key: th.Key, - hasAccess: true, - }, - { - desc: "access check for thing without access", - chID: chID, - key: wrongValue, - hasAccess: false, - }, - { - desc: "access check for non-existing channel", - chID: nonexistentChanID, - key: th.Key, - hasAccess: false, - }, - } - - for _, tc := range cases { - _, err := chanRepo.HasThing(context.Background(), tc.chID, tc.key) - assert.Equal(t, tc.hasAccess, err == nil, fmt.Sprintf("%s: expected %t got %t\n", tc.desc, tc.hasAccess, err == nil)) - } -} - -func TestHasThingByID(t *testing.T) { - email := "channel-access-check@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - } - ths, err := thingRepo.Save(context.Background(), th) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - thID = ths[0].ID - - disconnectedThID, err := idProvider.ID() - 
assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - disconnectedThKey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - disconnectedThing := things.Thing{ - ID: disconnectedThID, - Owner: email, - Key: disconnectedThKey, - } - ths, err = thingRepo.Save(context.Background(), disconnectedThing) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - disconnectedThingID := ths[0].ID - - chanRepo := postgres.NewChannelRepository(dbMiddleware) - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - chs, err := chanRepo.Save(context.Background(), things.Channel{ - ID: chID, - Owner: email, - }) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - chID = chs[0].ID - - err = chanRepo.Connect(context.Background(), email, []string{chID}, []string{thID}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while connecting to service: %s", err)) - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - chID string - thID string - hasAccess bool - }{ - { - desc: "access check for thing that has access", - chID: chID, - thID: thID, - hasAccess: true, - }, - { - desc: "access check for thing without access", - chID: chID, - thID: disconnectedThingID, - hasAccess: false, - }, - { - desc: "access check for non-existing channel", - chID: nonexistentChanID, - thID: thID, - hasAccess: false, - }, - { - desc: "access check for non-existing thing", - chID: chID, - thID: wrongValue, - hasAccess: false, - }, - } - - for _, tc := range cases { - err := chanRepo.HasThingByID(context.Background(), tc.chID, tc.thID) - assert.Equal(t, tc.hasAccess, err == nil, fmt.Sprintf("%s: expected %t got %t\n", tc.desc, tc.hasAccess, err == nil)) - } -} - -func testSortChannels(t *testing.T, pm things.PageMetadata, chs []things.Channel) { - switch pm.Order { - case "name": - current 
:= chs[0] - for _, res := range chs { - if pm.Dir == "asc" { - assert.GreaterOrEqual(t, res.Name, current.Name) - } - if pm.Dir == "desc" { - assert.GreaterOrEqual(t, current.Name, res.Name) - } - current = res - } - default: - break - } -} diff --git a/things/postgres/database.go b/things/postgres/database.go deleted file mode 100644 index 2e71ba6ef9..0000000000 --- a/things/postgres/database.go +++ /dev/null @@ -1,71 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - - "github.com/jmoiron/sqlx" - "github.com/opentracing/opentracing-go" -) - -var _ Database = (*database)(nil) - -type database struct { - db *sqlx.DB -} - -// Database provides a database interface -type Database interface { - NamedExecContext(context.Context, string, interface{}) (sql.Result, error) - QueryRowxContext(context.Context, string, ...interface{}) *sqlx.Row - NamedQueryContext(context.Context, string, interface{}) (*sqlx.Rows, error) - GetContext(context.Context, interface{}, string, ...interface{}) error - BeginTxx(context.Context, *sql.TxOptions) (*sqlx.Tx, error) -} - -// NewDatabase creates a ThingDatabase instance -func NewDatabase(db *sqlx.DB) Database { - return &database{ - db: db, - } -} - -func (dm database) NamedExecContext(ctx context.Context, query string, args interface{}) (sql.Result, error) { - addSpanTags(ctx, query) - return dm.db.NamedExecContext(ctx, query, args) -} - -func (dm database) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *sqlx.Row { - addSpanTags(ctx, query) - return dm.db.QueryRowxContext(ctx, query, args...) -} - -func (dm database) NamedQueryContext(ctx context.Context, query string, args interface{}) (*sqlx.Rows, error) { - addSpanTags(ctx, query) - return dm.db.NamedQueryContext(ctx, query, args) -} - -func (dm database) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - addSpanTags(ctx, query) - return dm.db.GetContext(ctx, dest, query, args...) 
-} - -func (dm database) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*sqlx.Tx, error) { - span := opentracing.SpanFromContext(ctx) - if span != nil { - span.SetTag("span.kind", "client") - span.SetTag("peer.service", "postgres") - span.SetTag("db.type", "sql") - } - return dm.db.BeginTxx(ctx, opts) -} - -func addSpanTags(ctx context.Context, query string) { - span := opentracing.SpanFromContext(ctx) - if span != nil { - span.SetTag("sql.statement", query) - span.SetTag("span.kind", "client") - span.SetTag("peer.service", "postgres") - span.SetTag("db.type", "sql") - } -} diff --git a/things/postgres/doc.go b/things/postgres/doc.go deleted file mode 100644 index 522b9a7acf..0000000000 --- a/things/postgres/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package postgres contains repository implementations using PostgreSQL as -// the underlying database. -package postgres diff --git a/things/postgres/init.go b/things/postgres/init.go index be1a7b350f..51c8848c1d 100644 --- a/things/postgres/init.go +++ b/things/postgres/init.go @@ -1,69 +1,64 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - package postgres -import migrate "github.com/rubenv/sql-migrate" +import ( + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access + migrate "github.com/rubenv/sql-migrate" +) -// Migration of Things service func Migration() *migrate.MemoryMigrationSource { return &migrate.MemoryMigrationSource{ Migrations: []*migrate.Migration{ { - Id: "things_1", + Id: "clients_01", + // VARCHAR(36) for colums with IDs as UUIDS have a maximum of 36 characters + // STATUS 0 to imply enabled and 1 to imply disabled Up: []string{ - `CREATE TABLE IF NOT EXISTS things ( - id UUID, - owner VARCHAR(254), - key VARCHAR(4096) UNIQUE NOT NULL, - name VARCHAR(1024), - metadata JSON, - PRIMARY KEY (id, owner) + `CREATE TABLE IF NOT EXISTS clients ( + id VARCHAR(36) PRIMARY KEY, + name VARCHAR(1024), + 
owner_id VARCHAR(36), + identity VARCHAR(254), + secret VARCHAR(4096) NOT NULL UNIQUE, + tags TEXT[], + metadata JSONB, + created_at TIMESTAMP, + updated_at TIMESTAMP, + updated_by VARCHAR(254), + status SMALLINT NOT NULL DEFAULT 0 CHECK (status >= 0), + UNIQUE (owner_id, secret) )`, - `CREATE TABLE IF NOT EXISTS channels ( - id UUID, - owner VARCHAR(254), - name VARCHAR(1024), - metadata JSON, - PRIMARY KEY (id, owner) + `CREATE TABLE IF NOT EXISTS groups ( + id VARCHAR(36) PRIMARY KEY, + parent_id VARCHAR(36), + owner_id VARCHAR(36) NOT NULL, + name VARCHAR(1024) NOT NULL, + description VARCHAR(1024), + metadata JSONB, + created_at TIMESTAMP, + updated_at TIMESTAMP, + updated_by VARCHAR(254), + status SMALLINT NOT NULL DEFAULT 0 CHECK (status >= 0), + UNIQUE (owner_id, name), + FOREIGN KEY (parent_id) REFERENCES groups (id) ON DELETE CASCADE )`, - `CREATE TABLE IF NOT EXISTS connections ( - channel_id UUID, - channel_owner VARCHAR(254), - thing_id UUID, - thing_owner VARCHAR(254), - FOREIGN KEY (channel_id, channel_owner) REFERENCES channels (id, owner) ON DELETE CASCADE ON UPDATE CASCADE, - FOREIGN KEY (thing_id, thing_owner) REFERENCES things (id, owner) ON DELETE CASCADE ON UPDATE CASCADE, - PRIMARY KEY (channel_id, channel_owner, thing_id, thing_owner) + `CREATE TABLE IF NOT EXISTS policies ( + owner_id VARCHAR(36) NOT NULL, + subject VARCHAR(36) NOT NULL, + object VARCHAR(36) NOT NULL, + actions TEXT[] NOT NULL, + created_at TIMESTAMP, + updated_at TIMESTAMP, + updated_by VARCHAR(254), + FOREIGN KEY (object) REFERENCES groups (id) ON DELETE CASCADE ON UPDATE CASCADE, + PRIMARY KEY (subject, object) )`, }, Down: []string{ - "DROP TABLE connections", - "DROP TABLE things", - "DROP TABLE channels", - }, - }, - { - Id: "things_2", - Up: []string{ - `ALTER TABLE IF EXISTS things ALTER COLUMN - metadata TYPE JSONB using metadata::text::jsonb`, - }, - }, - { - Id: "things_3", - Up: []string{ - `ALTER TABLE IF EXISTS channels ALTER COLUMN - metadata TYPE JSONB using 
metadata::text::jsonb`, - }, - }, - { - Id: "things_4", - Up: []string{ - `ALTER TABLE IF EXISTS things ADD CONSTRAINT things_id_key UNIQUE (id)`, + `DROP TABLE IF EXISTS clients`, + `DROP TABLE IF EXISTS groups`, + `DROP TABLE IF EXISTS policies`, }, }, }, } - } diff --git a/things/postgres/setup_test.go b/things/postgres/setup_test.go deleted file mode 100644 index 823fea1028..0000000000 --- a/things/postgres/setup_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package postgres_test contains tests for PostgreSQL repository -// implementations. -package postgres_test - -import ( - "fmt" - "log" - "os" - "testing" - - "github.com/jmoiron/sqlx" - pgClient "github.com/mainflux/mainflux/internal/clients/postgres" - "github.com/mainflux/mainflux/things/postgres" - dockertest "github.com/ory/dockertest/v3" -) - -const ( - wrongValue = "wrong-value" -) - -var ( - db *sqlx.DB -) - -func TestMain(m *testing.M) { - pool, err := dockertest.NewPool("") - if err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - cfg := []string{ - "POSTGRES_USER=test", - "POSTGRES_PASSWORD=test", - "POSTGRES_DB=test", - } - container, err := pool.Run("postgres", "13.3-alpine", cfg) - if err != nil { - log.Fatalf("Could not start container: %s", err) - } - - port := container.GetPort("5432/tcp") - - if err := pool.Retry(func() error { - url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) - db, err = sqlx.Open("pgx", url) - if err != nil { - return err - } - return db.Ping() - }); err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - dbConfig := pgClient.Config{ - Host: "localhost", - Port: port, - User: "test", - Pass: "test", - Name: "test", - SSLMode: "disable", - SSLCert: "", - SSLKey: "", - SSLRootCert: "", - } - - if db, err = pgClient.SetupDB(dbConfig, *postgres.Migration()); err != nil { - log.Fatalf("Could not setup test DB 
connection: %s", err) - } - - code := m.Run() - - // Defers will not be run when using os.Exit - db.Close() - if err := pool.Purge(container); err != nil { - log.Fatalf("Could not purge container: %s", err) - } - - os.Exit(code) -} diff --git a/things/postgres/things.go b/things/postgres/things.go deleted file mode 100644 index d021b5f025..0000000000 --- a/things/postgres/things.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - "strings" - - "github.com/gofrs/uuid" - - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -var _ things.ThingRepository = (*thingRepository)(nil) - -type thingRepository struct { - db Database -} - -// NewThingRepository instantiates a PostgreSQL implementation of thing -// repository. -func NewThingRepository(db Database) things.ThingRepository { - return &thingRepository{ - db: db, - } -} - -func (tr thingRepository) Save(ctx context.Context, ths ...things.Thing) ([]things.Thing, error) { - tx, err := tr.db.BeginTxx(ctx, nil) - if err != nil { - return []things.Thing{}, errors.Wrap(errors.ErrCreateEntity, err) - } - - q := `INSERT INTO things (id, owner, name, key, metadata) - VALUES (:id, :owner, :name, :key, :metadata);` - - for _, thing := range ths { - dbth, err := toDBThing(thing) - if err != nil { - return []things.Thing{}, errors.Wrap(errors.ErrCreateEntity, err) - } - - if _, err := tx.NamedExecContext(ctx, q, dbth); err != nil { - if rollbackErr := tx.Rollback(); rollbackErr != nil { - err = errors.Wrap(err, rollbackErr) - return []things.Thing{}, errors.Wrap(errors.ErrCreateEntity, err) - } - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return []things.Thing{}, errors.Wrap(errors.ErrMalformedEntity, err) - case 
pgerrcode.UniqueViolation: - return []things.Thing{}, errors.Wrap(errors.ErrConflict, err) - case pgerrcode.StringDataRightTruncationDataException: - return []things.Thing{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return []things.Thing{}, errors.Wrap(errors.ErrCreateEntity, err) - } - } - - if err = tx.Commit(); err != nil { - return []things.Thing{}, errors.Wrap(errors.ErrCreateEntity, err) - } - - return ths, nil -} - -func (tr thingRepository) Update(ctx context.Context, t things.Thing) error { - q := `UPDATE things SET name = :name, metadata = :metadata WHERE id = :id;` - - dbth, err := toDBThing(t) - if err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - res, errdb := tr.db.NamedExecContext(ctx, q, dbth) - if errdb != nil { - pgErr, ok := errdb.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return errors.Wrap(errors.ErrMalformedEntity, errdb) - case pgerrcode.StringDataRightTruncationDataException: - return errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return errors.Wrap(errors.ErrUpdateEntity, errdb) - } - - cnt, errdb := res.RowsAffected() - if errdb != nil { - return errors.Wrap(errors.ErrUpdateEntity, errdb) - } - - if cnt == 0 { - return errors.ErrNotFound - } - - return nil -} - -func (tr thingRepository) UpdateKey(ctx context.Context, owner, id, key string) error { - q := `UPDATE things SET key = :key WHERE owner = :owner AND id = :id;` - - dbth := dbThing{ - ID: id, - Owner: owner, - Key: key, - } - - res, err := tr.db.NamedExecContext(ctx, q, dbth) - if err != nil { - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.UniqueViolation: - return errors.Wrap(errors.ErrConflict, err) - case pgerrcode.StringDataRightTruncationDataException: - return errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return 
errors.Wrap(errors.ErrUpdateEntity, err) - } - - cnt, err := res.RowsAffected() - if err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - if cnt == 0 { - return errors.ErrNotFound - } - - return nil -} - -func (tr thingRepository) RetrieveByID(ctx context.Context, owner, id string) (things.Thing, error) { - q := `SELECT name, key, metadata FROM things WHERE id = $1;` - - dbth := dbThing{ID: id, Owner: owner} - - if err := tr.db.QueryRowxContext(ctx, q, id).StructScan(&dbth); err != nil { - pgErr, ok := err.(*pgconn.PgError) - // If there is no result or ID is in an invalid format, return ErrNotFound. - if err == sql.ErrNoRows || ok && pgerrcode.InvalidTextRepresentation == pgErr.Code { - return things.Thing{}, errors.Wrap(errors.ErrNotFound, err) - } - return things.Thing{}, errors.Wrap(errors.ErrViewEntity, err) - } - return toThing(dbth) -} - -func (tr thingRepository) RetrieveByKey(ctx context.Context, key string) (string, error) { - q := `SELECT id FROM things WHERE key = $1;` - - var id string - if err := tr.db.QueryRowxContext(ctx, q, key).Scan(&id); err != nil { - if err == sql.ErrNoRows { - return "", errors.Wrap(errors.ErrNotFound, err) - } - return "", errors.Wrap(errors.ErrViewEntity, err) - } - - return id, nil -} - -func (tr thingRepository) RetrieveByIDs(ctx context.Context, thingIDs []string, pm things.PageMetadata) (things.Page, error) { - if len(thingIDs) == 0 { - return things.Page{}, nil - } - - nq, name := getNameQuery(pm.Name) - oq := getOrderQuery(pm.Order) - dq := getDirQuery(pm.Dir) - idq := fmt.Sprintf("WHERE id IN ('%s') ", strings.Join(thingIDs, "','")) - - m, mq, err := getMetadataQuery(pm.Metadata) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - q := fmt.Sprintf(`SELECT id, owner, name, key, metadata FROM things - %s%s%s ORDER BY %s %s LIMIT :limit OFFSET :offset;`, idq, mq, nq, oq, dq) - - params := map[string]interface{}{ - "limit": pm.Limit, - "offset": pm.Offset, - "name": 
name, - "metadata": m, - } - - rows, err := tr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - var items []things.Thing - for rows.Next() { - dbth := dbThing{} - if err := rows.StructScan(&dbth); err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - th, err := toThing(dbth) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - items = append(items, th) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM things %s%s%s;`, idq, mq, nq) - - total, err := total(ctx, tr.db, cq, params) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - page := things.Page{ - Things: items, - PageMetadata: things.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - Order: pm.Order, - Dir: pm.Dir, - }, - } - - return page, nil -} - -func getOwnerQuery(fetchSharedThings bool) string { - if fetchSharedThings { - return "" - } - return "owner = :owner" -} - -func (tr thingRepository) RetrieveAll(ctx context.Context, owner string, pm things.PageMetadata) (things.Page, error) { - nq, name := getNameQuery(pm.Name) - oq := getOrderQuery(pm.Order) - dq := getDirQuery(pm.Dir) - ownerQuery := getOwnerQuery(pm.FetchSharedThings) - m, mq, err := getMetadataQuery(pm.Metadata) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - var query []string - if mq != "" { - query = append(query, mq) - } - if nq != "" { - query = append(query, nq) - } - if ownerQuery != "" { - query = append(query, ownerQuery) - } - - var whereClause string - if len(query) > 0 { - whereClause = fmt.Sprintf(" WHERE %s", strings.Join(query, " AND ")) - } - - q := fmt.Sprintf(`SELECT id, name, key, metadata FROM things - %s ORDER BY %s %s LIMIT :limit OFFSET :offset;`, whereClause, oq, dq) - params := map[string]interface{}{ - "owner": owner, - "limit": pm.Limit, - "offset": 
pm.Offset, - "name": name, - "metadata": m, - } - - rows, err := tr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - var items []things.Thing - for rows.Next() { - dbth := dbThing{Owner: owner} - if err := rows.StructScan(&dbth); err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - th, err := toThing(dbth) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - items = append(items, th) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM things %s;`, whereClause) - - total, err := total(ctx, tr.db, cq, params) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - page := things.Page{ - Things: items, - PageMetadata: things.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - Order: pm.Order, - Dir: pm.Dir, - }, - } - - return page, nil -} - -func (tr thingRepository) RetrieveByChannel(ctx context.Context, owner, chID string, pm things.PageMetadata) (things.Page, error) { - oq := getConnOrderQuery(pm.Order, "th") - dq := getDirQuery(pm.Dir) - - // Verify if UUID format is valid to avoid internal Postgres error - if _, err := uuid.FromString(chID); err != nil { - return things.Page{}, errors.Wrap(errors.ErrNotFound, err) - } - - var q, qc string - switch pm.Disconnected { - case true: - q = fmt.Sprintf(`SELECT id, name, key, metadata - FROM things th - WHERE th.owner = :owner AND th.id NOT IN - (SELECT id FROM things th - INNER JOIN connections conn - ON th.id = conn.thing_id - WHERE th.owner = :owner AND conn.channel_id = :channel) - ORDER BY %s %s - LIMIT :limit - OFFSET :offset;`, oq, dq) - - qc = `SELECT COUNT(*) - FROM things th - WHERE th.owner = $1 AND th.id NOT IN - (SELECT id FROM things th - INNER JOIN connections conn - ON th.id = conn.thing_id - WHERE th.owner = $1 AND conn.channel_id = $2);` - default: - q = fmt.Sprintf(`SELECT id, name, key, 
metadata - FROM things th - INNER JOIN connections conn - ON th.id = conn.thing_id - WHERE th.owner = :owner AND conn.channel_id = :channel - ORDER BY %s %s - LIMIT :limit - OFFSET :offset;`, oq, dq) - - qc = `SELECT COUNT(*) - FROM things th - INNER JOIN connections conn - ON th.id = conn.thing_id - WHERE th.owner = $1 AND conn.channel_id = $2;` - } - - params := map[string]interface{}{ - "owner": owner, - "channel": chID, - "limit": pm.Limit, - "offset": pm.Offset, - } - - rows, err := tr.db.NamedQueryContext(ctx, q, params) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - var items []things.Thing - for rows.Next() { - dbth := dbThing{Owner: owner} - if err := rows.StructScan(&dbth); err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - th, err := toThing(dbth) - if err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - items = append(items, th) - } - - var total uint64 - if err := tr.db.GetContext(ctx, &total, qc, owner, chID); err != nil { - return things.Page{}, errors.Wrap(errors.ErrViewEntity, err) - } - - return things.Page{ - Things: items, - PageMetadata: things.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - }, - }, nil -} - -func (tr thingRepository) Remove(ctx context.Context, owner, id string) error { - dbth := dbThing{ - ID: id, - Owner: owner, - } - q := `DELETE FROM things WHERE id = :id` - if _, err := tr.db.NamedExecContext(ctx, q, dbth); err != nil { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - return nil -} - -type dbThing struct { - ID string `db:"id"` - Owner string `db:"owner"` - Name string `db:"name"` - Key string `db:"key"` - Metadata []byte `db:"metadata"` -} - -func toDBThing(th things.Thing) (dbThing, error) { - data := []byte("{}") - if len(th.Metadata) > 0 { - b, err := json.Marshal(th.Metadata) - if err != nil { - return dbThing{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - 
data = b - } - - return dbThing{ - ID: th.ID, - Owner: th.Owner, - Name: th.Name, - Key: th.Key, - Metadata: data, - }, nil -} - -func toThing(dbth dbThing) (things.Thing, error) { - var metadata map[string]interface{} - if err := json.Unmarshal([]byte(dbth.Metadata), &metadata); err != nil { - return things.Thing{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return things.Thing{ - ID: dbth.ID, - Owner: dbth.Owner, - Name: dbth.Name, - Key: dbth.Key, - Metadata: metadata, - }, nil -} diff --git a/things/postgres/things_test.go b/things/postgres/things_test.go deleted file mode 100644 index ff5687a111..0000000000 --- a/things/postgres/things_test.go +++ /dev/null @@ -1,805 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres_test - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "testing" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - "github.com/mainflux/mainflux/things/postgres" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const maxNameSize = 1024 - -var ( - invalidName = strings.Repeat("m", maxNameSize+1) - idProvider = uuid.New() -) - -func TestThingsSave(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - email := "thing-save@example.com" - - nonexistentThingKey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - ths := []things.Thing{} - for i := 1; i <= 5; i++ { - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - thing := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - } - ths = append(ths, thing) - } - thkey := ths[0].Key - thID := ths[0].ID - - cases := []struct { - desc string - things []things.Thing - err 
error - }{ - { - desc: "create new things", - things: ths, - err: nil, - }, - { - desc: "create things that already exist", - things: ths, - err: errors.ErrConflict, - }, - { - desc: "create thing with invalid ID", - things: []things.Thing{ - {ID: "invalid", Owner: email, Key: thkey}, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "create thing with invalid name", - things: []things.Thing{ - {ID: thID, Owner: email, Key: thkey, Name: invalidName}, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "create thing with invalid Key", - things: []things.Thing{ - {ID: thID, Owner: email, Key: nonexistentThingKey}, - }, - err: errors.ErrConflict, - }, - { - desc: "create things with conflicting keys", - things: ths, - err: errors.ErrConflict, - }, - } - - for _, tc := range cases { - resp, err := thingRepo.Save(context.Background(), tc.things...) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - if err == nil { - assert.Equal(t, tc.things, resp, fmt.Sprintf("%s: got incorrect list of things from Save()", tc.desc)) - } - } -} - -func TestThingUpdate(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - email := "thing-update@example.com" - validName := "mfx_device" - - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - thing := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - } - - sths, err := thingRepo.Save(context.Background(), thing) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - thing.ID = sths[0].ID - - nonexistentThingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - thing things.Thing - err error - }{ - { - desc: "update existing thing", - thing: thing, - err: nil, - }, 
- { - desc: "update non-existing thing with existing user", - thing: things.Thing{ - ID: nonexistentThingID, - Owner: email, - }, - err: errors.ErrNotFound, - }, - { - desc: "update existing thing ID with non-existing user", - thing: things.Thing{ - ID: thing.ID, - Owner: wrongValue, - }, - err: nil, - }, - { - desc: "update non-existing thing with non-existing user", - thing: things.Thing{ - ID: nonexistentThingID, - Owner: wrongValue, - }, - err: errors.ErrNotFound, - }, - { - desc: "update thing with valid name", - thing: things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - Name: validName, - }, - err: nil, - }, - { - desc: "update thing with invalid name", - thing: things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - Name: invalidName, - }, - err: errors.ErrMalformedEntity, - }, - } - - for _, tc := range cases { - err := thingRepo.Update(context.Background(), tc.thing) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestUpdateKey(t *testing.T) { - email := "thing-update=key@example.com" - newKey := "new-key" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th1 := things.Thing{ - ID: id, - Owner: email, - Key: key, - } - ths, err := thingRepo.Save(context.Background(), th1) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th1.ID = ths[0].ID - - id, err = idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key, err = idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th2 := things.Thing{ - ID: id, - Owner: email, - Key: key, - } - ths, err = thingRepo.Save(context.Background(), th2) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - 
th2.ID = ths[0].ID - - nonexistentThingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - id string - key string - err error - }{ - { - desc: "update key of an existing thing", - owner: th2.Owner, - id: th2.ID, - key: newKey, - err: nil, - }, - { - desc: "update key of a non-existing thing with existing user", - owner: th2.Owner, - id: nonexistentThingID, - key: newKey, - err: errors.ErrNotFound, - }, - { - desc: "update key of an existing thing with non-existing user", - owner: wrongValue, - id: th2.ID, - key: newKey, - err: errors.ErrNotFound, - }, - { - desc: "update key of a non-existing thing with non-existing user", - owner: wrongValue, - id: nonexistentThingID, - key: newKey, - err: errors.ErrNotFound, - }, - { - desc: "update key with existing key value", - owner: th2.Owner, - id: th2.ID, - key: th1.Key, - err: errors.ErrConflict, - }, - } - - for _, tc := range cases { - err := thingRepo.UpdateKey(context.Background(), tc.owner, tc.id, tc.key) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestSingleThingRetrieval(t *testing.T) { - email := "thing-single-retrieval@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := things.Thing{ - ID: id, - Owner: email, - Key: key, - Metadata: make(things.Metadata), - } - - ths, err := thingRepo.Save(context.Background(), th) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th.ID = ths[0].ID - - nonexistentThingID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - ID string - 
response things.Thing - err error - }{ - { - desc: "retrieve thing with existing user", - owner: th.Owner, - ID: th.ID, - response: th, - err: nil, - }, - { - desc: "retrieve non-existing thing with existing user", - owner: th.Owner, - ID: nonexistentThingID, - response: things.Thing{}, - err: errors.ErrNotFound, - }, - { - desc: "retrieve thing with malformed ID", - owner: th.Owner, - ID: wrongValue, - response: things.Thing{}, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - resp, err := thingRepo.RetrieveByID(context.Background(), tc.owner, tc.ID) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, resp, fmt.Sprintf("%s: got incorrect response from RetrieveByID", tc.desc)) - } -} - -func TestThingRetrieveByKey(t *testing.T) { - email := "thing-retrieved-by-key@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - th := things.Thing{ - ID: id, - Owner: email, - Key: key, - } - - ths, err := thingRepo.Save(context.Background(), th) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th.ID = ths[0].ID - - cases := []struct { - desc string - key string - ID string - err error - }{ - { - desc: "retrieve existing thing by key", - key: th.Key, - ID: th.ID, - err: nil, - }, - { - desc: "retrieve non-existent thing by key", - key: wrongValue, - ID: "", - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - id, err := thingRepo.RetrieveByKey(context.Background(), tc.key) - assert.Equal(t, tc.ID, id, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.ID, id)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func 
TestMultiThingRetrieval(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - err := cleanTestTable(context.Background(), "things", dbMiddleware) - require.Nil(t, err, fmt.Sprintf("cleaning table 'things' expected to success %v", err)) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - email := "thing-multi-retrieval@example.com" - name := "thing_name" - metaStr := `{"field1":"value1","field2":{"subfield11":"value2","subfield12":{"subfield121":"value3","subfield122":"value4"}}}` - subMetaStr := `{"field2":{"subfield12":{"subfield121":"value3"}}}` - - metadata := things.Metadata{} - err = json.Unmarshal([]byte(metaStr), &metadata) - assert.Nil(t, err, fmt.Sprintf("got expected error while unmarshalling %s\n", err)) - - subMeta := things.Metadata{} - err = json.Unmarshal([]byte(subMetaStr), &subMeta) - assert.Nil(t, err, fmt.Sprintf("got expected error while unmarshalling %s\n", err)) - - wrongMeta := things.Metadata{ - "field": "value1", - } - - offset := uint64(1) - nameNum := uint64(3) - metaNum := uint64(3) - nameMetaNum := uint64(2) - - n := uint64(10) - for i := uint64(0); i < n; i++ { - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := things.Thing{ - Owner: email, - ID: id, - Key: key, - } - - // Create Things with name. - if i < nameNum { - th.Name = fmt.Sprintf("%s-%d", name, i) - } - // Create Things with metadata. - if i >= nameNum && i < nameNum+metaNum { - th.Metadata = metadata - } - // Create Things with name and metadata. 
- if i >= n-nameMetaNum { - th.Metadata = metadata - th.Name = name - } - - _, err = thingRepo.Save(context.Background(), th) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - } - - cases := []struct { - desc string - owner string - pageMetadata things.PageMetadata - size uint64 - }{ - { - desc: "retrieve all things", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - }, - size: n, - }, - { - desc: "retrieve subset of things with existing owner", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - Total: n, - }, - size: n / 2, - }, - { - desc: "retrieve things with existing name", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 1, - Limit: n, - Name: name, - Total: nameNum + nameMetaNum, - }, - size: nameNum + nameMetaNum - offset, - }, - { - desc: "retrieve things with non-existing name", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Name: "wrong", - Total: 0, - }, - size: 0, - }, - { - desc: "retrieve things with existing metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: metaNum + nameMetaNum, - Metadata: metadata, - }, - size: metaNum + nameMetaNum, - }, - { - desc: "retrieve things with partial metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: metaNum + nameMetaNum, - Metadata: subMeta, - }, - size: metaNum + nameMetaNum, - }, - { - desc: "retrieve things with non-existing metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: 0, - Metadata: wrongMeta, - }, - size: 0, - }, - { - desc: "retrieve all things with existing name and metadata", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: nameMetaNum, - Name: name, - Metadata: metadata, - }, - size: nameMetaNum, - }, - { - desc: "retrieve things sorted by name ascendent", - owner: email, - pageMetadata: 
things.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - Order: "name", - Dir: "asc", - }, - size: n, - }, - { - desc: "retrieve things sorted by name descendent", - owner: email, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Total: n, - Order: "name", - Dir: "desc", - }, - size: n, - }, - } - - for _, tc := range cases { - page, err := thingRepo.RetrieveAll(context.Background(), tc.owner, tc.pageMetadata) - size := uint64(len(page.Things)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) - assert.Equal(t, tc.pageMetadata.Total, page.Total, fmt.Sprintf("%s: expected total %d got %d\n", tc.desc, tc.pageMetadata.Total, page.Total)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", tc.desc, err)) - - // Check if Things list have been sorted properly - testSortThings(t, tc.pageMetadata, page.Things) - } -} - -func TestMultiThingRetrievalByChannel(t *testing.T) { - email := "thing-multi-retrieval-by-channel@example.com" - - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - channelRepo := postgres.NewChannelRepository(dbMiddleware) - - n := uint64(10) - thsDisconNum := uint64(1) - - chID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - _, err = channelRepo.Save(context.Background(), things.Channel{ - ID: chID, - Owner: email, - }) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - for i := uint64(0); i < n; i++ { - thID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thkey, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - th := things.Thing{ - ID: thID, - Owner: email, - Key: thkey, - } - - ths, err := thingRepo.Save(context.Background(), th) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - thID = ths[0].ID - - // Don't connnect last Thing - if i == n-thsDisconNum { - 
break - } - - err = channelRepo.Connect(context.Background(), email, []string{chID}, []string{thID}) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - } - - nonexistentChanID, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - - cases := []struct { - desc string - owner string - chID string - pageMetadata things.PageMetadata - size uint64 - err error - }{ - { - desc: "retrieve all things by channel with existing owner", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: n - thsDisconNum, - }, - { - desc: "retrieve subset of things by channel with existing owner", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - }, - size: (n / 2) - thsDisconNum, - }, - { - desc: "retrieve things by channel with non-existing owner", - owner: wrongValue, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 0, - }, - { - desc: "retrieve things by non-existing channel", - owner: email, - chID: nonexistentChanID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 0, - }, - { - desc: "retrieve things with malformed UUID", - owner: email, - chID: wrongValue, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 0, - err: errors.ErrNotFound, - }, - { - desc: "retrieve all non connected things by channel with existing owner", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - }, - size: thsDisconNum, - }, - { - desc: "retrieve all things by channel sorted by name ascendent", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "asc", - }, - size: n - thsDisconNum, - }, - { - desc: "retrieve all non-connected things by channel sorted by name ascendent", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 
0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "asc", - }, - size: thsDisconNum, - }, - { - desc: "retrieve all things by channel sorted by name descendent", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "desc", - }, - size: n - thsDisconNum, - }, - { - desc: "retrieve all non-connected things by channel sorted by name descendent", - owner: email, - chID: chID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "desc", - }, - size: thsDisconNum, - }, - } - - for _, tc := range cases { - page, err := thingRepo.RetrieveByChannel(context.Background(), tc.owner, tc.chID, tc.pageMetadata) - size := uint64(len(page.Things)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected no error got %d\n", tc.desc, err)) - - // Check if Things by Channel list have been sorted properly - testSortThings(t, tc.pageMetadata, page.Things) - } -} - -func TestThingRemoval(t *testing.T) { - email := "thing-removal@example.com" - dbMiddleware := postgres.NewDatabase(db) - thingRepo := postgres.NewThingRepository(dbMiddleware) - - id, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - thing := things.Thing{ - ID: id, - Owner: email, - Key: key, - } - - ths, _ := thingRepo.Save(context.Background(), thing) - thing.ID = ths[0].ID - - // show that the removal works the same for both existing and non-existing - // (removed) thing - for i := 0; i < 2; i++ { - err := thingRepo.Remove(context.Background(), email, thing.ID) - assert.Nil(t, err, fmt.Sprintf("#%d: failed to remove thing due to: %s", i, err)) - - _, err = thingRepo.RetrieveByID(context.Background(), email, thing.ID) - assert.True(t, 
errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("#%d: expected %s got %s", i, errors.ErrNotFound, err)) - } -} - -func testSortThings(t *testing.T, pm things.PageMetadata, ths []things.Thing) { - switch pm.Order { - case "name": - current := ths[0] - for _, res := range ths { - if pm.Dir == "asc" { - assert.GreaterOrEqual(t, res.Name, current.Name) - } - if pm.Dir == "desc" { - assert.GreaterOrEqual(t, current.Name, res.Name) - } - current = res - } - default: - break - } -} - -func cleanTestTable(ctx context.Context, table string, db postgres.Database) error { - q := fmt.Sprintf(`DELETE FROM %s CASCADE;`, table) - _, err := db.NamedExecContext(ctx, q, map[string]interface{}{}) - return err -} diff --git a/things/redis/channels.go b/things/redis/channels.go deleted file mode 100644 index 591dbfd654..0000000000 --- a/things/redis/channels.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package redis - -import ( - "context" - "fmt" - - "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -const chanPrefix = "channel" - -var _ things.ChannelCache = (*channelCache)(nil) - -type channelCache struct { - client *redis.Client -} - -// NewChannelCache returns redis channel cache implementation. 
-func NewChannelCache(client *redis.Client) things.ChannelCache { - return channelCache{client: client} -} - -func (cc channelCache) Connect(ctx context.Context, chanID, thingID string) error { - cid, tid := kv(chanID, thingID) - if err := cc.client.SAdd(ctx, cid, tid).Err(); err != nil { - return errors.Wrap(errors.ErrCreateEntity, err) - } - return nil -} - -func (cc channelCache) HasThing(ctx context.Context, chanID, thingID string) bool { - cid, tid := kv(chanID, thingID) - return cc.client.SIsMember(ctx, cid, tid).Val() -} - -func (cc channelCache) Disconnect(ctx context.Context, chanID, thingID string) error { - cid, tid := kv(chanID, thingID) - if err := cc.client.SRem(ctx, cid, tid).Err(); err != nil { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - return nil -} - -func (cc channelCache) Remove(ctx context.Context, chanID string) error { - cid, _ := kv(chanID, "0") - if err := cc.client.Del(ctx, cid).Err(); err != nil { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - return nil -} - -// Generates key-value pair -func kv(chanID, thingID string) (string, string) { - cid := fmt.Sprintf("%s:%s", chanPrefix, chanID) - return cid, thingID -} diff --git a/things/redis/channels_test.go b/things/redis/channels_test.go deleted file mode 100644 index 9613e70746..0000000000 --- a/things/redis/channels_test.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package redis_test - -import ( - "context" - "fmt" - "testing" - - "github.com/mainflux/mainflux/things/redis" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestConnect(t *testing.T) { - channelCache := redis.NewChannelCache(redisClient) - - cid := "123" - tid := "321" - - cases := []struct { - desc string - cid string - tid string - }{ - { - desc: "connect thing to channel", - cid: cid, - tid: tid, - }, - { - desc: "connect already connected thing to channel", - cid: cid, - tid: tid, - }, - } - for _, 
tc := range cases { - err := channelCache.Connect(context.Background(), cid, tid) - assert.Nil(t, err, fmt.Sprintf("%s: fail to connect due to: %s\n", tc.desc, err)) - } -} - -func TestHasThing(t *testing.T) { - channelCache := redis.NewChannelCache(redisClient) - - cid := "123" - tid := "321" - - err := channelCache.Connect(context.Background(), cid, tid) - require.Nil(t, err, fmt.Sprintf("connect thing to channel: fail to connect due to: %s\n", err)) - - cases := map[string]struct { - cid string - tid string - hasAccess bool - }{ - "access check for thing that has access": { - cid: cid, - tid: tid, - hasAccess: true, - }, - "access check for thing without access": { - cid: cid, - tid: cid, - hasAccess: false, - }, - "access check for non-existing channel": { - cid: tid, - tid: tid, - hasAccess: false, - }, - } - - for desc, tc := range cases { - hasAccess := channelCache.HasThing(context.Background(), tc.cid, tc.tid) - assert.Equal(t, tc.hasAccess, hasAccess, fmt.Sprintf("%s: expected %t got %t\n", desc, tc.hasAccess, hasAccess)) - } -} -func TestDisconnect(t *testing.T) { - channelCache := redis.NewChannelCache(redisClient) - - cid := "123" - tid := "321" - tid2 := "322" - - err := channelCache.Connect(context.Background(), cid, tid) - require.Nil(t, err, fmt.Sprintf("connect thing to channel: fail to connect due to: %s\n", err)) - - cases := []struct { - desc string - cid string - tid string - hasAccess bool - }{ - { - desc: "disconnecting connected thing", - cid: cid, - tid: tid, - hasAccess: false, - }, - { - desc: "disconnecting non-connected thing", - cid: cid, - tid: tid2, - hasAccess: false, - }, - } - for _, tc := range cases { - err := channelCache.Disconnect(context.Background(), tc.cid, tc.tid) - assert.Nil(t, err, fmt.Sprintf("%s: fail due to: %s\n", tc.desc, err)) - - hasAccess := channelCache.HasThing(context.Background(), tc.cid, tc.tid) - assert.Equal(t, tc.hasAccess, hasAccess, fmt.Sprintf("access check after %s: expected %t got %t\n", tc.desc, 
tc.hasAccess, hasAccess)) - } -} - -func TestRemove(t *testing.T) { - channelCache := redis.NewChannelCache(redisClient) - - cid := "123" - cid2 := "124" - tid := "321" - - err := channelCache.Connect(context.Background(), cid, tid) - require.Nil(t, err, fmt.Sprintf("connect thing to channel: fail to connect due to: %s\n", err)) - - cases := []struct { - desc string - cid string - tid string - err error - hasAccess bool - }{ - { - desc: "Remove channel from cache", - cid: cid, - tid: tid, - err: nil, - hasAccess: false, - }, - { - desc: "Remove non-cached channel from cache", - cid: cid2, - tid: tid, - err: nil, - hasAccess: false, - }, - } - - for _, tc := range cases { - err := channelCache.Remove(context.Background(), tc.cid) - assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - hasAcces := channelCache.HasThing(context.Background(), tc.cid, tc.tid) - assert.Equal(t, tc.hasAccess, hasAcces, "%s - check access after removing channel: expected %t got %t\n", tc.desc, tc.hasAccess, hasAcces) - } -} diff --git a/things/redis/events.go b/things/redis/events.go deleted file mode 100644 index 77b09e1b49..0000000000 --- a/things/redis/events.go +++ /dev/null @@ -1,196 +0,0 @@ -package redis - -import "encoding/json" - -const ( - thingPrefix = "thing." - thingCreate = thingPrefix + "create" - thingUpdate = thingPrefix + "update" - thingRemove = thingPrefix + "remove" - thingConnect = thingPrefix + "connect" - thingDisconnect = thingPrefix + "disconnect" - - channelPrefix = "channel." 
- channelCreate = channelPrefix + "create" - channelUpdate = channelPrefix + "update" - channelRemove = channelPrefix + "remove" -) - -type event interface { - Encode() map[string]interface{} -} - -var ( - _ event = (*createThingEvent)(nil) - _ event = (*updateThingEvent)(nil) - _ event = (*removeThingEvent)(nil) - _ event = (*createChannelEvent)(nil) - _ event = (*updateChannelEvent)(nil) - _ event = (*removeChannelEvent)(nil) - _ event = (*connectThingEvent)(nil) - _ event = (*disconnectThingEvent)(nil) -) - -type createThingEvent struct { - id string - owner string - name string - metadata map[string]interface{} -} - -func (cte createThingEvent) Encode() map[string]interface{} { - val := map[string]interface{}{ - "id": cte.id, - "owner": cte.owner, - "operation": thingCreate, - } - - if cte.name != "" { - val["name"] = cte.name - } - - if cte.metadata != nil { - metadata, err := json.Marshal(cte.metadata) - if err != nil { - return val - } - - val["metadata"] = string(metadata) - } - - return val -} - -type updateThingEvent struct { - id string - name string - metadata map[string]interface{} -} - -func (ute updateThingEvent) Encode() map[string]interface{} { - val := map[string]interface{}{ - "id": ute.id, - "operation": thingUpdate, - } - - if ute.name != "" { - val["name"] = ute.name - } - - if ute.metadata != nil { - metadata, err := json.Marshal(ute.metadata) - if err != nil { - return val - } - - val["metadata"] = string(metadata) - } - - return val -} - -type removeThingEvent struct { - id string -} - -func (rte removeThingEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "id": rte.id, - "operation": thingRemove, - } -} - -type createChannelEvent struct { - id string - owner string - name string - metadata map[string]interface{} -} - -func (cce createChannelEvent) Encode() map[string]interface{} { - val := map[string]interface{}{ - "id": cce.id, - "owner": cce.owner, - "operation": channelCreate, - } - - if cce.name != "" { - 
val["name"] = cce.name - } - - if cce.metadata != nil { - metadata, err := json.Marshal(cce.metadata) - if err != nil { - return val - } - - val["metadata"] = string(metadata) - } - - return val -} - -type updateChannelEvent struct { - id string - name string - metadata map[string]interface{} -} - -func (uce updateChannelEvent) Encode() map[string]interface{} { - val := map[string]interface{}{ - "id": uce.id, - "operation": channelUpdate, - } - - if uce.name != "" { - val["name"] = uce.name - } - - if uce.metadata != nil { - metadata, err := json.Marshal(uce.metadata) - if err != nil { - return val - } - - val["metadata"] = string(metadata) - } - - return val -} - -type removeChannelEvent struct { - id string -} - -func (rce removeChannelEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "id": rce.id, - "operation": channelRemove, - } -} - -type connectThingEvent struct { - chanID string - thingID string -} - -func (cte connectThingEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "chan_id": cte.chanID, - "thing_id": cte.thingID, - "operation": thingConnect, - } -} - -type disconnectThingEvent struct { - chanID string - thingID string -} - -func (dte disconnectThingEvent) Encode() map[string]interface{} { - return map[string]interface{}{ - "chan_id": dte.chanID, - "thing_id": dte.thingID, - "operation": thingDisconnect, - } -} diff --git a/things/redis/setup_test.go b/things/redis/setup_test.go deleted file mode 100644 index 08c1dc58e4..0000000000 --- a/things/redis/setup_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package redis_test - -import ( - "context" - "fmt" - "log" - "os" - "testing" - - "github.com/go-redis/redis/v8" - dockertest "github.com/ory/dockertest/v3" -) - -const ( - wrongValue = "wrong-value" -) - -var redisClient *redis.Client - -func TestMain(m *testing.M) { - pool, err := dockertest.NewPool("") - if err != nil { - 
log.Fatalf("Could not connect to docker: %s", err) - } - - container, err := pool.Run("redis", "5.0-alpine", nil) - if err != nil { - log.Fatalf("Could not start container: %s", err) - } - - if err := pool.Retry(func() error { - redisClient = redis.NewClient(&redis.Options{ - Addr: fmt.Sprintf("localhost:%s", container.GetPort("6379/tcp")), - Password: "", - DB: 0, - }) - - return redisClient.Ping(context.Background()).Err() - }); err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - code := m.Run() - - if err := pool.Purge(container); err != nil { - log.Fatalf("Could not purge container: %s", err) - } - - os.Exit(code) -} diff --git a/things/redis/streams.go b/things/redis/streams.go deleted file mode 100644 index 4ff8b7e47d..0000000000 --- a/things/redis/streams.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package redis - -import ( - "context" - - "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux/things" -) - -const ( - streamID = "mainflux.things" - streamLen = 1000 -) - -var _ things.Service = (*eventStore)(nil) - -type eventStore struct { - svc things.Service - client *redis.Client -} - -// NewEventStoreMiddleware returns wrapper around things service that sends -// events to event store. -func NewEventStoreMiddleware(svc things.Service, client *redis.Client) things.Service { - return eventStore{ - svc: svc, - client: client, - } -} - -func (es eventStore) CreateThings(ctx context.Context, token string, ths ...things.Thing) ([]things.Thing, error) { - sths, err := es.svc.CreateThings(ctx, token, ths...) 
- if err != nil { - return sths, err - } - - for _, thing := range sths { - event := createThingEvent{ - id: thing.ID, - owner: thing.Owner, - name: thing.Name, - metadata: thing.Metadata, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - - if err = es.client.XAdd(ctx, record).Err(); err != nil { - return sths, err - } - } - - return sths, nil -} - -func (es eventStore) UpdateThing(ctx context.Context, token string, thing things.Thing) error { - if err := es.svc.UpdateThing(ctx, token, thing); err != nil { - return err - } - - event := updateThingEvent{ - id: thing.ID, - name: thing.Name, - metadata: thing.Metadata, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - - return es.client.XAdd(ctx, record).Err() -} - -// UpdateKey doesn't send event because key shouldn't be sent over stream. -// Maybe we can start publishing this event at some point, without key value -// in order to notify adapters to disconnect connected things after key update. 
-func (es eventStore) UpdateKey(ctx context.Context, token, id, key string) error { - return es.svc.UpdateKey(ctx, token, id, key) -} - -func (es eventStore) ShareThing(ctx context.Context, token, thingID string, actions, userIDs []string) error { - return es.svc.ShareThing(ctx, token, thingID, actions, userIDs) -} - -func (es eventStore) ViewThing(ctx context.Context, token, id string) (things.Thing, error) { - return es.svc.ViewThing(ctx, token, id) -} - -func (es eventStore) ListThings(ctx context.Context, token string, pm things.PageMetadata) (things.Page, error) { - return es.svc.ListThings(ctx, token, pm) -} - -func (es eventStore) ListThingsByChannel(ctx context.Context, token, chID string, pm things.PageMetadata) (things.Page, error) { - return es.svc.ListThingsByChannel(ctx, token, chID, pm) -} - -func (es eventStore) RemoveThing(ctx context.Context, token, id string) error { - if err := es.svc.RemoveThing(ctx, token, id); err != nil { - return err - } - - event := removeThingEvent{ - id: id, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - - return es.client.XAdd(ctx, record).Err() -} - -func (es eventStore) CreateChannels(ctx context.Context, token string, channels ...things.Channel) ([]things.Channel, error) { - schs, err := es.svc.CreateChannels(ctx, token, channels...) 
- if err != nil { - return schs, err - } - - for _, channel := range schs { - event := createChannelEvent{ - id: channel.ID, - owner: channel.Owner, - name: channel.Name, - metadata: channel.Metadata, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - if err = es.client.XAdd(ctx, record).Err(); err != nil { - return schs, err - } - } - - return schs, nil -} - -func (es eventStore) UpdateChannel(ctx context.Context, token string, channel things.Channel) error { - if err := es.svc.UpdateChannel(ctx, token, channel); err != nil { - return err - } - - event := updateChannelEvent{ - id: channel.ID, - name: channel.Name, - metadata: channel.Metadata, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - return es.client.XAdd(ctx, record).Err() -} - -func (es eventStore) ViewChannel(ctx context.Context, token, id string) (things.Channel, error) { - return es.svc.ViewChannel(ctx, token, id) -} - -func (es eventStore) ListChannels(ctx context.Context, token string, pm things.PageMetadata) (things.ChannelsPage, error) { - return es.svc.ListChannels(ctx, token, pm) -} - -func (es eventStore) ListChannelsByThing(ctx context.Context, token, thID string, pm things.PageMetadata) (things.ChannelsPage, error) { - return es.svc.ListChannelsByThing(ctx, token, thID, pm) -} - -func (es eventStore) RemoveChannel(ctx context.Context, token, id string) error { - if err := es.svc.RemoveChannel(ctx, token, id); err != nil { - return err - } - - event := removeChannelEvent{ - id: id, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - return es.client.XAdd(ctx, record).Err() -} - -func (es eventStore) Connect(ctx context.Context, token string, chIDs, thIDs []string) error { - if err := es.svc.Connect(ctx, token, chIDs, thIDs); err != nil { - return err - } - - for _, chID := range chIDs { - for _, thID := range 
thIDs { - event := connectThingEvent{ - chanID: chID, - thingID: thID, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - if err := es.client.XAdd(ctx, record).Err(); err != nil { - return err - } - } - } - - return nil -} - -func (es eventStore) Disconnect(ctx context.Context, token string, chIDs, thIDs []string) error { - if err := es.svc.Disconnect(ctx, token, chIDs, thIDs); err != nil { - return err - } - - for _, chID := range chIDs { - for _, thID := range thIDs { - event := disconnectThingEvent{ - chanID: chID, - thingID: thID, - } - record := &redis.XAddArgs{ - Stream: streamID, - MaxLenApprox: streamLen, - Values: event.Encode(), - } - if err := es.client.XAdd(ctx, record).Err(); err != nil { - return err - } - } - } - - return nil -} - -func (es eventStore) CanAccessByKey(ctx context.Context, chanID string, key string) (string, error) { - return es.svc.CanAccessByKey(ctx, chanID, key) -} - -func (es eventStore) CanAccessByID(ctx context.Context, chanID string, thingID string) error { - return es.svc.CanAccessByID(ctx, chanID, thingID) -} - -func (es eventStore) IsChannelOwner(ctx context.Context, owner, chanID string) error { - return es.svc.IsChannelOwner(ctx, owner, chanID) -} - -func (es eventStore) Identify(ctx context.Context, key string) (string, error) { - return es.svc.Identify(ctx, key) -} - -func (es eventStore) ListMembers(ctx context.Context, token, groupID string, pm things.PageMetadata) (things.Page, error) { - return es.svc.ListMembers(ctx, token, groupID, pm) -} diff --git a/things/redis/streams_test.go b/things/redis/streams_test.go deleted file mode 100644 index 4f72f286b7..0000000000 --- a/things/redis/streams_test.go +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package redis_test - -import ( - "context" - "fmt" - "math" - "strconv" - "testing" - "time" - - r "github.com/go-redis/redis/v8" - 
"github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - "github.com/mainflux/mainflux/things/mocks" - "github.com/mainflux/mainflux/things/redis" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - streamID = "mainflux.things" - email = "user@example.com" - adminEmail = "admin@example.com" - token = "token" - thingPrefix = "thing." - thingCreate = thingPrefix + "create" - thingUpdate = thingPrefix + "update" - thingRemove = thingPrefix + "remove" - thingConnect = thingPrefix + "connect" - thingDisconnect = thingPrefix + "disconnect" - - channelPrefix = "channel." - channelCreate = channelPrefix + "create" - channelUpdate = channelPrefix + "update" - channelRemove = channelPrefix + "remove" -) - -func newService(tokens map[string]string) things.Service { - userPolicy := mocks.MockSubjectSet{Object: "users", Relation: "member"} - adminPolicy := mocks.MockSubjectSet{Object: "authorities", Relation: "member"} - auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{ - adminEmail: {userPolicy, adminPolicy}, email: {userPolicy}}) - conns := make(chan mocks.Connection) - thingsRepo := mocks.NewThingRepository(conns) - channelsRepo := mocks.NewChannelRepository(thingsRepo, conns) - chanCache := mocks.NewChannelCache() - thingCache := mocks.NewThingCache() - idProvider := uuid.NewMock() - - return things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) -} - -func TestCreateThings(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - ths []things.Thing - key string - err error - event map[string]interface{} - }{ - { - desc: "create things successfully", - ths: []things.Thing{{ - Name: "a", - Metadata: map[string]interface{}{"test": "test"}, - }}, - key: token, - 
err: nil, - event: map[string]interface{}{ - "id": "123e4567-e89b-12d3-a456-000000000001", - "name": "a", - "owner": email, - "metadata": "{\"test\":\"test\"}", - "operation": thingCreate, - }, - }, - { - desc: "create things with invalid credentials", - ths: []things.Thing{{Name: "a", Metadata: map[string]interface{}{"test": "test"}}}, - key: "", - err: errors.ErrAuthentication, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - _, err := svc.CreateThings(context.Background(), tc.key, tc.ths...) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - assert.Equal(t, tc.event, event, fmt.Sprintf("%s: got incorrect event\n", tc.desc)) - } -} - -func TestUpdateThing(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. 
- th := things.Thing{Name: "a", Metadata: map[string]interface{}{"test": "test"}} - sths, err := svc.CreateThings(context.Background(), token, th) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - thing things.Thing - key string - err error - event map[string]interface{} - }{ - { - desc: "update existing thing successfully", - thing: things.Thing{ - ID: sth.ID, - Name: "a", - Metadata: map[string]interface{}{"test": "test"}, - }, - key: token, - err: nil, - event: map[string]interface{}{ - "id": sth.ID, - "name": "a", - "metadata": "{\"test\":\"test\"}", - "operation": thingUpdate, - }, - }, - } - - lastID := "0" - for _, tc := range cases { - err := svc.UpdateThing(context.Background(), tc.key, tc.thing) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s: got incorrect event\n", tc.desc)) - } -} - -func TestViewThing(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. 
- sths, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - - essvc := redis.NewEventStoreMiddleware(svc, redisClient) - esth, eserr := essvc.ViewThing(context.Background(), token, sth.ID) - th, err := svc.ViewThing(context.Background(), token, sth.ID) - assert.Equal(t, th, esth, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", th, esth)) - assert.Equal(t, err, eserr, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", err, eserr)) -} - -func TestListThings(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. - _, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - - essvc := redis.NewEventStoreMiddleware(svc, redisClient) - esths, eserr := essvc.ListThings(context.Background(), token, things.PageMetadata{Offset: 0, Limit: 10}) - ths, err := svc.ListThings(context.Background(), token, things.PageMetadata{Offset: 0, Limit: 10}) - assert.Equal(t, ths, esths, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", ths, esths)) - assert.Equal(t, err, eserr, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", err, eserr)) -} - -func TestListThingsByChannel(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. 
- sths, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - err = svc.Connect(context.Background(), token, []string{sch.ID}, []string{sth.ID}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - - essvc := redis.NewEventStoreMiddleware(svc, redisClient) - esths, eserr := essvc.ListThingsByChannel(context.Background(), token, sch.ID, things.PageMetadata{Offset: 0, Limit: 10}) - thps, err := svc.ListThingsByChannel(context.Background(), token, sch.ID, things.PageMetadata{Offset: 0, Limit: 10}) - assert.Equal(t, thps, esths, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", thps, esths)) - assert.Equal(t, err, eserr, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", err, eserr)) -} - -func TestRemoveThing(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. 
- sths, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - id string - key string - err error - event map[string]interface{} - }{ - { - desc: "delete existing thing successfully", - id: sth.ID, - key: token, - err: nil, - event: map[string]interface{}{ - "id": sth.ID, - "operation": thingRemove, - }, - }, - { - desc: "delete thing with invalid credentials", - id: strconv.FormatUint(math.MaxUint64, 10), - key: "", - err: errors.ErrAuthentication, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - err := svc.RemoveThing(context.Background(), tc.key, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s:got incorrect event\n", tc.desc)) - } -} - -func TestCreateChannels(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - chs []things.Channel - key string - err error - event map[string]interface{} - }{ - { - desc: "create channels successfully", - chs: []things.Channel{{Name: "a", Metadata: map[string]interface{}{"test": "test"}}}, - key: token, - err: nil, - event: map[string]interface{}{ - "id": "123e4567-e89b-12d3-a456-000000000001", - "name": "a", - "metadata": "{\"test\":\"test\"}", - "owner": email, - "operation": channelCreate, - }, - }, - { 
- desc: "create channels with invalid credentials", - chs: []things.Channel{{Name: "a", Metadata: map[string]interface{}{"test": "test"}}}, - key: "", - err: errors.ErrAuthentication, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - _, err := svc.CreateChannels(context.Background(), tc.key, tc.chs...) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.event, event)) - } -} - -func TestUpdateChannel(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: adminEmail}) - // Create channel without sending event. 
- schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - channel things.Channel - key string - err error - event map[string]interface{} - }{ - { - desc: "update channel successfully", - channel: things.Channel{ - ID: sch.ID, - Name: "b", - Metadata: map[string]interface{}{"test": "test"}, - }, - key: token, - err: nil, - event: map[string]interface{}{ - "id": sch.ID, - "name": "b", - "metadata": "{\"test\":\"test\"}", - "operation": channelUpdate, - }, - }, - { - desc: "create non-existent channel", - channel: things.Channel{ - ID: strconv.FormatUint(math.MaxUint64, 10), - Name: "c", - }, - key: token, - err: errors.ErrNotFound, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - err := svc.UpdateChannel(context.Background(), tc.key, tc.channel) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s: got incorrect event\n", tc.desc)) - } -} - -func TestViewChannel(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create channel without sending event. 
- schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - - essvc := redis.NewEventStoreMiddleware(svc, redisClient) - esch, eserr := essvc.ViewChannel(context.Background(), token, sch.ID) - ch, err := svc.ViewChannel(context.Background(), token, sch.ID) - assert.Equal(t, ch, esch, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", ch, esch)) - assert.Equal(t, err, eserr, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", err, eserr)) -} - -func TestListChannels(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. - _, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - assert.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - - essvc := redis.NewEventStoreMiddleware(svc, redisClient) - eschs, eserr := essvc.ListChannels(context.Background(), token, things.PageMetadata{Offset: 0, Limit: 10}) - chs, err := svc.ListChannels(context.Background(), token, things.PageMetadata{Offset: 0, Limit: 10}) - assert.Equal(t, chs, eschs, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", chs, eschs)) - assert.Equal(t, err, eserr, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", err, eserr)) -} - -func TestListChannelsByThing(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing without sending event. 
- sths, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - err = svc.Connect(context.Background(), token, []string{sch.ID}, []string{sth.ID}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - - essvc := redis.NewEventStoreMiddleware(svc, redisClient) - eschs, eserr := essvc.ListChannelsByThing(context.Background(), token, sth.ID, things.PageMetadata{Offset: 0, Limit: 10}) - chps, err := svc.ListChannelsByThing(context.Background(), token, sth.ID, things.PageMetadata{Offset: 0, Limit: 10}) - assert.Equal(t, chps, eschs, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", chps, eschs)) - assert.Equal(t, err, eserr, fmt.Sprintf("event sourcing changed service behavior: expected %v got %v", err, eserr)) -} - -func TestRemoveChannel(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: adminEmail}) - // Create channel without sending event. 
- schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - id string - key string - err error - event map[string]interface{} - }{ - { - desc: "update channel successfully", - id: sch.ID, - key: token, - err: nil, - event: map[string]interface{}{ - "id": sch.ID, - "operation": channelRemove, - }, - }, - { - desc: "create non-existent channel", - id: strconv.FormatUint(math.MaxUint64, 10), - key: "", - err: errors.ErrAuthentication, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - err := svc.RemoveChannel(context.Background(), tc.key, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s:got incorrect event\n", tc.desc)) - } -} - -func TestConnectEvent(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing and channel that will be connected. 
- sths, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - thingID string - chanID string - key string - err error - event map[string]interface{} - }{ - { - desc: "connect existing thing to existing channel", - thingID: sth.ID, - chanID: sch.ID, - key: token, - err: nil, - event: map[string]interface{}{ - "chan_id": sch.ID, - "thing_id": sth.ID, - "operation": thingConnect, - }, - }, - { - desc: "connect non-existent thing to channel", - thingID: strconv.FormatUint(math.MaxUint64, 10), - chanID: sch.ID, - key: token, - err: errors.ErrNotFound, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - err := svc.Connect(context.Background(), tc.key, []string{tc.chanID}, []string{tc.thingID}) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s: got incorrect event\n", tc.desc)) - } -} - -func TestDisconnectEvent(t *testing.T) { - _ = redisClient.FlushAll(context.Background()).Err() - - svc := newService(map[string]string{token: email}) - // Create thing and channel that will be connected. 
- sths, err := svc.CreateThings(context.Background(), token, things.Thing{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sth := sths[0] - schs, err := svc.CreateChannels(context.Background(), token, things.Channel{Name: "a"}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - sch := schs[0] - err = svc.Connect(context.Background(), token, []string{sch.ID}, []string{sth.ID}) - require.Nil(t, err, fmt.Sprintf("got unexpected error %s", err)) - - svc = redis.NewEventStoreMiddleware(svc, redisClient) - - cases := []struct { - desc string - thingID string - chanID string - key string - err error - event map[string]interface{} - }{ - { - desc: "disconnect thing from channel", - thingID: sth.ID, - chanID: sch.ID, - key: token, - err: nil, - event: map[string]interface{}{ - "chan_id": sch.ID, - "thing_id": sth.ID, - "operation": thingDisconnect, - }, - }, - { - desc: "disconnect non-existent thing from channel", - thingID: strconv.FormatUint(math.MaxUint64, 10), - chanID: sch.ID, - key: token, - err: errors.ErrNotFound, - event: nil, - }, - } - - lastID := "0" - for _, tc := range cases { - err := svc.Disconnect(context.Background(), tc.key, []string{tc.chanID}, []string{tc.thingID}) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - streams := redisClient.XRead(context.Background(), &r.XReadArgs{ - Streams: []string{streamID, lastID}, - Count: 1, - Block: time.Second, - }).Val() - - var event map[string]interface{} - if len(streams) > 0 && len(streams[0].Messages) > 0 { - msg := streams[0].Messages[0] - event = msg.Values - lastID = msg.ID - } - - assert.Equal(t, tc.event, event, fmt.Sprintf("%s: got incorrect event\n", tc.desc)) - } -} diff --git a/things/redis/things.go b/things/redis/things.go deleted file mode 100644 index dea48102dc..0000000000 --- a/things/redis/things.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 
- -package redis - -import ( - "context" - "fmt" - - "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things" -) - -const ( - keyPrefix = "thing_key" - idPrefix = "thing" -) - -var _ things.ThingCache = (*thingCache)(nil) - -type thingCache struct { - client *redis.Client -} - -// NewThingCache returns redis thing cache implementation. -func NewThingCache(client *redis.Client) things.ThingCache { - return &thingCache{ - client: client, - } -} - -func (tc *thingCache) Save(ctx context.Context, thingKey string, thingID string) error { - tkey := fmt.Sprintf("%s:%s", keyPrefix, thingKey) - if err := tc.client.Set(ctx, tkey, thingID, 0).Err(); err != nil { - return errors.Wrap(errors.ErrCreateEntity, err) - } - - tid := fmt.Sprintf("%s:%s", idPrefix, thingID) - if err := tc.client.Set(ctx, tid, thingKey, 0).Err(); err != nil { - return errors.Wrap(errors.ErrCreateEntity, err) - } - return nil -} - -func (tc *thingCache) ID(ctx context.Context, thingKey string) (string, error) { - tkey := fmt.Sprintf("%s:%s", keyPrefix, thingKey) - thingID, err := tc.client.Get(ctx, tkey).Result() - if err != nil { - return "", errors.Wrap(errors.ErrNotFound, err) - } - - return thingID, nil -} - -func (tc *thingCache) Remove(ctx context.Context, thingID string) error { - tid := fmt.Sprintf("%s:%s", idPrefix, thingID) - key, err := tc.client.Get(ctx, tid).Result() - // Redis returns Nil Reply when key does not exist. 
- if err == redis.Nil { - return nil - } - if err != nil { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - - tkey := fmt.Sprintf("%s:%s", keyPrefix, key) - if err := tc.client.Del(ctx, tkey, tid).Err(); err != nil { - return errors.Wrap(errors.ErrRemoveEntity, err) - } - return nil -} diff --git a/things/redis/things_test.go b/things/redis/things_test.go deleted file mode 100644 index d4780e56a9..0000000000 --- a/things/redis/things_test.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package redis_test - -import ( - "context" - "fmt" - "testing" - - r "github.com/go-redis/redis/v8" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things/redis" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var idProvider = uuid.New() - -func TestThingSave(t *testing.T) { - thingCache := redis.NewThingCache(redisClient) - key, err := idProvider.ID() - require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - id := "123" - id2 := "124" - - err = thingCache.Save(context.Background(), key, id2) - require.Nil(t, err, fmt.Sprintf("Save thing to cache: expected nil got %s", err)) - - cases := []struct { - desc string - ID string - key string - err error - }{ - { - desc: "Save thing to cache", - ID: id, - key: key, - err: nil, - }, - { - desc: "Save already cached thing to cache", - ID: id2, - key: key, - err: nil, - }, - } - - for _, tc := range cases { - err := thingCache.Save(context.Background(), tc.key, tc.ID) - assert.Nil(t, err, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.err, err)) - - } -} - -func TestThingID(t *testing.T) { - thingCache := redis.NewThingCache(redisClient) - - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - id := "123" - err = thingCache.Save(context.Background(), key, id) - assert.Nil(t, err, fmt.Sprintf("Save thing to cache: 
expected nil got %s", err)) - - cases := map[string]struct { - ID string - key string - err error - }{ - "Get ID by existing thing-key": { - ID: id, - key: key, - err: nil, - }, - "Get ID by non-existing thing-key": { - ID: "", - key: wrongValue, - err: r.Nil, - }, - } - - for desc, tc := range cases { - cacheID, err := thingCache.ID(context.Background(), tc.key) - assert.Equal(t, tc.ID, cacheID, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.ID, cacheID)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) - } -} - -func TestThingRemove(t *testing.T) { - thingCache := redis.NewThingCache(redisClient) - - key, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) - id := "123" - id2 := "321" - err = thingCache.Save(context.Background(), key, id) - assert.Nil(t, err, fmt.Sprintf("got unexpected error while saving thingKey-thingID pair: %s", err)) - - cases := []struct { - desc string - ID string - err error - }{ - { - desc: "Remove existing thing from cache", - ID: id, - err: nil, - }, - { - desc: "Remove non-existing thing from cache", - ID: id2, - err: nil, - }, - } - - for _, tc := range cases { - err := thingCache.Remove(context.Background(), tc.ID) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } - -} diff --git a/things/service.go b/things/service.go deleted file mode 100644 index 990757e7ee..0000000000 --- a/things/service.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package things - -import ( - "context" - "fmt" - - "github.com/mainflux/mainflux/pkg/errors" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/ulid" -) - -const ( - usersObjectKey = "users" - authoritiesObject = "authorities" - memberRelationKey = "member" - readRelationKey = "read" - writeRelationKey = "write" - deleteRelationKey = "delete" -) - -// Service 
specifies an API that must be fulfilled by the domain service -// implementation, and all of its decorators (e.g. logging & metrics). -type Service interface { - // CreateThings adds things to the user identified by the provided key. - CreateThings(ctx context.Context, token string, things ...Thing) ([]Thing, error) - - // UpdateThing updates the thing identified by the provided ID, that - // belongs to the user identified by the provided key. - UpdateThing(ctx context.Context, token string, thing Thing) error - - // ShareThing gives actions associated with the thing to the given user IDs. - // The requester user identified by the token has to have a "write" relation - // on the thing in order to share the thing. - ShareThing(ctx context.Context, token, thingID string, actions, userIDs []string) error - - // UpdateKey updates key value of the existing thing. A non-nil error is - // returned to indicate operation failure. - UpdateKey(ctx context.Context, token, id, key string) error - - // ViewThing retrieves data about the thing identified with the provided - // ID, that belongs to the user identified by the provided key. - ViewThing(ctx context.Context, token, id string) (Thing, error) - - // ListThings retrieves data about subset of things that belongs to the - // user identified by the provided key. - ListThings(ctx context.Context, token string, pm PageMetadata) (Page, error) - - // ListThingsByChannel retrieves data about subset of things that are - // connected or not connected to specified channel and belong to the user identified by - // the provided key. - ListThingsByChannel(ctx context.Context, token, chID string, pm PageMetadata) (Page, error) - - // RemoveThing removes the thing identified with the provided ID, that - // belongs to the user identified by the provided key. - RemoveThing(ctx context.Context, token, id string) error - - // CreateChannels adds channels to the user identified by the provided key. 
- CreateChannels(ctx context.Context, token string, channels ...Channel) ([]Channel, error) - - // UpdateChannel updates the channel identified by the provided ID, that - // belongs to the user identified by the provided key. - UpdateChannel(ctx context.Context, token string, channel Channel) error - - // ViewChannel retrieves data about the channel identified by the provided - // ID, that belongs to the user identified by the provided key. - ViewChannel(ctx context.Context, token, id string) (Channel, error) - - // ListChannels retrieves data about subset of channels that belongs to the - // user identified by the provided key. - ListChannels(ctx context.Context, token string, pm PageMetadata) (ChannelsPage, error) - - // ListChannelsByThing retrieves data about subset of channels that have - // specified thing connected or not connected to them and belong to the user identified by - // the provided key. - ListChannelsByThing(ctx context.Context, token, thID string, pm PageMetadata) (ChannelsPage, error) - - // RemoveChannel removes the thing identified by the provided ID, that - // belongs to the user identified by the provided key. - RemoveChannel(ctx context.Context, token, id string) error - - // Connect adds things to the channels list of connected things. - Connect(ctx context.Context, token string, chIDs, thIDs []string) error - - // Disconnect removes things from the channels list of connected - // things. - Disconnect(ctx context.Context, token string, chIDs, thIDs []string) error - - // CanAccessByKey determines whether the channel can be accessed using the - // provided key and returns thing's id if access is allowed. - CanAccessByKey(ctx context.Context, chanID, key string) (string, error) - - // CanAccessByID determines whether the channel can be accessed by - // the given thing and returns error if it cannot. 
- CanAccessByID(ctx context.Context, chanID, thingID string) error - - // IsChannelOwner determines whether the channel can be accessed by - // the given user and returns error if it cannot. - IsChannelOwner(ctx context.Context, owner, chanID string) error - - // Identify returns thing ID for given thing key. - Identify(ctx context.Context, key string) (string, error) - - // ListMembers retrieves everything that is assigned to a group identified by groupID. - ListMembers(ctx context.Context, token, groupID string, pm PageMetadata) (Page, error) -} - -// PageMetadata contains page metadata that helps navigation. -type PageMetadata struct { - Total uint64 - Offset uint64 `json:"offset,omitempty"` - Limit uint64 `json:"limit,omitempty"` - Name string `json:"name,omitempty"` - Order string `json:"order,omitempty"` - Dir string `json:"dir,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - Disconnected bool // Used for connected or disconnected lists - FetchSharedThings bool // Used for identifying fetching either all or shared things. -} - -var _ Service = (*thingsService)(nil) - -type thingsService struct { - auth mainflux.AuthServiceClient - things ThingRepository - channels ChannelRepository - channelCache ChannelCache - thingCache ThingCache - idProvider mainflux.IDProvider - ulidProvider mainflux.IDProvider -} - -// New instantiates the things service implementation. 
-func New(auth mainflux.AuthServiceClient, things ThingRepository, channels ChannelRepository, ccache ChannelCache, tcache ThingCache, idp mainflux.IDProvider) Service { - return &thingsService{ - auth: auth, - things: things, - channels: channels, - channelCache: ccache, - thingCache: tcache, - idProvider: idp, - ulidProvider: ulid.New(), - } -} - -func (ts *thingsService) CreateThings(ctx context.Context, token string, things ...Thing) ([]Thing, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return []Thing{}, err - } - - if err := ts.authorize(ctx, res.GetId(), usersObjectKey, memberRelationKey); err != nil { - return []Thing{}, err - } - - ths := []Thing{} - for _, thing := range things { - th, err := ts.createThing(ctx, &thing, res) - - if err != nil { - return []Thing{}, err - } - ths = append(ths, th) - } - - return ths, nil -} - -// createThing saves the Thing and adds identity as an owner(Read, Write, Delete policies) of the Thing. 
-func (ts *thingsService) createThing(ctx context.Context, thing *Thing, identity *mainflux.UserIdentity) (Thing, error) { - thing.Owner = identity.GetEmail() - - if thing.ID == "" { - id, err := ts.idProvider.ID() - if err != nil { - return Thing{}, err - } - thing.ID = id - } - - if thing.Key == "" { - key, err := ts.idProvider.ID() - - if err != nil { - return Thing{}, err - } - thing.Key = key - } - - ths, err := ts.things.Save(ctx, *thing) - if err != nil { - return Thing{}, err - } - if len(ths) == 0 { - return Thing{}, errors.ErrCreateEntity - } - - ss := fmt.Sprintf("%s:%s#%s", "members", authoritiesObject, memberRelationKey) - if err := ts.claimOwnership(ctx, ths[0].ID, []string{readRelationKey, writeRelationKey, deleteRelationKey}, []string{identity.GetId(), ss}); err != nil { - return Thing{}, err - } - - return ths[0], nil -} - -func (ts *thingsService) UpdateThing(ctx context.Context, token string, thing Thing) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return err - } - - if err := ts.authorize(ctx, res.GetId(), thing.ID, writeRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return err - } - } - - thing.Owner = res.GetEmail() - - return ts.things.Update(ctx, thing) -} - -func (ts *thingsService) ShareThing(ctx context.Context, token, thingID string, actions, userIDs []string) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return err - } - - if err := ts.authorize(ctx, res.GetId(), thingID, writeRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return err - } - } - - return ts.claimOwnership(ctx, thingID, actions, userIDs) -} - -func (ts *thingsService) claimOwnership(ctx context.Context, objectID string, actions, userIDs []string) error { - var errs error - for _, userID := range userIDs { - for _, action := 
range actions { - apr, err := ts.auth.AddPolicy(ctx, &mainflux.AddPolicyReq{Obj: objectID, Act: action, Sub: userID}) - if err != nil { - errs = errors.Wrap(fmt.Errorf("cannot claim ownership on object '%s' by user '%s': %w", objectID, userID, err), errs) - } - if !apr.GetAuthorized() { - errs = errors.Wrap(fmt.Errorf("cannot claim ownership on object '%s' by user '%s': unauthorized", objectID, userID), errs) - } - } - } - return errs -} - -func (ts *thingsService) UpdateKey(ctx context.Context, token, id, key string) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - - if err := ts.authorize(ctx, res.GetId(), id, writeRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return errors.Wrap(errors.ErrNotFound, err) - } - } - - owner := res.GetEmail() - - return ts.things.UpdateKey(ctx, owner, id, key) -} - -func (ts *thingsService) ViewThing(ctx context.Context, token, id string) (Thing, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return Thing{}, errors.Wrap(errors.ErrAuthentication, err) - } - - if err := ts.authorize(ctx, res.GetId(), id, readRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return Thing{}, errors.Wrap(errors.ErrNotFound, err) - } - } - - return ts.things.RetrieveByID(ctx, res.GetEmail(), id) -} - -func (ts *thingsService) ListThings(ctx context.Context, token string, pm PageMetadata) (Page, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return Page{}, errors.Wrap(errors.ErrAuthentication, err) - } - - subject := res.GetId() - // If the user is admin, fetch all things from database. 
- if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err == nil { - pm.FetchSharedThings = true - page, err := ts.things.RetrieveAll(ctx, res.GetEmail(), pm) - if err != nil { - return Page{}, err - } - return page, err - } - - // If the user is not admin, check 'shared' parameter from page metadata. - // If user provides 'shared' key, fetch things from policies. Otherwise, - // fetch things from the database based on thing's 'owner' field. - if pm.FetchSharedThings { - req := &mainflux.ListPoliciesReq{Act: "read", Sub: subject} - lpr, err := ts.auth.ListPolicies(ctx, req) - if err != nil { - return Page{}, err - } - - var page Page - for _, thingID := range lpr.Policies { - page.Things = append(page.Things, Thing{ID: thingID}) - } - return page, nil - } - - // By default, fetch things from Things service. - page, err := ts.things.RetrieveAll(ctx, res.GetEmail(), pm) - if err != nil { - return Page{}, err - } - - return page, nil -} - -func (ts *thingsService) ListThingsByChannel(ctx context.Context, token, chID string, pm PageMetadata) (Page, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return Page{}, errors.Wrap(errors.ErrAuthentication, err) - } - - return ts.things.RetrieveByChannel(ctx, res.GetEmail(), chID, pm) -} - -func (ts *thingsService) RemoveThing(ctx context.Context, token, id string) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - - } - - if err := ts.authorize(ctx, res.GetId(), id, deleteRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return errors.Wrap(errors.ErrNotFound, err) - } - } - - if err := ts.thingCache.Remove(ctx, id); err != nil { - return err - } - return ts.things.Remove(ctx, res.GetEmail(), id) -} - -func (ts *thingsService) CreateChannels(ctx context.Context, token string, channels 
...Channel) ([]Channel, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return []Channel{}, errors.Wrap(errors.ErrAuthentication, err) - } - - chs := []Channel{} - for _, channel := range channels { - ch, err := ts.createChannel(ctx, &channel, res) - if err != nil { - return []Channel{}, err - } - chs = append(chs, ch) - } - return chs, nil -} - -func (ts *thingsService) createChannel(ctx context.Context, channel *Channel, identity *mainflux.UserIdentity) (Channel, error) { - if channel.ID == "" { - chID, err := ts.idProvider.ID() - if err != nil { - return Channel{}, err - } - channel.ID = chID - } - channel.Owner = identity.GetEmail() - - chs, err := ts.channels.Save(ctx, *channel) - if err != nil { - return Channel{}, err - } - if len(chs) == 0 { - return Channel{}, errors.ErrCreateEntity - } - - ss := fmt.Sprintf("%s:%s#%s", "members", authoritiesObject, memberRelationKey) - if err := ts.claimOwnership(ctx, chs[0].ID, []string{readRelationKey, writeRelationKey, deleteRelationKey}, []string{identity.GetId(), ss}); err != nil { - return Channel{}, err - } - return chs[0], nil -} - -func (ts *thingsService) UpdateChannel(ctx context.Context, token string, channel Channel) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - - if err := ts.authorize(ctx, res.GetId(), channel.ID, writeRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return errors.Wrap(errors.ErrNotFound, err) - } - } - - channel.Owner = res.GetEmail() - return ts.channels.Update(ctx, channel) -} - -func (ts *thingsService) ViewChannel(ctx context.Context, token, id string) (Channel, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return Channel{}, errors.Wrap(errors.ErrAuthentication, err) - } - - if err := ts.authorize(ctx, 
res.GetId(), id, readRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return Channel{}, errors.Wrap(errors.ErrNotFound, err) - } - } - - return ts.channels.RetrieveByID(ctx, res.GetEmail(), id) -} - -func (ts *thingsService) ListChannels(ctx context.Context, token string, pm PageMetadata) (ChannelsPage, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return ChannelsPage{}, errors.Wrap(errors.ErrAuthentication, err) - } - - // If the user is admin, fetch all channels from the database. - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err == nil { - pm.FetchSharedThings = true - page, err := ts.channels.RetrieveAll(ctx, res.GetEmail(), pm) - if err != nil { - return ChannelsPage{}, err - } - return page, err - } - - // By default, fetch channels from database based on the owner field. - return ts.channels.RetrieveAll(ctx, res.GetEmail(), pm) -} - -func (ts *thingsService) ListChannelsByThing(ctx context.Context, token, thID string, pm PageMetadata) (ChannelsPage, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return ChannelsPage{}, errors.Wrap(errors.ErrAuthentication, err) - } - - return ts.channels.RetrieveByThing(ctx, res.GetEmail(), thID, pm) -} - -func (ts *thingsService) RemoveChannel(ctx context.Context, token, id string) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - - if err := ts.authorize(ctx, res.GetId(), id, deleteRelationKey); err != nil { - if err := ts.authorize(ctx, res.GetId(), authoritiesObject, memberRelationKey); err != nil { - return errors.Wrap(errors.ErrNotFound, err) - } - } - - if err := ts.channelCache.Remove(ctx, id); err != nil { - return err - } - - return ts.channels.Remove(ctx, res.GetEmail(), id) -} - -func (ts *thingsService) 
Connect(ctx context.Context, token string, chIDs, thIDs []string) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - - return ts.channels.Connect(ctx, res.GetEmail(), chIDs, thIDs) -} - -func (ts *thingsService) Disconnect(ctx context.Context, token string, chIDs, thIDs []string) error { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - - for _, chID := range chIDs { - for _, thID := range thIDs { - if err := ts.channelCache.Disconnect(ctx, chID, thID); err != nil { - return err - } - } - } - - return ts.channels.Disconnect(ctx, res.GetEmail(), chIDs, thIDs) -} - -func (ts *thingsService) CanAccessByKey(ctx context.Context, chanID, thingKey string) (string, error) { - thingID, err := ts.hasThing(ctx, chanID, thingKey) - if err == nil { - return thingID, nil - } - - thingID, err = ts.channels.HasThing(ctx, chanID, thingKey) - if err != nil { - return "", err - } - - if err := ts.thingCache.Save(ctx, thingKey, thingID); err != nil { - return "", err - } - if err := ts.channelCache.Connect(ctx, chanID, thingID); err != nil { - return "", err - } - return thingID, nil -} - -func (ts *thingsService) CanAccessByID(ctx context.Context, chanID, thingID string) error { - if connected := ts.channelCache.HasThing(ctx, chanID, thingID); connected { - return nil - } - - if err := ts.channels.HasThingByID(ctx, chanID, thingID); err != nil { - return err - } - - if err := ts.channelCache.Connect(ctx, chanID, thingID); err != nil { - return err - } - return nil -} - -func (ts *thingsService) IsChannelOwner(ctx context.Context, owner, chanID string) error { - if _, err := ts.channels.RetrieveByID(ctx, owner, chanID); err != nil { - return err - } - return nil -} - -func (ts *thingsService) Identify(ctx context.Context, key string) (string, error) { - id, err := ts.thingCache.ID(ctx, key) - if 
err == nil { - return id, nil - } - - id, err = ts.things.RetrieveByKey(ctx, key) - if err != nil { - return "", err - } - - if err := ts.thingCache.Save(ctx, key, id); err != nil { - return "", err - } - return id, nil -} - -func (ts *thingsService) hasThing(ctx context.Context, chanID, thingKey string) (string, error) { - thingID, err := ts.thingCache.ID(ctx, thingKey) - if err != nil { - return "", err - } - - if connected := ts.channelCache.HasThing(ctx, chanID, thingID); !connected { - return "", errors.ErrAuthorization - } - return thingID, nil -} - -func (ts *thingsService) ListMembers(ctx context.Context, token, groupID string, pm PageMetadata) (Page, error) { - if _, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}); err != nil { - return Page{}, err - } - - res, err := ts.members(ctx, token, groupID, "things", pm.Offset, pm.Limit) - if err != nil { - return Page{}, nil - } - - return ts.things.RetrieveByIDs(ctx, res, pm) -} - -func (ts *thingsService) members(ctx context.Context, token, groupID, groupType string, limit, offset uint64) ([]string, error) { - req := mainflux.MembersReq{ - Token: token, - GroupID: groupID, - Offset: offset, - Limit: limit, - Type: groupType, - } - - res, err := ts.auth.Members(ctx, &req) - if err != nil { - return nil, nil - } - return res.Members, nil -} - -func (ts *thingsService) authorize(ctx context.Context, subject, object string, relation string) error { - req := &mainflux.AuthorizeReq{ - Sub: subject, - Obj: object, - Act: relation, - } - res, err := ts.auth.Authorize(ctx, req) - if err != nil { - return errors.Wrap(errors.ErrAuthorization, err) - } - if !res.GetAuthorized() { - return errors.ErrAuthorization - } - return nil -} diff --git a/things/service_test.go b/things/service_test.go deleted file mode 100644 index 1b8d4b7304..0000000000 --- a/things/service_test.go +++ /dev/null @@ -1,1497 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package things_test - -import ( - 
"context" - "fmt" - "testing" - "time" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/things" - "github.com/mainflux/mainflux/things/mocks" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - wrongID = "" - wrongValue = "wrong-value" - adminEmail = "admin@example.com" - email = "user@example.com" - email2 = "user2@example.com" - token = "token" - token2 = "token2" - n = uint64(10) - prefix = "fe6b4e92-cc98-425e-b0aa-" -) - -var ( - thing = things.Thing{Name: "test"} - thingList = [n]things.Thing{} - channel = things.Channel{Name: "test"} - thsExtID = []things.Thing{{ID: prefix + "000000000001", Name: "a"}, {ID: prefix + "000000000002", Name: "b"}} - chsExtID = []things.Channel{{ID: prefix + "000000000001", Name: "a"}, {ID: prefix + "000000000002", Name: "b"}} -) - -func newService(tokens map[string]string) things.Service { - userPolicy := mocks.MockSubjectSet{Object: "users", Relation: "member"} - adminPolicy := mocks.MockSubjectSet{Object: "authorities", Relation: "member"} - auth := mocks.NewAuthService(tokens, map[string][]mocks.MockSubjectSet{ - adminEmail: {userPolicy, adminPolicy}, email: {userPolicy}}) - conns := make(chan mocks.Connection) - thingsRepo := mocks.NewThingRepository(conns) - channelsRepo := mocks.NewChannelRepository(thingsRepo, conns) - chanCache := mocks.NewChannelCache() - thingCache := mocks.NewThingCache() - idProvider := uuid.NewMock() - - return things.New(auth, thingsRepo, channelsRepo, chanCache, thingCache, idProvider) -} - -func TestInit(t *testing.T) { - for i := uint64(0); i < n; i++ { - thingList[i].Name = fmt.Sprintf("name-%d", i+1) - thingList[i].ID = fmt.Sprintf("%s%012d", prefix, i+1) - thingList[i].Key = fmt.Sprintf("%s1%011d", prefix, i+1) - } -} - -func TestCreateThings(t *testing.T) { - svc := newService(map[string]string{token: email}) - - cases := []struct { - desc string - things []things.Thing - token 
string - err error - }{ - { - desc: "create new things", - things: []things.Thing{{Name: "a"}, {Name: "b"}, {Name: "c"}, {Name: "d"}}, - token: token, - err: nil, - }, - { - desc: "create thing with wrong credentials", - things: []things.Thing{{Name: "e"}}, - token: wrongValue, - err: errors.ErrAuthentication, - }, - { - desc: "create new things with external UUID", - things: thsExtID, - token: token, - err: nil, - }, - { - desc: "create new things with external wrong UUID", - things: []things.Thing{{ID: "b0aa-000000000001", Name: "a"}, {ID: "b0aa-000000000002", Name: "b"}}, - token: token, - err: nil, - }, - } - - for _, tc := range cases { - _, err := svc.CreateThings(context.Background(), tc.token, tc.things...) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestUpdateThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - other := things.Thing{ID: wrongID, Key: "x"} - - cases := []struct { - desc string - thing things.Thing - token string - err error - }{ - { - desc: "update existing thing", - thing: th, - token: token, - err: nil, - }, - { - desc: "update thing with wrong credentials", - thing: th, - token: wrongValue, - err: errors.ErrAuthentication, - }, - { - desc: "update non-existing thing", - thing: other, - token: token, - err: errors.ErrAuthorization, - }, - } - - for _, tc := range cases { - err := svc.UpdateThing(context.Background(), tc.token, tc.thing) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestUpdateKey(t *testing.T) { - key := "new-key" - svc := newService(map[string]string{token: email}) - ths, err := svc.CreateThings(context.Background(), token, thing) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", 
err)) - th := ths[0] - - cases := []struct { - desc string - token string - id string - key string - err error - }{ - { - desc: "update key of an existing thing", - token: token, - id: th.ID, - key: key, - err: nil, - }, - { - desc: "update key with invalid credentials", - token: wrongValue, - id: th.ID, - key: key, - err: errors.ErrAuthentication, - }, - { - desc: "update key of non-existing thing", - token: token, - id: wrongID, - key: wrongValue, - err: errors.ErrAuthorization, - }, - } - - for _, tc := range cases { - err := svc.UpdateKey(context.Background(), tc.token, tc.id, tc.key) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestShareThing(t *testing.T) { - svc := newService(map[string]string{token: email, token2: email2}) - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - policies := []string{"read"} - - cases := []struct { - desc string - token string - thingID string - policies []string - userIDs []string - err error - }{ - { - desc: "share a thing with a valid user", - token: token, - thingID: th.ID, - policies: policies, - userIDs: []string{email2}, - err: nil, - }, - { - desc: "share a thing via unauthorized access", - token: token2, - thingID: th.ID, - policies: policies, - userIDs: []string{email2}, - err: errors.ErrAuthorization, - }, - { - desc: "share a thing with invalid token", - token: wrongValue, - thingID: th.ID, - policies: policies, - userIDs: []string{email2}, - err: errors.ErrAuthentication, - }, - { - desc: "share a thing with partially invalid policies", - token: token, - thingID: th.ID, - policies: []string{"", "read"}, - userIDs: []string{email2}, - err: fmt.Errorf("cannot claim ownership on object '%s' by user '%s': %s", th.ID, email2, errors.ErrMalformedEntity), - }, - } - - for _, tc := range cases { - err := svc.ShareThing(context.Background(), 
tc.token, tc.thingID, tc.policies, tc.userIDs) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } - -} - -func TestViewThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - - cases := []struct { - desc string - id string - token string - response things.Thing - err error - }{ - { - desc: "view existing thing", - id: th.ID, - token: token, - response: th, - err: nil, - }, - { - desc: "view thing with wrong credentials", - id: th.ID, - token: wrongValue, - response: things.Thing{}, - err: errors.ErrAuthentication, - }, - { - desc: "view non-existing thing", - id: wrongID, - token: token, - response: things.Thing{}, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - resp, err := svc.ViewThing(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, resp, fmt.Sprintf("%s: got incorrect response from ViewThing", tc.desc)) - } -} - -func TestListThings(t *testing.T) { - svc := newService(map[string]string{token: email}) - - m := make(map[string]interface{}) - m["serial"] = "123456" - thingList[0].Metadata = m - - var ths []things.Thing - for i := uint64(0); i < n; i++ { - th := thingList[i] - ths = append(ths, th) - } - - _, err := svc.CreateThings(context.Background(), token, ths...) 
- require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - token string - pageMetadata things.PageMetadata - size uint64 - err error - }{ - { - desc: "list all things", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: n, - err: nil, - }, - { - desc: "list half", - token: token, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - }, - size: n / 2, - err: nil, - }, - { - desc: "list last thing", - token: token, - pageMetadata: things.PageMetadata{ - Offset: n - 1, - Limit: n, - }, - size: 1, - err: nil, - }, - { - desc: "list empty set", - token: token, - pageMetadata: things.PageMetadata{ - Offset: n + 1, - Limit: n, - }, - size: 0, - err: nil, - }, - { - desc: "list with zero limit", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 1, - Limit: 0, - }, - size: 0, - err: nil, - }, - { - desc: "list with wrong credentials", - token: wrongValue, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: 0, - }, - size: 0, - err: errors.ErrAuthentication, - }, - { - desc: "list with metadata", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Metadata: m, - }, - size: n, - err: nil, - }, - { - desc: "list all things sorted by name ascendent", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "asc", - }, - size: n, - err: nil, - }, - { - desc: "list all things sorted by name descendent", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "desc", - }, - size: n, - err: nil, - }, - } - - for _, tc := range cases { - page, err := svc.ListThings(context.Background(), tc.token, tc.pageMetadata) - size := uint64(len(page.Things)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, 
err)) - - // Check if Things list have been sorted properly - testSortThings(t, tc.pageMetadata, page.Things) - } -} - -func TestListThingsByChannel(t *testing.T) { - svc := newService(map[string]string{token: email}) - - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - ch := chs[0] - - thsDisconNum := uint64(4) - - var ths []things.Thing - for i := uint64(0); i < n; i++ { - th := thingList[i] - ths = append(ths, th) - } - - thsc, err := svc.CreateThings(context.Background(), token, ths...) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - var thIDs []string - for _, thID := range thsc { - thIDs = append(thIDs, thID.ID) - } - chIDs := []string{chs[0].ID} - - err = svc.Connect(context.Background(), token, chIDs, thIDs[0:n-thsDisconNum]) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - // Wait for things and channels to connect - time.Sleep(time.Second) - - cases := []struct { - desc string - token string - chID string - pageMetadata things.PageMetadata - size uint64 - err error - }{ - { - desc: "list all things by existing channel", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: n - thsDisconNum, - err: nil, - }, - { - desc: "list half of things by existing channel", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - }, - size: (n / 2) - thsDisconNum, - err: nil, - }, - { - desc: "list last thing by existing channel", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: n - 1 - thsDisconNum, - Limit: n, - }, - size: 1, - err: nil, - }, - { - desc: "list empty set of things by existing channel", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: n + 1, - Limit: n, - }, - size: 0, - err: nil, - }, - { - desc: "list things by existing channel with zero limit", - token: token, - chID: ch.ID, 
- pageMetadata: things.PageMetadata{ - Offset: 1, - Limit: 0, - }, - size: 0, - err: nil, - }, - { - desc: "list things by existing channel with wrong credentials", - token: wrongValue, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: 0, - }, - size: 0, - err: errors.ErrAuthentication, - }, - { - desc: "list things by non-existent channel with wrong credentials", - token: token, - chID: "non-existent", - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 0, - err: nil, - }, - { - desc: "list all non connected things by existing channel", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - }, - size: thsDisconNum, - err: nil, - }, - { - desc: "list all things by channel sorted by name ascendent", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "asc", - }, - size: n - thsDisconNum, - err: nil, - }, - { - desc: "list all non-connected things by channel sorted by name ascendent", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "asc", - }, - size: thsDisconNum, - err: nil, - }, - { - desc: "list all things by channel sorted by name descendent", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "desc", - }, - size: n - thsDisconNum, - err: nil, - }, - { - desc: "list all non-connected things by channel sorted by name descendent", - token: token, - chID: ch.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "desc", - }, - size: thsDisconNum, - err: nil, - }, - } - - for _, tc := range cases { - page, err := svc.ListThingsByChannel(context.Background(), tc.token, tc.chID, tc.pageMetadata) - size := uint64(len(page.Things)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected 
%d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - // Check if Things by Channel list have been sorted properly - testSortThings(t, tc.pageMetadata, page.Things) - } -} - -func TestRemoveThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - sth := ths[0] - - cases := []struct { - desc string - id string - token string - err error - }{ - { - desc: "remove thing with wrong credentials", - id: sth.ID, - token: wrongValue, - err: errors.ErrAuthentication, - }, - { - desc: "remove existing thing", - id: sth.ID, - token: token, - err: nil, - }, - { - desc: "remove removed thing", - id: sth.ID, - token: token, - err: nil, - }, - { - desc: "remove non-existing thing", - id: wrongID, - token: token, - err: errors.ErrAuthorization, - }, - } - - for _, tc := range cases { - err := svc.RemoveThing(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestCreateChannels(t *testing.T) { - svc := newService(map[string]string{token: email}) - - cases := []struct { - desc string - channels []things.Channel - token string - err error - }{ - { - desc: "create new channels", - channels: []things.Channel{{Name: "a"}, {Name: "b"}, {Name: "c"}, {Name: "d"}}, - token: token, - err: nil, - }, - { - desc: "create channel with wrong credentials", - channels: []things.Channel{{Name: "e"}}, - token: wrongValue, - err: errors.ErrAuthentication, - }, - { - desc: "create new channels with external UUID", - channels: chsExtID, - token: token, - err: nil, - }, - { - desc: "create new channels with invalid external UUID", - channels: []things.Channel{{ID: "b0aa-000000000001", Name: "a"}, {ID: "b0aa-000000000002", Name: "b"}}, - 
token: token, - err: nil, - }, - } - - for _, cc := range cases { - _, err := svc.CreateChannels(context.Background(), cc.token, cc.channels...) - assert.True(t, errors.Contains(err, cc.err), fmt.Sprintf("%s: expected %s got %s\n", cc.desc, cc.err, err)) - } -} - -func TestUpdateChannel(t *testing.T) { - svc := newService(map[string]string{token: adminEmail}) - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - other := things.Channel{ID: wrongID} - - cases := []struct { - desc string - channel things.Channel - token string - err error - }{ - { - desc: "update existing channel", - channel: ch, - token: token, - err: nil, - }, - { - desc: "update channel with wrong credentials", - channel: ch, - token: wrongValue, - err: errors.ErrAuthentication, - }, - { - desc: "update non-existing channel", - channel: other, - token: token, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.UpdateChannel(context.Background(), tc.token, tc.channel) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestViewChannel(t *testing.T) { - svc := newService(map[string]string{token: adminEmail}) - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - - cases := []struct { - desc string - id string - token string - response things.Channel - err error - metadata things.Metadata - }{ - { - desc: "view existing channel", - id: ch.ID, - token: token, - response: ch, - err: nil, - }, - { - desc: "view channel with wrong credentials", - id: ch.ID, - token: wrongValue, - response: things.Channel{}, - err: errors.ErrAuthentication, - }, - { - desc: "view non-existing channel", - id: wrongID, - token: token, - response: things.Channel{}, - err: errors.ErrNotFound, - }, - { - desc: "view channel with 
metadata", - id: wrongID, - token: token, - response: things.Channel{}, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - resp, err := svc.ViewChannel(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, resp, fmt.Sprintf("%s: got incorrect response from ViewChannel()", tc.desc)) - } -} - -func TestListChannels(t *testing.T) { - svc := newService(map[string]string{token: email}) - meta := things.Metadata{} - meta["name"] = "test-channel" - channel.Metadata = meta - - var chs []things.Channel - for i := uint64(0); i < n; i++ { - ch := channel - ch.Name = fmt.Sprintf("name-%d", i) - chs = append(chs, ch) - } - - _, err := svc.CreateChannels(context.Background(), token, chs...) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - token string - pageMetadata things.PageMetadata - size uint64 - err error - }{ - { - desc: "list all channels", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: n, - err: nil, - }, - { - desc: "list half", - token: token, - pageMetadata: things.PageMetadata{ - Offset: n / 2, - Limit: n, - }, - size: n / 2, - err: nil, - }, - { - desc: "list last channel", - token: token, - pageMetadata: things.PageMetadata{ - Offset: n - 1, - Limit: n, - }, - size: 1, - err: nil, - }, - { - desc: "list empty set", - token: token, - pageMetadata: things.PageMetadata{ - Offset: n + 1, - Limit: n, - }, - size: 0, - err: nil, - }, - { - desc: "list with zero limit and offset 1", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 1, - Limit: 0, - }, - size: n - 1, - err: nil, - }, - { - desc: "list with wrong credentials", - token: wrongValue, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: 0, - }, - size: 0, - err: errors.ErrAuthentication, - }, - { - desc: "list with existing name", - token: token, - 
pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Name: "chanel_name", - }, - size: n, - err: nil, - }, - { - desc: "list with non-existent name", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Name: "wrong", - }, - size: n, - err: nil, - }, - { - desc: "list all channels with metadata", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Metadata: meta, - }, - size: n, - err: nil, - }, - { - desc: "list all channels sorted by name ascendent", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "asc", - }, - size: n, - err: nil, - }, - { - desc: "list all channels sorted by name descendent", - token: token, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "desc", - }, - size: n, - err: nil, - }, - } - - for _, tc := range cases { - page, err := svc.ListChannels(context.Background(), tc.token, tc.pageMetadata) - size := uint64(len(page.Channels)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - // Check if channels list have been sorted properly - testSortChannels(t, tc.pageMetadata, page.Channels) - } -} - -func TestListChannelsByThing(t *testing.T) { - svc := newService(map[string]string{token: email}) - - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - th := ths[0] - - chsDisconNum := uint64(4) - - var chs []things.Channel - for i := uint64(0); i < n; i++ { - ch := channel - ch.Name = fmt.Sprintf("name-%d", i) - chs = append(chs, ch) - } - - chsc, err := svc.CreateChannels(context.Background(), token, chs...) 
- require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - var chIDs []string - for _, chID := range chsc { - chIDs = append(chIDs, chID.ID) - } - thIDs := []string{ths[0].ID} - - err = svc.Connect(context.Background(), token, chIDs[0:n-chsDisconNum], thIDs) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - // Wait for things and channels to connect. - time.Sleep(time.Second) - - cases := []struct { - desc string - token string - thID string - pageMetadata things.PageMetadata - size uint64 - err error - }{ - { - desc: "list all channels by existing thing", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: n - chsDisconNum, - err: nil, - }, - { - desc: "list half of channels by existing thing", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: (n - chsDisconNum) / 2, - Limit: n, - }, - size: (n - chsDisconNum) / 2, - err: nil, - }, - { - desc: "list last channel by existing thing", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: n - 1 - chsDisconNum, - Limit: n, - }, - size: 1, - err: nil, - }, - { - desc: "list empty set of channels by existing thing", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: n + 1, - Limit: n, - }, - size: 0, - err: nil, - }, - { - desc: "list channels by existing thing with zero limit", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 1, - Limit: 0, - }, - size: 0, - err: nil, - }, - { - desc: "list channels by existing thing with wrong credentials", - token: wrongValue, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: 0, - }, - size: 0, - err: errors.ErrAuthentication, - }, - { - desc: "list channels by non-existent thing", - token: token, - thID: "non-existent", - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - }, - size: 0, - err: nil, - }, - { - desc: "list all non-connected channels by existing 
thing", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - }, - size: chsDisconNum, - err: nil, - }, - { - desc: "list all channels by thing sorted by name ascendent", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "asc", - }, - size: n - chsDisconNum, - err: nil, - }, - { - desc: "list all non-connected channels by thing sorted by name ascendent", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "asc", - }, - size: chsDisconNum, - err: nil, - }, - { - desc: "list all channels by thing sorted by name descendent", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Order: "name", - Dir: "desc", - }, - size: n - chsDisconNum, - err: nil, - }, - { - desc: "list all non-connected channels by thing sorted by name descendent", - token: token, - thID: th.ID, - pageMetadata: things.PageMetadata{ - Offset: 0, - Limit: n, - Disconnected: true, - Order: "name", - Dir: "desc", - }, - size: chsDisconNum, - err: nil, - }, - } - - for _, tc := range cases { - page, err := svc.ListChannelsByThing(context.Background(), tc.token, tc.thID, tc.pageMetadata) - size := uint64(len(page.Channels)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - // Check if Channels by Thing list have been sorted properly - testSortChannels(t, tc.pageMetadata, page.Channels) - } -} - -func TestRemoveChannel(t *testing.T) { - svc := newService(map[string]string{token: adminEmail}) - chs, err := svc.CreateChannels(context.Background(), token, channel) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - - cases := []struct { - desc string - id string - 
token string - err error - }{ - { - desc: "remove channel with wrong credentials", - id: ch.ID, - token: wrongValue, - err: errors.ErrAuthentication, - }, - { - desc: "remove existing channel", - id: ch.ID, - token: token, - err: nil, - }, - { - desc: "remove removed channel", - id: ch.ID, - token: token, - err: nil, - }, - { - desc: "remove non-existing channel", - id: ch.ID, - token: token, - err: nil, - }, - } - - for _, tc := range cases { - err := svc.RemoveChannel(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestConnect(t *testing.T) { - svc := newService(map[string]string{token: email}) - - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - chs, err := svc.CreateChannels(context.Background(), token, channel) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - - cases := []struct { - desc string - token string - chanID string - thingID string - err error - }{ - { - desc: "connect thing", - token: token, - chanID: ch.ID, - thingID: th.ID, - err: nil, - }, - { - desc: "connect thing with wrong credentials", - token: wrongValue, - chanID: ch.ID, - thingID: th.ID, - err: errors.ErrAuthentication, - }, - { - desc: "connect thing to non-existing channel", - token: token, - chanID: wrongID, - thingID: th.ID, - err: errors.ErrNotFound, - }, - { - desc: "connect non-existing thing to channel", - token: token, - chanID: ch.ID, - thingID: wrongID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.Connect(context.Background(), tc.token, []string{tc.chanID}, []string{tc.thingID}) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestDisconnect(t *testing.T) { - svc := newService(map[string]string{token: email}) - - ths, 
err := svc.CreateThings(context.Background(), token, thingList[0]) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - chs, err := svc.CreateChannels(context.Background(), token, channel) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID}) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - cases := []struct { - desc string - token string - chanID string - thingID string - err error - }{ - { - desc: "disconnect connected thing", - token: token, - chanID: ch.ID, - thingID: th.ID, - err: nil, - }, - { - desc: "disconnect disconnected thing", - token: token, - chanID: ch.ID, - thingID: th.ID, - err: errors.ErrNotFound, - }, - { - desc: "disconnect with wrong credentials", - token: wrongValue, - chanID: ch.ID, - thingID: th.ID, - err: errors.ErrAuthentication, - }, - { - desc: "disconnect from non-existing channel", - token: token, - chanID: wrongID, - thingID: th.ID, - err: errors.ErrNotFound, - }, - { - desc: "disconnect non-existing thing", - token: token, - chanID: ch.ID, - thingID: wrongID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.Disconnect(context.Background(), tc.token, []string{tc.chanID}, []string{tc.thingID}) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } - -} - -func TestCanAccessByKey(t *testing.T) { - svc := newService(map[string]string{token: email}) - - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - chs, err := svc.CreateChannels(context.Background(), token, channel, channel) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - err = svc.Connect(context.Background(), token, []string{chs[0].ID}, []string{ths[0].ID}) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - cases 
:= []struct { - desc string - token string - channel string - err error - }{ - { - desc: "allowed access", - token: ths[0].Key, - channel: chs[0].ID, - err: nil, - }, - { - desc: "non-existing thing", - token: wrongValue, - channel: chs[0].ID, - err: errors.ErrNotFound, - }, - { - desc: "non-existing chan", - token: ths[0].Key, - channel: wrongValue, - err: errors.ErrAuthorization, - }, - { - desc: "non-connected channel", - token: ths[0].Key, - channel: chs[1].ID, - err: errors.ErrAuthorization, - }, - } - - for _, tc := range cases { - _, err := svc.CanAccessByKey(context.Background(), tc.channel, tc.token) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected '%s' got '%s'\n", tc.desc, tc.err, err)) - } -} - -func TestCanAccessByID(t *testing.T) { - svc := newService(map[string]string{token: email}) - - ths, err := svc.CreateThings(context.Background(), token, thingList[0], thingList[1]) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - chs, err := svc.CreateChannels(context.Background(), token, channel) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ch := chs[0] - err = svc.Connect(context.Background(), token, []string{ch.ID}, []string{th.ID}) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - - cases := []struct { - desc string - thingID string - channel string - err error - }{ - { - desc: "allowed access", - thingID: th.ID, - channel: ch.ID, - err: nil, - }, - { - desc: "access to non-existing thing", - thingID: wrongValue, - channel: ch.ID, - err: errors.ErrAuthorization, - }, - { - desc: "access to non-existing channel", - thingID: th.ID, - channel: wrongID, - err: errors.ErrAuthorization, - }, - { - desc: "access to not-connected thing", - thingID: ths[1].ID, - channel: ch.ID, - err: errors.ErrAuthorization, - }, - } - - for _, tc := range cases { - err := svc.CanAccessByID(context.Background(), tc.channel, tc.thingID) - assert.True(t, errors.Contains(err, tc.err), 
fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestIsChannelOwner(t *testing.T) { - svc := newService(map[string]string{token: email, token2: "john.doe@email.net"}) - - chs, err := svc.CreateChannels(context.Background(), token, channel) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - ownedCh := chs[0] - chs, err = svc.CreateChannels(context.Background(), token2, channel) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - nonOwnedCh := chs[0] - - cases := []struct { - desc string - channel string - err error - }{ - { - desc: "user owns channel", - channel: ownedCh.ID, - err: nil, - }, - { - desc: "user does not own channel", - channel: nonOwnedCh.ID, - err: errors.ErrNotFound, - }, - { - desc: "access to non-existing channel", - channel: wrongID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.IsChannelOwner(context.Background(), email, tc.channel) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestIdentify(t *testing.T) { - svc := newService(map[string]string{token: email}) - - ths, err := svc.CreateThings(context.Background(), token, thingList[0]) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) - th := ths[0] - - cases := []struct { - desc string - token string - id string - err error - }{ - { - desc: "identify existing thing", - token: th.Key, - id: th.ID, - err: nil, - }, - { - desc: "identify non-existing thing", - token: wrongValue, - id: wrongID, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - id, err := svc.Identify(context.Background(), tc.token) - assert.Equal(t, tc.id, id, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.id, id)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func testSortThings(t *testing.T, pm things.PageMetadata, ths []things.Thing) { - switch 
pm.Order { - case "name": - current := ths[0] - for _, res := range ths { - if pm.Dir == "asc" { - assert.GreaterOrEqual(t, res.Name, current.Name) - } - if pm.Dir == "desc" { - assert.GreaterOrEqual(t, current.Name, res.Name) - } - current = res - } - default: - break - } -} - -func testSortChannels(t *testing.T, pm things.PageMetadata, chs []things.Channel) { - switch pm.Order { - case "name": - current := chs[0] - for _, res := range chs { - if pm.Dir == "asc" { - assert.GreaterOrEqual(t, res.Name, current.Name) - } - if pm.Dir == "desc" { - assert.GreaterOrEqual(t, current.Name, res.Name) - } - current = res - } - default: - break - } -} diff --git a/things/standalone/standalone.go b/things/standalone/standalone.go deleted file mode 100644 index 8af807ca0b..0000000000 --- a/things/standalone/standalone.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package standalone - -import ( - "context" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - "google.golang.org/grpc" -) - -var errUnsupported = errors.New("not supported in standalone mode") - -var _ mainflux.AuthServiceClient = (*singleUserRepo)(nil) - -type singleUserRepo struct { - email string - token string -} - -// NewAuthService creates single user repository for constrained environments. 
-func NewAuthService(email, token string) mainflux.AuthServiceClient { - return singleUserRepo{ - email: email, - token: token, - } -} - -func (repo singleUserRepo) Issue(ctx context.Context, req *mainflux.IssueReq, opts ...grpc.CallOption) (*mainflux.Token, error) { - if repo.token != req.GetEmail() { - return nil, errors.ErrAuthentication - } - - return &mainflux.Token{Value: repo.token}, nil -} - -func (repo singleUserRepo) Identify(ctx context.Context, token *mainflux.Token, opts ...grpc.CallOption) (*mainflux.UserIdentity, error) { - if repo.token != token.GetValue() { - return nil, errors.ErrAuthentication - } - - return &mainflux.UserIdentity{Id: repo.email, Email: repo.email}, nil -} - -func (repo singleUserRepo) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { - if repo.email != req.Sub { - return &mainflux.AuthorizeRes{}, errUnsupported - } - return &mainflux.AuthorizeRes{Authorized: true}, nil -} - -func (repo singleUserRepo) AddPolicy(ctx context.Context, req *mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { - if repo.email != req.Sub { - return &mainflux.AddPolicyRes{}, errUnsupported - } - return &mainflux.AddPolicyRes{Authorized: true}, nil -} - -func (repo singleUserRepo) DeletePolicy(ctx context.Context, req *mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { - if repo.email != req.Sub { - return &mainflux.DeletePolicyRes{}, errUnsupported - } - return &mainflux.DeletePolicyRes{Deleted: true}, nil -} - -func (repo singleUserRepo) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { - return &mainflux.ListPoliciesRes{}, errUnsupported -} - -func (repo singleUserRepo) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - return &mainflux.MembersRes{}, errUnsupported -} - -func 
(repo singleUserRepo) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, err error) { - return &empty.Empty{}, errUnsupported -} diff --git a/things/standalone/standalone_test.go b/things/standalone/standalone_test.go deleted file mode 100644 index 821670f6ef..0000000000 --- a/things/standalone/standalone_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package standalone_test - -import ( - "context" - "fmt" - "testing" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/things/standalone" - "github.com/stretchr/testify/assert" -) - -const ( - email = "john.doe@example.com" - token = "token" -) - -func TestIdentify(t *testing.T) { - svc := standalone.NewAuthService(email, token) - - cases := []struct { - desc string - token string - id string - err error - }{ - { - desc: "identify non-existing user", - token: "non-existing", - id: "", - err: errors.ErrAuthentication, - }, - { - desc: "identify existing user", - token: token, - id: email, - err: nil, - }, - } - - for _, tc := range cases { - id, err := svc.Identify(context.Background(), &mainflux.Token{Value: tc.token}) - assert.Equal(t, tc.id, id.GetEmail(), fmt.Sprintf("%s: expected %s, got %s", tc.desc, tc.id, id.GetEmail())) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s, got %s", tc.desc, tc.err, err)) - } -} - -func TestIssue(t *testing.T) { - svc := standalone.NewAuthService(email, token) - - cases := []struct { - desc string - token string - id string - err error - }{ - { - desc: "issue key", - token: token, - id: token, - err: nil, - }, - { - desc: "issue key with an invalid token", - token: "non-existing", - id: "", - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - id, err := svc.Issue(context.Background(), &mainflux.IssueReq{Id: tc.id, Email: tc.token, Type: 0}) - assert.Equal(t, tc.id, id.GetValue(), 
fmt.Sprintf("%s: expected %s, got %s", tc.desc, tc.id, id.GetValue())) - assert.Equal(t, tc.err, err, fmt.Sprintf("%s: expected %s, got %s", tc.desc, tc.err, err)) - } -} diff --git a/things/things.go b/things/things.go deleted file mode 100644 index bb3a8149f8..0000000000 --- a/things/things.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package things - -import ( - "context" - - "github.com/mainflux/mainflux/pkg/errors" -) - -var ( - // ErrConnect indicates error in adding connection - ErrConnect = errors.New("add connection failed") - - // ErrDisconnect indicates error in removing connection - ErrDisconnect = errors.New("remove connection failed") - - // ErrEntityConnected indicates error while checking connection in database - ErrEntityConnected = errors.New("check thing-channel connection in database error") -) - -// Metadata to be used for Mainflux thing or channel for customized -// describing of particular thing or channel. -type Metadata map[string]interface{} - -// Thing represents a Mainflux thing. Each thing is owned by one user, and -// it is assigned with the unique identifier and (temporary) access key. -type Thing struct { - ID string - Owner string - Name string - Key string - Metadata Metadata -} - -// Page contains page related metadata as well as list of things that -// belong to this page. -type Page struct { - PageMetadata - Things []Thing -} - -// ThingRepository specifies a thing persistence API. -type ThingRepository interface { - // Save persists multiple things. Things are saved using a transaction. If one thing - // fails then none will be saved. Successful operation is indicated by non-nil - // error response. - Save(ctx context.Context, ths ...Thing) ([]Thing, error) - - // Update performs an update to the existing thing. A non-nil error is - // returned to indicate operation failure. 
- Update(ctx context.Context, t Thing) error - - // UpdateKey updates key value of the existing thing. A non-nil error is - // returned to indicate operation failure. - UpdateKey(ctx context.Context, owner, id, key string) error - - // RetrieveByID retrieves the thing having the provided identifier, that is owned - // by the specified user. - RetrieveByID(ctx context.Context, owner, id string) (Thing, error) - - // RetrieveByKey returns thing ID for given thing key. - RetrieveByKey(ctx context.Context, key string) (string, error) - - // RetrieveAll retrieves the subset of things owned by the specified user - RetrieveAll(ctx context.Context, owner string, pm PageMetadata) (Page, error) - - // RetrieveByIDs retrieves the subset of things specified by given thing ids. - RetrieveByIDs(ctx context.Context, thingIDs []string, pm PageMetadata) (Page, error) - - // RetrieveByChannel retrieves the subset of things owned by the specified - // user and connected or not connected to specified channel. - RetrieveByChannel(ctx context.Context, owner, chID string, pm PageMetadata) (Page, error) - - // Remove removes the thing having the provided identifier, that is owned - // by the specified user. - Remove(ctx context.Context, owner, id string) error -} - -// ThingCache contains thing caching interface. -type ThingCache interface { - // Save stores pair thing key, thing id. - Save(context.Context, string, string) error - - // ID returns thing ID for given key. - ID(context.Context, string) (string, error) - - // Removes thing from cache. 
- Remove(context.Context, string) error -} diff --git a/things/tracing/channels.go b/things/tracing/channels.go deleted file mode 100644 index 6cf4e8e670..0000000000 --- a/things/tracing/channels.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package tracing - -import ( - "context" - - "github.com/mainflux/mainflux/things" - opentracing "github.com/opentracing/opentracing-go" -) - -const ( - saveChannelsOp = "save_channels" - updateChannelOp = "update_channel" - retrieveChannelByIDOp = "retrieve_channel_by_id" - retrieveAllChannelsOp = "retrieve_all_channels" - retrieveChannelsByThingOp = "retrieve_channels_by_thing" - removeChannelOp = "retrieve_channel" - connectOp = "connect" - disconnectOp = "disconnect" - hasThingOp = "has_thing" - hasThingByIDOp = "has_thing_by_id" -) - -var ( - _ things.ChannelRepository = (*channelRepositoryMiddleware)(nil) - _ things.ChannelCache = (*channelCacheMiddleware)(nil) -) - -type channelRepositoryMiddleware struct { - tracer opentracing.Tracer - repo things.ChannelRepository -} - -// ChannelRepositoryMiddleware tracks request and their latency, and adds spans -// to context. -func ChannelRepositoryMiddleware(tracer opentracing.Tracer, repo things.ChannelRepository) things.ChannelRepository { - return channelRepositoryMiddleware{ - tracer: tracer, - repo: repo, - } -} - -func (crm channelRepositoryMiddleware) Save(ctx context.Context, channels ...things.Channel) ([]things.Channel, error) { - span := createSpan(ctx, crm.tracer, saveChannelsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.Save(ctx, channels...) 
-} - -func (crm channelRepositoryMiddleware) Update(ctx context.Context, ch things.Channel) error { - span := createSpan(ctx, crm.tracer, updateChannelOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.Update(ctx, ch) -} - -func (crm channelRepositoryMiddleware) RetrieveByID(ctx context.Context, owner, id string) (things.Channel, error) { - span := createSpan(ctx, crm.tracer, retrieveChannelByIDOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.RetrieveByID(ctx, owner, id) -} - -func (crm channelRepositoryMiddleware) RetrieveAll(ctx context.Context, owner string, pm things.PageMetadata) (things.ChannelsPage, error) { - span := createSpan(ctx, crm.tracer, retrieveAllChannelsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.RetrieveAll(ctx, owner, pm) -} - -func (crm channelRepositoryMiddleware) RetrieveByThing(ctx context.Context, owner, thID string, pm things.PageMetadata) (things.ChannelsPage, error) { - span := createSpan(ctx, crm.tracer, retrieveChannelsByThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.RetrieveByThing(ctx, owner, thID, pm) -} - -func (crm channelRepositoryMiddleware) Remove(ctx context.Context, owner, id string) error { - span := createSpan(ctx, crm.tracer, removeChannelOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.Remove(ctx, owner, id) -} - -func (crm channelRepositoryMiddleware) Connect(ctx context.Context, owner string, chIDs, thIDs []string) error { - span := createSpan(ctx, crm.tracer, connectOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.Connect(ctx, owner, chIDs, thIDs) -} - -func (crm channelRepositoryMiddleware) Disconnect(ctx context.Context, owner string, chIDs, thIDs []string) error { - span := createSpan(ctx, crm.tracer, disconnectOp) - defer span.Finish() - ctx = 
opentracing.ContextWithSpan(ctx, span) - - return crm.repo.Disconnect(ctx, owner, chIDs, thIDs) -} - -func (crm channelRepositoryMiddleware) HasThing(ctx context.Context, chanID, key string) (string, error) { - span := createSpan(ctx, crm.tracer, hasThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.HasThing(ctx, chanID, key) -} - -func (crm channelRepositoryMiddleware) HasThingByID(ctx context.Context, chanID, thingID string) error { - span := createSpan(ctx, crm.tracer, hasThingByIDOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return crm.repo.HasThingByID(ctx, chanID, thingID) -} - -type channelCacheMiddleware struct { - tracer opentracing.Tracer - cache things.ChannelCache -} - -// ChannelCacheMiddleware tracks request and their latency, and adds spans -// to context. -func ChannelCacheMiddleware(tracer opentracing.Tracer, cache things.ChannelCache) things.ChannelCache { - return channelCacheMiddleware{ - tracer: tracer, - cache: cache, - } -} - -func (ccm channelCacheMiddleware) Connect(ctx context.Context, chanID, thingID string) error { - span := createSpan(ctx, ccm.tracer, connectOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return ccm.cache.Connect(ctx, chanID, thingID) -} - -func (ccm channelCacheMiddleware) HasThing(ctx context.Context, chanID, thingID string) bool { - span := createSpan(ctx, ccm.tracer, hasThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return ccm.cache.HasThing(ctx, chanID, thingID) -} - -func (ccm channelCacheMiddleware) Disconnect(ctx context.Context, chanID, thingID string) error { - span := createSpan(ctx, ccm.tracer, disconnectOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return ccm.cache.Disconnect(ctx, chanID, thingID) -} - -func (ccm channelCacheMiddleware) Remove(ctx context.Context, chanID string) error { - span := createSpan(ctx, ccm.tracer, 
removeChannelOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return ccm.cache.Remove(ctx, chanID) -} diff --git a/things/tracing/doc.go b/things/tracing/doc.go deleted file mode 100644 index 12b627215a..0000000000 --- a/things/tracing/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package tracing contains middlewares that will add spans -// to existing traces. -package tracing diff --git a/things/tracing/things.go b/things/tracing/things.go deleted file mode 100644 index ca8855a782..0000000000 --- a/things/tracing/things.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package tracing - -import ( - "context" - - "github.com/mainflux/mainflux/things" - opentracing "github.com/opentracing/opentracing-go" -) - -const ( - saveThingOp = "save_thing" - saveThingsOp = "save_things" - updateThingOp = "update_thing" - updateThingKeyOp = "update_thing_by_key" - retrieveThingByIDOp = "retrieve_thing_by_id" - retrieveThingByKeyOp = "retrieve_thing_by_key" - retrieveAllThingsOp = "retrieve_all_things" - retrieveThingsByChannelOp = "retrieve_things_by_chan" - removeThingOp = "remove_thing" - retrieveThingIDByKeyOp = "retrieve_id_by_key" -) - -var ( - _ things.ThingRepository = (*thingRepositoryMiddleware)(nil) - _ things.ThingCache = (*thingCacheMiddleware)(nil) -) - -type thingRepositoryMiddleware struct { - tracer opentracing.Tracer - repo things.ThingRepository -} - -// ThingRepositoryMiddleware tracks request and their latency, and adds spans -// to context. 
-func ThingRepositoryMiddleware(tracer opentracing.Tracer, repo things.ThingRepository) things.ThingRepository { - return thingRepositoryMiddleware{ - tracer: tracer, - repo: repo, - } -} - -func (trm thingRepositoryMiddleware) Save(ctx context.Context, ths ...things.Thing) ([]things.Thing, error) { - span := createSpan(ctx, trm.tracer, saveThingsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.Save(ctx, ths...) -} - -func (trm thingRepositoryMiddleware) Update(ctx context.Context, th things.Thing) error { - span := createSpan(ctx, trm.tracer, updateThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.Update(ctx, th) -} - -func (trm thingRepositoryMiddleware) UpdateKey(ctx context.Context, owner, id, key string) error { - span := createSpan(ctx, trm.tracer, updateThingKeyOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.UpdateKey(ctx, owner, id, key) -} - -func (trm thingRepositoryMiddleware) RetrieveByID(ctx context.Context, owner, id string) (things.Thing, error) { - span := createSpan(ctx, trm.tracer, retrieveThingByIDOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.RetrieveByID(ctx, owner, id) -} - -func (trm thingRepositoryMiddleware) RetrieveByKey(ctx context.Context, key string) (string, error) { - span := createSpan(ctx, trm.tracer, retrieveThingByKeyOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.RetrieveByKey(ctx, key) -} - -func (trm thingRepositoryMiddleware) RetrieveAll(ctx context.Context, owner string, pm things.PageMetadata) (things.Page, error) { - span := createSpan(ctx, trm.tracer, retrieveAllThingsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.RetrieveAll(ctx, owner, pm) -} - -func (trm thingRepositoryMiddleware) RetrieveByIDs(ctx context.Context, thingIDs []string, pm 
things.PageMetadata) (things.Page, error) { - span := createSpan(ctx, trm.tracer, retrieveAllThingsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.RetrieveByIDs(ctx, thingIDs, pm) -} - -func (trm thingRepositoryMiddleware) RetrieveByChannel(ctx context.Context, owner, chID string, pm things.PageMetadata) (things.Page, error) { - span := createSpan(ctx, trm.tracer, retrieveThingsByChannelOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.RetrieveByChannel(ctx, owner, chID, pm) -} - -func (trm thingRepositoryMiddleware) Remove(ctx context.Context, owner, id string) error { - span := createSpan(ctx, trm.tracer, removeThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return trm.repo.Remove(ctx, owner, id) -} - -type thingCacheMiddleware struct { - tracer opentracing.Tracer - cache things.ThingCache -} - -// ThingCacheMiddleware tracks request and their latency, and adds spans -// to context. 
-func ThingCacheMiddleware(tracer opentracing.Tracer, cache things.ThingCache) things.ThingCache { - return thingCacheMiddleware{ - tracer: tracer, - cache: cache, - } -} - -func (tcm thingCacheMiddleware) Save(ctx context.Context, thingKey string, thingID string) error { - span := createSpan(ctx, tcm.tracer, saveThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return tcm.cache.Save(ctx, thingKey, thingID) -} - -func (tcm thingCacheMiddleware) ID(ctx context.Context, thingKey string) (string, error) { - span := createSpan(ctx, tcm.tracer, retrieveThingIDByKeyOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return tcm.cache.ID(ctx, thingKey) -} - -func (tcm thingCacheMiddleware) Remove(ctx context.Context, thingID string) error { - span := createSpan(ctx, tcm.tracer, removeThingOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return tcm.cache.Remove(ctx, thingID) -} - -func createSpan(ctx context.Context, tracer opentracing.Tracer, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - return tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - return tracer.StartSpan(opName) -} diff --git a/tools/e2e/Makefile b/tools/e2e/Makefile new file mode 100644 index 0000000000..e5fc126e9d --- /dev/null +++ b/tools/e2e/Makefile @@ -0,0 +1,15 @@ +# Copyright (c) Mainflux +# SPDX-License-Identifier: Apache-2.0 + +PROGRAM = e2e +SOURCES = $(wildcard *.go) cmd/main.go + +all: $(PROGRAM) + +.PHONY: all clean + +$(PROGRAM): $(SOURCES) + go build -ldflags "-s -w" -o $@ cmd/main.go + +clean: + rm -rf $(PROGRAM) diff --git a/tools/e2e/README.md b/tools/e2e/README.md new file mode 100644 index 0000000000..ef63ddc159 --- /dev/null +++ b/tools/e2e/README.md @@ -0,0 +1,93 @@ +# Mainflux Users Groups Things and Channels E2E Testing Tool + +A simple utility to create a list of groups and users connected to these groups 
and channels and things connected to these channels. + +## Installation + +```bash +cd tools/e2e +make +``` + +### Usage + +```bash +./e2e --help +Tool for testing end-to-end flow of mainflux by doing a couple of operations namely: +1. Creating, viewing, updating and changing status of users, groups, things and channels. +2. Connecting users and groups to each other and things and channels to each other. +3. Sending messages from things to channels on all 4 protocol adapters (HTTP, WS, CoAP and MQTT). +Complete documentation is available at https://docs.mainflux.io + + +Usage: + + e2e [flags] + + +Examples: + +Here is a simple example of using e2e tool. +Use the following commands from the root mainflux directory: + +go run tools/e2e/cmd/main.go +go run tools/e2e/cmd/main.go --host 142.93.118.47 +go run tools/e2e/cmd/main.go --host localhost --num 10 --num_of_messages 100 --prefix e2e + + +Flags: + + -h, --help help for e2e + -H, --host string address for a running mainflux instance (default "localhost") + -n, --num uint number of users, groups, channels and things to create and connect (default 10) + -N, --num_of_messages uint number of messages to send (default 10) + -p, --prefix string name prefix for users, groups, things and channels +``` + +To use `-H` option, you can specify the address for the Mainflux instance as an argument when running the program. For example, if the Mainflux instance is running on another computer with the IP address 192.168.0.1, you could use the following command: + +```bash +go run tools/e2e/cmd/main.go --host 142.93.118.47 +``` + +This will tell the program to connect to the Mainflux instance running on the specified IP address. 
+ +If you want to create a list of channels with certificates: + +```bash +go run tools/e2e/cmd/main.go --host localhost --num 10 --num_of_messages 100 --prefix e2e +``` + +Example of output: + +```bash +created user with token eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODEyMDYwMjMsImlhdCI6MTY4MTIwNTEyMywiaWRlbnRpdHkiOiJlMmUtbGF0ZS1zaWxlbmNlQGVtYWlsLmNvbSIsImlzcyI6ImNsaWVudHMuYXV0aCIsInN1YiI6IjdlZDIyY2IyLTRlMzQtNDhiZi04Y2RlLTIxMjZiYzYyYzY4MyIsInR5cGUiOiJhY2Nlc3MifQ.AdExNYs5mVQNpo_ejJDq7KTC5dKkZWmgM9FJvTM2T_GM2LE9ASQv0ymC4wS3PDXKWf-OcaR8DJIxE6WiG3fztQ +created users of ids: +9e87bc1d-0889-4252-a3df-36e02edfc859 +c1e4901a-fb7f-45e9-b934-c55194b1d028 +c341a9cb-542b-4c3b-afd6-c98e04ed5e7e +8cfc886b-21fa-4205-80b4-3601827b94ff +334984d7-30eb-4b06-92b8-5ec182bebac5 +created groups of ids: +7744ec55-c767-4137-be96-0d79699772a4 +c8fe4d9d-3ad6-4687-83c0-171356f3e4f6 +513f7295-0923-4e21-b41a-3cfd1cb7b9b9 +54bd71ea-3c22-401e-89ea-d58162b983c0 +ae91b327-4c40-4e68-91fe-cd6223ee4e99 +created things of ids: +5909a907-7413-47d4-b793-e1eb36988a5f +f9b6bc18-1862-4a24-8973-adde11cb3303 +c2bd6eed-6f38-464c-989c-fe8ec8c084ba +8c76702c-0534-4246-8ed7-21816b4f91cf +25005ca8-e886-465f-9cd1-4f3c4a95c6c1 +created channels of ids: +ebb0e5f3-2241-4770-a7cc-f4bbd06134ca +d654948d-d6c1-4eae-b69a-29c853282c3d +2c2a5496-89cf-47e6-9d38-5fd5542337bd +7ab3319d-269c-4b07-9dc5-f9906693e894 +5d8fa139-10e7-4683-94f3-4e881b4db041 +created policies for users, groups, things and channels +viewed users, groups, things and channels +updated users, groups, things and channels +sent messages to channels +``` diff --git a/tools/e2e/cmd/main.go b/tools/e2e/cmd/main.go new file mode 100644 index 0000000000..a0ba154475 --- /dev/null +++ b/tools/e2e/cmd/main.go @@ -0,0 +1,57 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "log" + + cc "github.com/ivanpirog/coloredcobra" + "github.com/mainflux/mainflux/tools/e2e" + "github.com/spf13/cobra" +) + +const defNum 
= uint64(10) + +func main() { + econf := e2e.Config{} + + var rootCmd = &cobra.Command{ + Use: "e2e", + Short: "e2e is end-to-end testing tool for Mainflux", + Long: "Tool for testing end-to-end flow of mainflux by doing a couple of operations namely:\n" + + "1. Creating, viewing, updating and changing status of users, groups, things and channels.\n" + + "2. Connecting users and groups to each other and things and channels to each other.\n" + + "3. Sending messages from things to channels on all 4 protocol adapters (HTTP, WS, CoAP and MQTT).\n" + + "Complete documentation is available at https://docs.mainflux.io", + Example: "Here is a simple example of using e2e tool.\n" + + "Use the following commands from the root mainflux directory:\n\n" + + "go run tools/e2e/cmd/main.go\n" + + "go run tools/e2e/cmd/main.go --host 142.93.118.47\n" + + "go run tools/e2e/cmd/main.go --host localhost --num 10 --num_of_messages 100 --prefix e2e", + Run: func(_ *cobra.Command, _ []string) { + e2e.Test(econf) + }, + } + + cc.Init(&cc.Config{ + RootCmd: rootCmd, + Headings: cc.HiCyan + cc.Bold + cc.Underline, + CmdShortDescr: cc.Magenta, + Example: cc.Italic + cc.Magenta, + ExecName: cc.Bold, + Flags: cc.HiGreen + cc.Bold, + FlagsDescr: cc.Green, + FlagsDataType: cc.White + cc.Italic, + }) + + // Root Flags + rootCmd.PersistentFlags().StringVarP(&econf.Host, "host", "H", "localhost", "address for a running mainflux instance") + rootCmd.PersistentFlags().StringVarP(&econf.Prefix, "prefix", "p", "", "name prefix for users, groups, things and channels") + rootCmd.PersistentFlags().Uint64VarP(&econf.Num, "num", "n", defNum, "number of users, groups, channels and things to create and connect") + rootCmd.PersistentFlags().Uint64VarP(&econf.NumOfMsg, "num_of_messages", "N", defNum, "number of messages to send") + + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/tools/e2e/e2e.go b/tools/e2e/e2e.go new file mode 100644 index 0000000000..fd0c68fb0e --- /dev/null 
+++ b/tools/e2e/e2e.go @@ -0,0 +1,599 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "fmt" + "math/rand" + "net/http" + "os" + "os/exec" + "reflect" + "strings" + "time" + + "github.com/gookit/color" + namegen "github.com/goombaio/namegenerator" + "github.com/gorilla/websocket" + sdk "github.com/mainflux/mainflux/pkg/sdk/go" + "golang.org/x/sync/errgroup" +) + +const ( + defPass = "12345678" + defReaderURL = "http://localhost:8905" + defWSPort = "8186" + numAdapters = 4 +) + +var ( + seed = time.Now().UTC().UnixNano() + namesgenerator = namegen.NewNameGenerator(seed) + msgFormat = `[{"bn":"demo", "bu":"V", "t": %d, "bver":5, "n":"voltage", "u":"V", "v":%d}]` +) + +// Config - test configuration. +type Config struct { + Host string + Num uint64 + NumOfMsg uint64 + SSL bool + CA string + CAKey string + Prefix string +} + +func init() { + rand.Seed(seed) +} + +// Test - function that does actual end to end testing. +// The operations are: +// - Create a user +// - Create other users +// - Do Read, Update and Change of Status operations on users + +// - Create groups using hierarchy +// - Do Read, Update and Change of Status operations on groups + +// - Create things +// - Do Read, Update and Change of Status operations on things + +// - Create channels +// - Do Read, Update and Change of Status operations on channels + +// - Connect thing to channel +// - Publish message from HTTP, MQTT, WS and CoAP Adapters +func Test(conf Config) { + sdkConf := sdk.Config{ + ThingsURL: fmt.Sprintf("http://%s", conf.Host), + UsersURL: fmt.Sprintf("http://%s", conf.Host), + ReaderURL: defReaderURL, + HTTPAdapterURL: fmt.Sprintf("http://%s/http", conf.Host), + BootstrapURL: fmt.Sprintf("http://%s", conf.Host), + CertsURL: fmt.Sprintf("http://%s", conf.Host), + MsgContentType: sdk.CTJSONSenML, + TLSVerification: false, + } + + s := sdk.NewSDK(sdkConf) + + magenta := color.FgLightMagenta.Render + + token, owner, err := createUser(s, 
conf) + if err != nil { + errExit(err) + } + color.Success.Printf("created user with token %s\n", magenta(token)) + + users, err := createUsers(s, conf, token) + if err != nil { + errExit(err) + } + color.Success.Printf("created users of ids:\n%s\n", magenta(getIDS(users))) + + groups, err := createGroups(s, conf, token) + if err != nil { + errExit(err) + } + color.Success.Printf("created groups of ids:\n%s\n", magenta(getIDS(groups))) + + things, err := createThings(s, conf, token) + if err != nil { + errExit(err) + } + color.Success.Printf("created things of ids:\n%s\n", magenta(getIDS(things))) + + channels, err := createChannels(s, conf, token) + if err != nil { + errExit(err) + } + color.Success.Printf("created channels of ids:\n%s\n", magenta(getIDS(channels))) + + if err := createPolicies(s, conf, token, owner, users, groups, things, channels); err != nil { + errExit(err) + } + color.Success.Println("created policies for users, groups, things and channels") + + // List users, groups, things and channels + if err := read(s, conf, token, users, groups, things, channels); err != nil { + errExit(err) + } + color.Success.Println("viewed users, groups, things and channels") + + // Update users, groups, things and channels + if err := update(s, token, users, groups, things, channels); err != nil { + errExit(err) + } + color.Success.Println("updated users, groups, things and channels") + + // Send messages to channels + if err := messaging(s, conf, token, things, channels); err != nil { + errExit(err) + } + color.Success.Println("sent messages to channels") +} + +func errExit(err error) { + color.Error.Println(err.Error()) + os.Exit(1) +} + +func createUser(s sdk.SDK, conf Config) (string, string, error) { + user := sdk.User{ + Name: fmt.Sprintf("%s-%s", conf.Prefix, namesgenerator.Generate()), + Credentials: sdk.Credentials{ + Identity: fmt.Sprintf("%s-%s@email.com", conf.Prefix, namesgenerator.Generate()), + Secret: defPass, + }, + Status: sdk.EnabledStatus, + } + 
+ pass := user.Credentials.Secret + + user, err := s.CreateUser(user, "") + if err != nil { + return "", "", fmt.Errorf("unable to create user: %w", err) + } + + user.Credentials.Secret = pass + token, err := s.CreateToken(user) + if err != nil { + return "", "", fmt.Errorf("unable to login user: %w", err) + } + + return token.AccessToken, user.ID, nil +} + +func createUsers(s sdk.SDK, conf Config, token string) ([]sdk.User, error) { + var err error + users := []sdk.User{} + + for i := uint64(0); i < conf.Num; i++ { + user := sdk.User{ + Name: fmt.Sprintf("%s-%s", conf.Prefix, namesgenerator.Generate()), + Credentials: sdk.Credentials{ + Identity: fmt.Sprintf("%s-%s@email.com", conf.Prefix, namesgenerator.Generate()), + Secret: defPass}, + Status: sdk.EnabledStatus, + } + + user, err = s.CreateUser(user, token) + if err != nil { + return []sdk.User{}, fmt.Errorf("Failed to create the users: %w", err) + } + users = append(users, user) + } + + return users, nil +} + +func createGroups(s sdk.SDK, conf Config, token string) ([]sdk.Group, error) { + var err error + groups := []sdk.Group{} + + parentID := "" + for i := uint64(0); i < conf.Num; i++ { + group := sdk.Group{ + Name: fmt.Sprintf("%s-%s", conf.Prefix, namesgenerator.Generate()), + ParentID: parentID, + Status: sdk.EnabledStatus, + } + + group, err = s.CreateGroup(group, token) + if err != nil { + return []sdk.Group{}, fmt.Errorf("Failed to create the group: %w", err) + } + groups = append(groups, group) + parentID = group.ID + } + + return groups, nil +} + +func createThings(s sdk.SDK, conf Config, token string) ([]sdk.Thing, error) { + var err error + things := make([]sdk.Thing, conf.Num) + + for i := uint64(0); i < conf.Num; i++ { + things[i] = sdk.Thing{ + Name: fmt.Sprintf("%s-%s", conf.Prefix, namesgenerator.Generate()), + Status: sdk.EnabledStatus, + } + } + things, err = s.CreateThings(things, token) + if err != nil { + return []sdk.Thing{}, fmt.Errorf("Failed to create the things: %w", err) + } + + 
return things, nil +} + +func createChannels(s sdk.SDK, conf Config, token string) ([]sdk.Channel, error) { + var err error + channels := make([]sdk.Channel, conf.Num) + + for i := uint64(0); i < conf.Num; i++ { + channels[i] = sdk.Channel{ + Name: fmt.Sprintf("%s-%s", conf.Prefix, namesgenerator.Generate()), + Status: sdk.EnabledStatus, + } + } + + channels, err = s.CreateChannels(channels, token) + if err != nil { + return []sdk.Channel{}, fmt.Errorf("Failed to create the channels: %w", err) + } + + return channels, nil +} + +func createPolicies(s sdk.SDK, conf Config, token, owner string, users []sdk.User, groups []sdk.Group, things []sdk.Thing, channels []sdk.Channel) error { + for i := uint64(0); i < conf.Num; i++ { + upolicy := sdk.Policy{ + Subject: owner, + Object: users[i].ID, + Actions: []string{"c_delete", "c_update", "c_add", "c_list"}, + } + gpolicy := sdk.Policy{ + Subject: owner, + Object: groups[i].ID, + Actions: []string{"g_delete", "g_update", "g_add", "g_list"}, + } + tpolicy := sdk.Policy{ + Subject: owner, + Object: things[i].ID, + Actions: []string{"c_delete", "c_update", "c_add", "c_list"}, + } + cpolicy := sdk.Policy{ + Subject: owner, + Object: channels[i].ID, + Actions: []string{"g_delete", "g_update", "g_add", "g_list"}, + } + if err := s.CreatePolicy(upolicy, token); err != nil { + return err + } + if err := s.CreatePolicy(gpolicy, token); err != nil { + return err + } + if err := s.CreatePolicy(tpolicy, token); err != nil { + return err + } + if err := s.CreatePolicy(cpolicy, token); err != nil { + return err + } + } + + return nil +} + +func read(s sdk.SDK, conf Config, token string, users []sdk.User, groups []sdk.Group, things []sdk.Thing, channels []sdk.Channel) error { + for _, user := range users { + if _, err := s.User(user.ID, token); err != nil { + return err + } + } + up, err := s.Users(sdk.PageMetadata{}, token) + if err != nil { + return err + } + if up.Total != conf.Num { + return fmt.Errorf("returned users %d not equal to 
create users %d", up.Total, conf.Num) + } + for _, group := range groups { + if _, err := s.Group(group.ID, token); err != nil { + return err + } + } + gp, err := s.Groups(sdk.PageMetadata{}, token) + if err != nil { + return err + } + if gp.Total != conf.Num { + return fmt.Errorf("returned groups %d not equal to create groups %d", gp.Total, conf.Num) + } + for _, thing := range things { + if _, err := s.Thing(thing.ID, token); err != nil { + return err + } + } + tp, err := s.Things(sdk.PageMetadata{}, token) + if err != nil { + return err + } + if tp.Total != conf.Num { + return fmt.Errorf("returned things %d not equal to create things %d", tp.Total, conf.Num) + } + for _, channel := range channels { + if _, err := s.Channel(channel.ID, token); err != nil { + return err + } + } + cp, err := s.Channels(sdk.PageMetadata{}, token) + if err != nil { + return err + } + if cp.Total != conf.Num { + return fmt.Errorf("returned channels %d not equal to create channels %d", cp.Total, conf.Num) + } + + return nil +} + +func update(s sdk.SDK, token string, users []sdk.User, groups []sdk.Group, things []sdk.Thing, channels []sdk.Channel) error { + for _, user := range users { + user.Name = namesgenerator.Generate() + user.Metadata = sdk.Metadata{"Update": namesgenerator.Generate()} + rUser, err := s.UpdateUser(user, token) + if err != nil { + return fmt.Errorf("failed to update user %w", err) + } + if rUser.Name != user.Name { + return fmt.Errorf("failed to update user name before %s after %s", user.Name, rUser.Name) + } + if rUser.Metadata["Update"] != user.Metadata["Update"] { + return fmt.Errorf("failed to update user metadata before %s after %s", user.Metadata["Update"], rUser.Metadata["Update"]) + } + user = rUser + user.Credentials.Identity = namesgenerator.Generate() + rUser, err = s.UpdateUserIdentity(user, token) + if err != nil { + return fmt.Errorf("failed to update user identity %w", err) + } + if rUser.Credentials.Identity != user.Credentials.Identity { + return 
fmt.Errorf("failed to update user identity before %s after %s", user.Credentials.Identity, rUser.Credentials.Identity) + } + user = rUser + user.Tags = []string{namesgenerator.Generate()} + rUser, err = s.UpdateUserTags(user, token) + if err != nil { + return fmt.Errorf("failed to update user tags %w", err) + } + if rUser.Tags[0] != user.Tags[0] { + return fmt.Errorf("failed to update user tags before %s after %s", user.Tags[0], rUser.Tags[0]) + } + user = rUser + rUser, err = s.DisableUser(user.ID, token) + if err != nil { + return fmt.Errorf("failed to disable user %w", err) + } + if rUser.Status != sdk.DisabledStatus { + return fmt.Errorf("failed to disable user before %s after %s", user.Status, rUser.Status) + } + user = rUser + rUser, err = s.EnableUser(user.ID, token) + if err != nil { + return fmt.Errorf("failed to enable user %w", err) + } + if rUser.Status != sdk.EnabledStatus { + return fmt.Errorf("failed to enable user before %s after %s", user.Status, rUser.Status) + } + } + for _, group := range groups { + group.Name = namesgenerator.Generate() + group.Metadata = sdk.Metadata{"Update": namesgenerator.Generate()} + rGroup, err := s.UpdateGroup(group, token) + if err != nil { + return fmt.Errorf("failed to update group %w", err) + } + if rGroup.Name != group.Name { + return fmt.Errorf("failed to update group name before %s after %s", group.Name, rGroup.Name) + } + if rGroup.Metadata["Update"] != group.Metadata["Update"] { + return fmt.Errorf("failed to update group metadata before %s after %s", group.Metadata["Update"], rGroup.Metadata["Update"]) + } + group = rGroup + rGroup, err = s.DisableGroup(group.ID, token) + if err != nil { + return fmt.Errorf("failed to disable group %w", err) + } + if rGroup.Status != sdk.DisabledStatus { + return fmt.Errorf("failed to disable group before %s after %s", group.Status, rGroup.Status) + } + group = rGroup + rGroup, err = s.EnableGroup(group.ID, token) + if err != nil { + return fmt.Errorf("failed to enable group 
%w", err) + } + if rGroup.Status != sdk.EnabledStatus { + return fmt.Errorf("failed to enable group before %s after %s", group.Status, rGroup.Status) + } + } + for _, thing := range things { + thing.Name = namesgenerator.Generate() + thing.Metadata = sdk.Metadata{"Update": namesgenerator.Generate()} + rThing, err := s.UpdateThing(thing, token) + if err != nil { + return fmt.Errorf("failed to update thing %w", err) + } + if rThing.Name != thing.Name { + return fmt.Errorf("failed to update thing name before %s after %s", thing.Name, rThing.Name) + } + if rThing.Metadata["Update"] != thing.Metadata["Update"] { + return fmt.Errorf("failed to update thing metadata before %s after %s", thing.Metadata["Update"], rThing.Metadata["Update"]) + } + thing = rThing + rThing, err = s.UpdateThingSecret(thing.ID, thing.Credentials.Secret, token) + if err != nil { + return fmt.Errorf("failed to update thing secret %w", err) + } + if rThing.Credentials.Secret != thing.Credentials.Secret { + return fmt.Errorf("failed to update thing secret before %s after %s", thing.Credentials.Secret, rThing.Credentials.Secret) + } + thing = rThing + thing.Tags = []string{namesgenerator.Generate()} + rThing, err = s.UpdateThingTags(thing, token) + if err != nil { + return fmt.Errorf("failed to update thing tags %w", err) + } + if rThing.Tags[0] != thing.Tags[0] { + return fmt.Errorf("failed to update thing tags before %s after %s", thing.Tags[0], rThing.Tags[0]) + } + thing = rThing + rThing, err = s.DisableThing(thing.ID, token) + if err != nil { + return fmt.Errorf("failed to disable thing %w", err) + } + if rThing.Status != sdk.DisabledStatus { + return fmt.Errorf("failed to disable thing before %s after %s", thing.Status, rThing.Status) + } + thing = rThing + rThing, err = s.EnableThing(thing.ID, token) + if err != nil { + return fmt.Errorf("failed to enable thing %w", err) + } + if rThing.Status != sdk.EnabledStatus { + return fmt.Errorf("failed to enable thing before %s after %s", 
thing.Status, rThing.Status) + } + } + for _, channel := range channels { + channel.Name = namesgenerator.Generate() + channel.Metadata = sdk.Metadata{"Update": namesgenerator.Generate()} + rChannel, err := s.UpdateChannel(channel, token) + if err != nil { + return fmt.Errorf("failed to update channel %w", err) + } + if rChannel.Name != channel.Name { + return fmt.Errorf("failed to update channel name before %s after %s", channel.Name, rChannel.Name) + } + if rChannel.Metadata["Update"] != channel.Metadata["Update"] { + return fmt.Errorf("failed to update channel metadata before %s after %s", channel.Metadata["Update"], rChannel.Metadata["Update"]) + } + channel = rChannel + rChannel, err = s.DisableChannel(channel.ID, token) + if err != nil { + return fmt.Errorf("failed to disable channel %w", err) + } + if rChannel.Status != sdk.DisabledStatus { + return fmt.Errorf("failed to disable channel before %s after %s", channel.Status, rChannel.Status) + } + channel = rChannel + rChannel, err = s.EnableChannel(channel.ID, token) + if err != nil { + return fmt.Errorf("failed to enable channel %w", err) + } + if rChannel.Status != sdk.EnabledStatus { + return fmt.Errorf("failed to enable channel before %s after %s", channel.Status, rChannel.Status) + } + } + + return nil +} + +func messaging(s sdk.SDK, conf Config, token string, things []sdk.Thing, channels []sdk.Channel) error { + for _, thing := range things { + for _, channel := range channels { + if err := s.ConnectThing(thing.ID, channel.ID, token); err != nil { + return fmt.Errorf("failed to connect thing %s to channel %s", thing.ID, channel.ID) + } + } + } + g := new(errgroup.Group) + + bt := time.Now().Unix() + for i := uint64(0); i < conf.NumOfMsg; i++ { + for _, thing := range things { + for _, channel := range channels { + func(num int64, thing sdk.Thing, channel sdk.Channel) { + g.Go(func() error { + msg := fmt.Sprintf(msgFormat, num+1, rand.Int()) + return sendHTTPMessage(s, msg, thing, channel.ID) + }) + 
g.Go(func() error { + msg := fmt.Sprintf(msgFormat, num+2, rand.Int()) + return sendCoAPMessage(msg, thing, channel.ID) + }) + g.Go(func() error { + msg := fmt.Sprintf(msgFormat, num+3, rand.Int()) + return sendMQTTMessage(msg, thing, channel.ID) + }) + g.Go(func() error { + msg := fmt.Sprintf(msgFormat, num+4, rand.Int()) + return sendWSMessage(conf, msg, thing, channel.ID) + }) + }(bt, thing, channel) + bt += numAdapters + } + } + } + + return g.Wait() +} + +func sendHTTPMessage(s sdk.SDK, msg string, thing sdk.Thing, chanID string) error { + if err := s.SendMessage(chanID, msg, thing.Credentials.Secret); err != nil { + return fmt.Errorf("HTTP failed to send message from thing %s to channel %s: %w", thing.ID, chanID, err) + } + + return nil +} + +func sendCoAPMessage(msg string, thing sdk.Thing, chanID string) error { + cmd := exec.Command("coap-cli", "post", fmt.Sprintf("channels/%s/messages", chanID), "-auth", thing.Credentials.Secret, "-d", msg) + if _, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("CoAP failed to send message from thing %s to channel %s: %w", thing.ID, chanID, err) + } + + return nil +} + +func sendMQTTMessage(msg string, thing sdk.Thing, chanID string) error { + cmd := exec.Command("mosquitto_pub", "--id-prefix", "mainflux", "-u", thing.ID, "-P", thing.Credentials.Secret, "-t", fmt.Sprintf("channels/%s/messages", chanID), "-h", "localhost", "-m", msg) + if _, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("MQTT failed to send message from thing %s to channel %s: %w", thing.ID, chanID, err) + } + + return nil +} + +func sendWSMessage(conf Config, msg string, thing sdk.Thing, chanID string) error { + socketURL := fmt.Sprintf("ws://%s:%s/channels/%s/messages", conf.Host, defWSPort, chanID) + header := http.Header{"authorization": []string{thing.Credentials.Secret}} + conn, _, err := websocket.DefaultDialer.Dial(socketURL, header) + if err != nil { + return fmt.Errorf("unable to connect to websocket: %w", err) + } 
+ defer conn.Close() + if err := conn.WriteMessage(websocket.TextMessage, []byte(msg)); err != nil { + return fmt.Errorf("WS failed to send message from thing %s to channel %s: %w", thing.ID, chanID, err) + } + + return nil +} + +// getIDS returns a list of IDs of the given objects. +func getIDS(objects interface{}) string { + v := reflect.ValueOf(objects) + if v.Kind() != reflect.Slice { + panic("objects argument must be a slice") + } + ids := make([]string, v.Len()) + for i := 0; i < v.Len(); i++ { + id := v.Index(i).FieldByName("ID").String() + ids[i] = id + } + idList := strings.Join(ids, "\n") + + return idList +} diff --git a/tools/mqtt-bench/.gitignore b/tools/mqtt-bench/.gitignore deleted file mode 100644 index 53d3d23c98..0000000000 --- a/tools/mqtt-bench/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Mainflux -# SPDX-License-Identifier: Apache-2.0 - -mqtt-bench \ No newline at end of file diff --git a/tools/mqtt-bench/bench.go b/tools/mqtt-bench/bench.go index c5aafcd82c..f21dfc89d7 100644 --- a/tools/mqtt-bench/bench.go +++ b/tools/mqtt-bench/bench.go @@ -8,7 +8,7 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "log" "os" "strconv" @@ -39,10 +39,10 @@ func Benchmark(cfg Config) { if err != nil { logger.Warn(err.Error()) } - caByte, _ = ioutil.ReadAll(caFile) + caByte, _ = io.ReadAll(caFile) } - data, err := ioutil.ReadFile(cfg.Mf.ConnFile) + data, err := os.ReadFile(cfg.Mf.ConnFile) if err != nil { logger.Fatal(fmt.Sprintf("Error loading connections file: %s", err)) } diff --git a/tools/provision/.gitignore b/tools/provision/.gitignore deleted file mode 100644 index d231d5f489..0000000000 --- a/tools/provision/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Mainflux -# SPDX-License-Identifier: Apache-2.0 - -mfconn.toml -provision \ No newline at end of file diff --git a/tools/provision/provision.go b/tools/provision/provision.go index 68d8712186..56cec9a512 100644 --- a/tools/provision/provision.go +++ 
b/tools/provision/provision.go @@ -14,7 +14,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" "log" "math/big" "os" @@ -59,7 +58,6 @@ func Provision(conf Config) { msgContentType := string(sdk.CTJSONSenML) sdkConf := sdk.Config{ - AuthURL: conf.Host, ThingsURL: conf.Host, UsersURL: conf.Host, ReaderURL: defReaderURL, @@ -73,13 +71,15 @@ func Provision(conf Config) { s := sdk.NewSDK(sdkConf) user := sdk.User{ - Email: conf.Username, - Password: conf.Password, + Credentials: sdk.Credentials{ + Identity: conf.Username, + Secret: conf.Password, + }, } - if user.Email == "" { - user.Email = fmt.Sprintf("%s@email.com", namesgenerator.GetRandomName(0)) - user.Password = defPass + if user.Credentials.Identity == "" { + user.Credentials.Identity = fmt.Sprintf("%s@email.com", namesgenerator.GetRandomName(0)) + user.Credentials.Secret = defPass } // Create new user @@ -107,7 +107,7 @@ func Provision(conf Config) { log.Fatalf("Failed to load CA cert") } - b, err := ioutil.ReadFile(conf.CA) + b, err := os.ReadFile(conf.CA) if err != nil { log.Fatalf("Failed to load CA cert") } @@ -136,12 +136,12 @@ func Provision(conf Config) { channels[i] = sdk.Channel{Name: fmt.Sprintf("%s-channel-%d", conf.Prefix, i)} } - things, err = s.CreateThings(things, token) + things, err = s.CreateThings(things, token.AccessToken) if err != nil { log.Fatalf("Failed to create the things: %s", err.Error()) } - channels, err = s.CreateChannels(channels, token) + channels, err = s.CreateChannels(channels, token.AccessToken) if err != nil { log.Fatalf("Failed to create the chennels: %s", err.Error()) } @@ -179,7 +179,7 @@ func Provision(conf Config) { SerialNumber: serialNumber, Subject: pkix.Name{ Organization: []string{"Mainflux"}, - CommonName: things[i].Key, + CommonName: things[i].Credentials.Secret, OrganizationalUnit: []string{"mainflux"}, }, NotBefore: notBefore, @@ -213,7 +213,7 @@ func Provision(conf Config) { } // Print output - fmt.Printf("[[things]]\nthing_id = 
\"%s\"\nthing_key = \"%s\"\n", things[i].ID, things[i].Key) + fmt.Printf("[[things]]\nthing_id = \"%s\"\nthing_key = \"%s\"\n", things[i].ID, things[i].Credentials.Secret) if conf.SSL { fmt.Printf("mtls_cert = \"\"\"%s\"\"\"\n", cert) fmt.Printf("mtls_key = \"\"\"%s\"\"\"\n", key) @@ -231,7 +231,7 @@ func Provision(conf Config) { ChannelIDs: cIDs, ThingIDs: tIDs, } - if err := s.Connect(conIDs, token); err != nil { + if err := s.Connect(conIDs, token.AccessToken); err != nil { log.Fatalf("Failed to connect things %s to channels %s: %s", conIDs.ThingIDs, conIDs.ChannelIDs, err) } } diff --git a/twins/README.md b/twins/README.md index 9cf4a38ae3..4bef3429f5 100644 --- a/twins/README.md +++ b/twins/README.md @@ -18,18 +18,18 @@ default values. | MF_TWINS_HTTP_PORT | Twins service HTTP port | 9018 | | MF_TWINS_SERVER_CERT | Path to server certificate in PEM format | | | MF_TWINS_SERVER_KEY | Path to server key in PEM format | | -| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | +| MF_JAEGER_URL | Jaeger server URL | localhost:6831 | | MF_TWINS_DB | Database name | mainflux | | MF_TWINS_DB_HOST | Database host address | localhost | | MF_TWINS_DB_PORT | Database host port | 27017 | -| MF_THINGS_STANDALONE_EMAIL | User email for standalone mode (no gRPC communication with users) | | +| MF_THINGS_STANDALONE_ID | User ID for standalone mode (no gRPC communication with users) | | | MF_THINGS_STANDALONE_TOKEN | User token for standalone mode that should be passed in auth header | | | MF_TWINS_CLIENT_TLS | Flag that indicates if TLS should be turned on | false | | MF_TWINS_CA_CERTS | Path to trusted CAs in PEM format | | | MF_TWINS_CHANNEL_ID | Message broker notifications channel ID | | | MF_BROKER_URL | Mainflux Message broker URL | nats://localhost:4222 | -| MF_AUTH_GRPC_URL | Auth service gRPC URL | localhost:7001 | -| MF_AUTH_GRPC_TIMEOUT | Auth service gRPC request timeout in seconds | 1s | +| MF_AUTH_GRPC_URL | Users service gRPC URL | localhost:7001 | +| 
MF_AUTH_GRPC_TIMEOUT | Users service gRPC request timeout in seconds | 1s | | MF_TWINS_CACHE_URL | Cache database URL | localhost:6379 | | MF_TWINS_CACHE_PASS | Cache database password | | | MF_TWINS_CACHE_DB | Cache instance name | 0 | @@ -69,8 +69,8 @@ MF_TWINS_CLIENT_TLS: [Flag that indicates if TLS should be turned on] \ MF_TWINS_CA_CERTS: [Path to trusted CAs in PEM format] \ MF_TWINS_CHANNEL_ID: [Message broker notifications channel ID] \ MF_BROKER_URL: [Mainflux Message broker URL] \ -MF_AUTH_GRPC_URL: [Auth service gRPC URL] \ -MF_AUTH_GRPC_TIMEOUT: [Auth service gRPC request timeout in seconds] \ +MF_AUTH_GRPC_URL: [Users service gRPC URL] \ +MF_AUTH_GRPC_TIMEOUT: [Users service gRPC request timeout in seconds] \ $GOBIN/mainflux-twins ``` diff --git a/twins/api/http/endpoint_states_test.go b/twins/api/http/endpoint_states_test.go index 8b1cbf5595..9b75fb242a 100644 --- a/twins/api/http/endpoint_states_test.go +++ b/twins/api/http/endpoint_states_test.go @@ -51,7 +51,7 @@ func TestListStates(t *testing.T) { attr := def.Attributes[0] var recs = make([]senml.Record, numRecs) - mocks.CreateSenML(numRecs, recs) + mocks.CreateSenML(recs) message, err := mocks.CreateMessage(attr, recs) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) err = svc.SaveStates(message) diff --git a/twins/api/http/endpoint_twins_test.go b/twins/api/http/endpoint_twins_test.go index e86282c172..fbd673951e 100644 --- a/twins/api/http/endpoint_twins_test.go +++ b/twins/api/http/endpoint_twins_test.go @@ -19,7 +19,6 @@ import ( "github.com/mainflux/mainflux/twins" httpapi "github.com/mainflux/mainflux/twins/api/http" "github.com/mainflux/mainflux/twins/mocks" - "github.com/opentracing/opentracing-go/mocktracer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -85,7 +84,7 @@ func (tr testRequest) make() (*http.Response, error) { func newServer(svc twins.Service) *httptest.Server { logger := logger.NewMock() - mux := 
httpapi.MakeHandler(mocktracer.New(), svc, logger) + mux := httpapi.MakeHandler(svc, logger) return httptest.NewServer(mux) } diff --git a/twins/api/http/transport.go b/twins/api/http/transport.go index 72e75c9588..55df78b625 100644 --- a/twins/api/http/transport.go +++ b/twins/api/http/transport.go @@ -9,7 +9,6 @@ import ( "net/http" "strings" - kitot "github.com/go-kit/kit/tracing/opentracing" kithttp "github.com/go-kit/kit/transport/http" "github.com/go-zoo/bone" "github.com/mainflux/mainflux" @@ -17,8 +16,8 @@ import ( "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/twins" - opentracing "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" ) const ( @@ -32,7 +31,7 @@ const ( ) // MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(tracer opentracing.Tracer, svc twins.Service, logger logger.Logger) http.Handler { +func MakeHandler(svc twins.Service, logger logger.Logger) http.Handler { opts := []kithttp.ServerOption{ kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), } @@ -40,42 +39,42 @@ func MakeHandler(tracer opentracing.Tracer, svc twins.Service, logger logger.Log r := bone.New() r.Post("/twins", kithttp.NewServer( - kitot.TraceServer(tracer, "add_twin")(addTwinEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("add_twin"))(addTwinEndpoint(svc)), decodeTwinCreation, encodeResponse, opts..., )) r.Put("/twins/:twinID", kithttp.NewServer( - kitot.TraceServer(tracer, "update_twin")(updateTwinEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("update_twin"))(updateTwinEndpoint(svc)), decodeTwinUpdate, encodeResponse, opts..., )) r.Get("/twins/:twinID", kithttp.NewServer( - kitot.TraceServer(tracer, "view_twin")(viewTwinEndpoint(svc)), + 
otelkit.EndpointMiddleware(otelkit.WithOperation("view_twin"))(viewTwinEndpoint(svc)), decodeView, encodeResponse, opts..., )) r.Delete("/twins/:twinID", kithttp.NewServer( - kitot.TraceServer(tracer, "remove_twin")(removeTwinEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("remove_twin"))(removeTwinEndpoint(svc)), decodeView, encodeResponse, opts..., )) r.Get("/twins", kithttp.NewServer( - kitot.TraceServer(tracer, "list_twins")(listTwinsEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("list_twins"))(listTwinsEndpoint(svc)), decodeList, encodeResponse, opts..., )) r.Get("/states/:twinID", kithttp.NewServer( - kitot.TraceServer(tracer, "list_states")(listStatesEndpoint(svc)), + otelkit.EndpointMiddleware(otelkit.WithOperation("list_states"))(listStatesEndpoint(svc)), decodeListStates, encodeResponse, opts..., diff --git a/twins/api/logging.go b/twins/api/logging.go index dafe88d1f2..b9acf98549 100644 --- a/twins/api/logging.go +++ b/twins/api/logging.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/twins" ) @@ -18,12 +18,12 @@ import ( var _ twins.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc twins.Service } // LoggingMiddleware adds logging facilities to the core service. 
-func LoggingMiddleware(svc twins.Service, logger log.Logger) twins.Service { +func LoggingMiddleware(svc twins.Service, logger mflog.Logger) twins.Service { return &loggingMiddleware{logger, svc} } diff --git a/twins/mocks/auth.go b/twins/mocks/auth.go index 3b015e072c..2d2e7684b6 100644 --- a/twins/mocks/auth.go +++ b/twins/mocks/auth.go @@ -6,54 +6,45 @@ package mocks import ( "context" - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" "google.golang.org/grpc" ) -var _ mainflux.AuthServiceClient = (*authServiceClient)(nil) +var _ policies.AuthServiceClient = (*authServiceClient)(nil) type authServiceClient struct { users map[string]string } -func (svc authServiceClient) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { +func (svc authServiceClient) ListPolicies(ctx context.Context, in *policies.ListPoliciesReq, opts ...grpc.CallOption) (*policies.ListPoliciesRes, error) { panic("not implemented") } // NewAuthServiceClient creates mock of auth service. 
-func NewAuthServiceClient(users map[string]string) mainflux.AuthServiceClient { +func NewAuthServiceClient(users map[string]string) policies.AuthServiceClient { return &authServiceClient{users} } -func (svc authServiceClient) Identify(ctx context.Context, in *mainflux.Token, opts ...grpc.CallOption) (*mainflux.UserIdentity, error) { +func (svc authServiceClient) Identify(ctx context.Context, in *policies.Token, opts ...grpc.CallOption) (*policies.UserIdentity, error) { if id, ok := svc.users[in.Value]; ok { - return &mainflux.UserIdentity{Id: id, Email: id}, nil + return &policies.UserIdentity{Id: id}, nil } return nil, errors.ErrAuthentication } -func (svc *authServiceClient) Issue(ctx context.Context, in *mainflux.IssueReq, opts ...grpc.CallOption) (*mainflux.Token, error) { - return new(mainflux.Token), nil +func (svc *authServiceClient) Issue(ctx context.Context, in *policies.IssueReq, opts ...grpc.CallOption) (*policies.Token, error) { + return new(policies.Token), nil } -func (svc *authServiceClient) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { +func (svc *authServiceClient) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { panic("not implemented") } -func (svc authServiceClient) AddPolicy(ctx context.Context, in *mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { +func (svc authServiceClient) AddPolicy(ctx context.Context, in *policies.AddPolicyReq, opts ...grpc.CallOption) (*policies.AddPolicyRes, error) { panic("not implemented") } -func (svc authServiceClient) DeletePolicy(ctx context.Context, in *mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { - panic("not implemented") -} - -func (svc *authServiceClient) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - panic("not 
implemented") -} - -func (svc *authServiceClient) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, err error) { +func (svc authServiceClient) DeletePolicy(ctx context.Context, in *policies.DeletePolicyReq, opts ...grpc.CallOption) (*policies.DeletePolicyRes, error) { panic("not implemented") } diff --git a/twins/mocks/service.go b/twins/mocks/service.go index b6dd40d6f6..90e61d88d8 100644 --- a/twins/mocks/service.go +++ b/twins/mocks/service.go @@ -52,7 +52,7 @@ func CreateTwin(channels []string, subtopics []string) twins.Twin { } // CreateSenML creates SenML record array -func CreateSenML(n int, recs []senml.Record) { +func CreateSenML(recs []senml.Record) { for i, rec := range recs { rec.BaseTime = float64(time.Now().Unix()) rec.Time = float64(i) diff --git a/twins/mongodb/twins_test.go b/twins/mongodb/twins_test.go index 6854536bb5..8fa9367c0d 100644 --- a/twins/mongodb/twins_test.go +++ b/twins/mongodb/twins_test.go @@ -10,7 +10,7 @@ import ( "strings" "testing" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/uuid" "github.com/mainflux/mainflux/twins" @@ -35,7 +35,7 @@ const ( var ( port string addr string - testLog, _ = log.New(os.Stdout, log.Info.String()) + testLog, _ = mflog.New(os.Stdout, mflog.Info.String()) idProvider = uuid.New() invalidName = strings.Repeat("m", maxNameSize+1) ) diff --git a/twins/service.go b/twins/service.go index 127fffad58..6a43f8168a 100644 --- a/twins/service.go +++ b/twins/service.go @@ -13,6 +13,7 @@ import ( "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/messaging" + "github.com/mainflux/mainflux/users/policies" "github.com/mainflux/mainflux" "github.com/mainflux/senml" @@ -74,7 +75,7 @@ var crudOp = map[string]string{ type twinsService struct { publisher messaging.Publisher - auth 
mainflux.AuthServiceClient + auth policies.AuthServiceClient twins TwinRepository states StateRepository idProvider mainflux.IDProvider @@ -86,7 +87,7 @@ type twinsService struct { var _ Service = (*twinsService)(nil) // New instantiates the twins service implementation. -func New(publisher messaging.Publisher, auth mainflux.AuthServiceClient, twins TwinRepository, tcache TwinCache, sr StateRepository, idp mainflux.IDProvider, chann string, logger logger.Logger) Service { +func New(publisher messaging.Publisher, auth policies.AuthServiceClient, twins TwinRepository, tcache TwinCache, sr StateRepository, idp mainflux.IDProvider, chann string, logger logger.Logger) Service { return &twinsService{ publisher: publisher, auth: auth, @@ -104,7 +105,7 @@ func (ts *twinsService) AddTwin(ctx context.Context, token string, twin Twin, de var b []byte defer ts.publish(ctx, &id, &err, crudOp["createSucc"], crudOp["createFail"], &b) - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) + res, err := ts.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return Twin{}, err } @@ -114,7 +115,7 @@ func (ts *twinsService) AddTwin(ctx context.Context, token string, twin Twin, de return Twin{}, err } - twin.Owner = res.GetEmail() + twin.Owner = res.GetId() t := time.Now() twin.Created = t @@ -147,7 +148,7 @@ func (ts *twinsService) UpdateTwin(ctx context.Context, token string, twin Twin, var id string defer ts.publish(ctx, &id, &err, crudOp["updateSucc"], crudOp["updateFail"], &b) - _, err = ts.auth.Identify(ctx, &mainflux.Token{Value: token}) + _, err = ts.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return errors.ErrAuthentication } @@ -197,7 +198,7 @@ func (ts *twinsService) ViewTwin(ctx context.Context, token, twinID string) (tw var b []byte defer ts.publish(ctx, &twinID, &err, crudOp["getSucc"], crudOp["getFail"], &b) - _, err = ts.auth.Identify(ctx, &mainflux.Token{Value: token}) + _, err = ts.auth.Identify(ctx, 
&policies.Token{Value: token}) if err != nil { return Twin{}, err } @@ -216,7 +217,7 @@ func (ts *twinsService) RemoveTwin(ctx context.Context, token, twinID string) (e var b []byte defer ts.publish(ctx, &twinID, &err, crudOp["removeSucc"], crudOp["removeFail"], &b) - _, err = ts.auth.Identify(ctx, &mainflux.Token{Value: token}) + _, err = ts.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return errors.ErrAuthentication } @@ -229,16 +230,16 @@ func (ts *twinsService) RemoveTwin(ctx context.Context, token, twinID string) (e } func (ts *twinsService) ListTwins(ctx context.Context, token string, offset uint64, limit uint64, name string, metadata Metadata) (Page, error) { - res, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) + res, err := ts.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return Page{}, errors.ErrAuthentication } - return ts.twins.RetrieveAll(ctx, res.GetEmail(), offset, limit, name, metadata) + return ts.twins.RetrieveAll(ctx, res.GetId(), offset, limit, name, metadata) } func (ts *twinsService) ListStates(ctx context.Context, token string, offset uint64, limit uint64, twinID string) (StatesPage, error) { - _, err := ts.auth.Identify(ctx, &mainflux.Token{Value: token}) + _, err := ts.auth.Identify(ctx, &policies.Token{Value: token}) if err != nil { return StatesPage{}, errors.ErrAuthentication } diff --git a/twins/service_test.go b/twins/service_test.go index a6f0e308fe..3f7a824ace 100644 --- a/twins/service_test.go +++ b/twins/service_test.go @@ -269,7 +269,7 @@ func TestSaveStates(t *testing.T) { require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) var recs = make([]senml.Record, numRecs) - mocks.CreateSenML(numRecs, recs) + mocks.CreateSenML(recs) var ttlAdded uint64 @@ -343,7 +343,7 @@ func TestListStates(t *testing.T) { require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) var recs = make([]senml.Record, numRecs) - mocks.CreateSenML(numRecs, recs) + mocks.CreateSenML(recs) 
message, err := mocks.CreateMessage(attr, recs) require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) err = svc.SaveStates(message) diff --git a/twins/tracing/states.go b/twins/tracing/states.go index 3c5c4314a2..f4116c7410 100644 --- a/twins/tracing/states.go +++ b/twins/tracing/states.go @@ -7,7 +7,7 @@ import ( "context" "github.com/mainflux/mainflux/twins" - opentracing "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) const ( @@ -20,13 +20,13 @@ const ( var _ twins.StateRepository = (*stateRepositoryMiddleware)(nil) type stateRepositoryMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer repo twins.StateRepository } // StateRepositoryMiddleware tracks request and their latency, and adds spans // to context. -func StateRepositoryMiddleware(tracer opentracing.Tracer, repo twins.StateRepository) twins.StateRepository { +func StateRepositoryMiddleware(tracer trace.Tracer, repo twins.StateRepository) twins.StateRepository { return stateRepositoryMiddleware{ tracer: tracer, repo: repo, @@ -34,41 +34,36 @@ func StateRepositoryMiddleware(tracer opentracing.Tracer, repo twins.StateReposi } func (trm stateRepositoryMiddleware) Save(ctx context.Context, st twins.State) error { - span := createSpan(ctx, trm.tracer, saveStateOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, saveStateOp) + defer span.End() return trm.repo.Save(ctx, st) } func (trm stateRepositoryMiddleware) Update(ctx context.Context, st twins.State) error { - span := createSpan(ctx, trm.tracer, updateStateOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, updateStateOp) + defer span.End() return trm.repo.Update(ctx, st) } func (trm stateRepositoryMiddleware) Count(ctx context.Context, tw twins.Twin) (int64, error) { - span := createSpan(ctx, trm.tracer, countStatesOp) - defer span.Finish() - ctx = 
opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, countStatesOp) + defer span.End() return trm.repo.Count(ctx, tw) } func (trm stateRepositoryMiddleware) RetrieveAll(ctx context.Context, offset, limit uint64, twinID string) (twins.StatesPage, error) { - span := createSpan(ctx, trm.tracer, retrieveAllStatesOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, retrieveAllStatesOp) + defer span.End() return trm.repo.RetrieveAll(ctx, offset, limit, twinID) } func (trm stateRepositoryMiddleware) RetrieveLast(ctx context.Context, twinID string) (twins.State, error) { - span := createSpan(ctx, trm.tracer, retrieveAllStatesOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, retrieveAllStatesOp) + defer span.End() return trm.repo.RetrieveLast(ctx, twinID) } diff --git a/twins/tracing/twins.go b/twins/tracing/twins.go index dcea5936f5..16e6107126 100644 --- a/twins/tracing/twins.go +++ b/twins/tracing/twins.go @@ -7,7 +7,7 @@ import ( "context" "github.com/mainflux/mainflux/twins" - opentracing "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) const ( @@ -23,12 +23,12 @@ const ( var _ twins.TwinRepository = (*twinRepositoryMiddleware)(nil) type twinRepositoryMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer repo twins.TwinRepository } // TwinRepositoryMiddleware tracks request and their latency, and adds spans to context. 
-func TwinRepositoryMiddleware(tracer opentracing.Tracer, repo twins.TwinRepository) twins.TwinRepository { +func TwinRepositoryMiddleware(tracer trace.Tracer, repo twins.TwinRepository) twins.TwinRepository { return twinRepositoryMiddleware{ tracer: tracer, repo: repo, @@ -36,60 +36,54 @@ func TwinRepositoryMiddleware(tracer opentracing.Tracer, repo twins.TwinReposito } func (trm twinRepositoryMiddleware) Save(ctx context.Context, tw twins.Twin) (string, error) { - span := createSpan(ctx, trm.tracer, saveTwinOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, saveTwinOp) + defer span.End() return trm.repo.Save(ctx, tw) } func (trm twinRepositoryMiddleware) Update(ctx context.Context, tw twins.Twin) error { - span := createSpan(ctx, trm.tracer, updateTwinOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, updateTwinOp) + defer span.End() return trm.repo.Update(ctx, tw) } func (trm twinRepositoryMiddleware) RetrieveByID(ctx context.Context, twinID string) (twins.Twin, error) { - span := createSpan(ctx, trm.tracer, retrieveTwinByIDOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, retrieveTwinByIDOp) + defer span.End() return trm.repo.RetrieveByID(ctx, twinID) } func (trm twinRepositoryMiddleware) RetrieveAll(ctx context.Context, owner string, offset, limit uint64, name string, metadata twins.Metadata) (twins.Page, error) { - span := createSpan(ctx, trm.tracer, retrieveAllTwinsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, retrieveAllTwinsOp) + defer span.End() return trm.repo.RetrieveAll(ctx, owner, offset, limit, name, metadata) } func (trm twinRepositoryMiddleware) RetrieveByAttribute(ctx context.Context, channel, subtopic string) ([]string, error) { - span := createSpan(ctx, trm.tracer, retrieveAllTwinsOp) 
- defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, retrieveAllTwinsOp) + defer span.End() return trm.repo.RetrieveByAttribute(ctx, channel, subtopic) } func (trm twinRepositoryMiddleware) Remove(ctx context.Context, twinID string) error { - span := createSpan(ctx, trm.tracer, removeTwinOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, trm.tracer, removeTwinOp) + defer span.End() return trm.repo.Remove(ctx, twinID) } type twinCacheMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer cache twins.TwinCache } // TwinCacheMiddleware tracks request and their latency, and adds spans to context. -func TwinCacheMiddleware(tracer opentracing.Tracer, cache twins.TwinCache) twins.TwinCache { +func TwinCacheMiddleware(tracer trace.Tracer, cache twins.TwinCache) twins.TwinCache { return twinCacheMiddleware{ tracer: tracer, cache: cache, @@ -97,51 +91,41 @@ func TwinCacheMiddleware(tracer opentracing.Tracer, cache twins.TwinCache) twins } func (tcm twinCacheMiddleware) Save(ctx context.Context, twin twins.Twin) error { - span := createSpan(ctx, tcm.tracer, saveTwinOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, tcm.tracer, saveTwinOp) + defer span.End() return tcm.cache.Save(ctx, twin) } func (tcm twinCacheMiddleware) SaveIDs(ctx context.Context, channel, subtopic string, ids []string) error { - span := createSpan(ctx, tcm.tracer, saveTwinsOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, tcm.tracer, saveTwinsOp) + defer span.End() return tcm.cache.SaveIDs(ctx, channel, subtopic, ids) } func (tcm twinCacheMiddleware) Update(ctx context.Context, twin twins.Twin) error { - span := createSpan(ctx, tcm.tracer, updateTwinOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, tcm.tracer, 
updateTwinOp) + defer span.End() return tcm.cache.Update(ctx, twin) } func (tcm twinCacheMiddleware) IDs(ctx context.Context, channel, subtopic string) ([]string, error) { - span := createSpan(ctx, tcm.tracer, retrieveTwinsByAttributeOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, tcm.tracer, retrieveTwinsByAttributeOp) + defer span.End() return tcm.cache.IDs(ctx, channel, subtopic) } func (tcm twinCacheMiddleware) Remove(ctx context.Context, twinID string) error { - span := createSpan(ctx, tcm.tracer, removeTwinOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := createSpan(ctx, tcm.tracer, removeTwinOp) + defer span.End() return tcm.cache.Remove(ctx, twinID) } -func createSpan(ctx context.Context, tracer opentracing.Tracer, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - return tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - return tracer.StartSpan(opName) +func createSpan(ctx context.Context, tracer trace.Tracer, opName string) (context.Context, trace.Span) { + + return tracer.Start(ctx, opName) } diff --git a/users/README.md b/users/README.md index 8e9dcb71ac..9f37dc8496 100644 --- a/users/README.md +++ b/users/README.md @@ -1,4 +1,4 @@ -# Users service +# Clients Users service provides an HTTP API for managing users. Through this API clients are able to do the following actions: @@ -45,8 +45,7 @@ default values. ## Deployment -The service itself is distributed as Docker container. Check the [`users`](https://github.com/mainflux/mainflux/blob/master/docker/docker-compose.yml#L109-L143) service section in -docker-compose to see how service is deployed. +The service itself is distributed as Docker container. Check the [`users`](https://github.com/mainflux/mainflux/blob/master/docker/docker-compose.yml#L109-L143) service section in docker-compose to see how service is deployed. 
To start the service outside of the container, execute the following shell script: @@ -95,4 +94,4 @@ If `MF_EMAIL_TEMPLATE` doesn't point to any file service will function but passw For more information about service capabilities and its usage, please check out the [API documentation](https://api.mainflux.io/?urls.primaryName=users-openapi.yml). -[doc]: https://docs.mainflux.io +[doc]: https://docs.mainflux.io \ No newline at end of file diff --git a/users/api/doc.go b/users/api/doc.go deleted file mode 100644 index fb3127e46b..0000000000 --- a/users/api/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package api contains API-related concerns: endpoint definitions, middlewares -// and all resource representations. -package api diff --git a/users/api/endpoint.go b/users/api/endpoint.go deleted file mode 100644 index 59ef11d698..0000000000 --- a/users/api/endpoint.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "context" - - "github.com/go-kit/kit/endpoint" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/users" -) - -func registrationEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(createUserReq) - if err := req.validate(); err != nil { - return createUserRes{}, err - } - uid, err := svc.Register(ctx, req.token, req.user) - if err != nil { - return createUserRes{}, err - } - ucr := createUserRes{ - ID: uid, - created: true, - } - - return ucr, nil - } -} - -// Password reset request endpoint. -// When successful password reset link is generated. -// Link is generated using MF_TOKEN_RESET_ENDPOINT env. -// and value from Referer header for host. -// {Referer}+{MF_TOKEN_RESET_ENDPOINT}+{token=TOKEN} -// http://mainflux.com/reset-request?token=xxxxxxxxxxx. -// Email with a link is being sent to the user. 
-// When user clicks on a link it should get the ui with form to -// enter new password, when form is submitted token and new password -// must be sent as PUT request to 'password/reset' passwordResetEndpoint -func passwordResetRequestEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(passwResetReq) - if err := req.validate(); err != nil { - return nil, err - } - res := passwResetReqRes{} - email := req.Email - if err := svc.GenerateResetToken(ctx, email, req.Host); err != nil { - return nil, err - } - res.Msg = MailSent - - return res, nil - } -} - -// This is endpoint that actually sets new password in password reset flow. -// When user clicks on a link in email finally ends on this endpoint as explained in -// the comment above. -func passwordResetEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(resetTokenReq) - if err := req.validate(); err != nil { - return nil, err - } - res := passwChangeRes{} - if err := svc.ResetPassword(ctx, req.Token, req.Password); err != nil { - return nil, err - } - return res, nil - } -} - -func viewUserEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(viewUserReq) - if err := req.validate(); err != nil { - return nil, err - } - - u, err := svc.ViewUser(ctx, req.token, req.id) - if err != nil { - return nil, err - } - return viewUserRes{ - ID: u.ID, - Email: u.Email, - Metadata: u.Metadata, - }, nil - } -} - -func viewProfileEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(viewUserReq) - if err := req.validate(); err != nil { - return nil, err - } - - u, err := svc.ViewProfile(ctx, req.token) - if err != nil { - return nil, err - } - return viewUserRes{ - ID: 
u.ID, - Email: u.Email, - Metadata: u.Metadata, - }, nil - } -} - -func listUsersEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listUsersReq) - if err := req.validate(); err != nil { - return users.UserPage{}, err - } - pm := users.PageMetadata{ - Offset: req.offset, - Limit: req.limit, - Email: req.email, - Status: req.status, - Metadata: req.metadata, - } - up, err := svc.ListUsers(ctx, req.token, pm) - if err != nil { - return users.UserPage{}, err - } - return buildUsersResponse(up), nil - } -} - -func updateUserEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(updateUserReq) - if err := req.validate(); err != nil { - return nil, err - } - user := users.User{ - Metadata: req.Metadata, - } - err := svc.UpdateUser(ctx, req.token, user) - if err != nil { - return nil, err - } - return updateUserRes{}, nil - } -} - -func passwordChangeEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(passwChangeReq) - if err := req.validate(); err != nil { - return nil, err - } - res := passwChangeRes{} - if err := svc.ChangePassword(ctx, req.token, req.Password, req.OldPassword); err != nil { - return nil, err - } - return res, nil - } -} - -func loginEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(userReq) - if err := req.validate(); err != nil { - return nil, err - } - token, err := svc.Login(ctx, req.user) - if err != nil { - return nil, err - } - - return tokenRes{token}, nil - } -} - -func listMembersEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(listMemberGroupReq) - if err := req.validate(); err != 
nil { - return userPageRes{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - - pm := users.PageMetadata{ - Offset: req.offset, - Limit: req.limit, - Status: req.status, - Metadata: req.metadata, - } - page, err := svc.ListMembers(ctx, req.token, req.id, pm) - if err != nil { - return userPageRes{}, err - } - - return buildUsersResponse(page), nil - } -} - -func enableUserEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(changeUserStatusReq) - if err := req.validate(); err != nil { - return nil, err - } - if err := svc.EnableUser(ctx, req.token, req.id); err != nil { - return nil, err - } - return deleteRes{}, nil - } -} - -func disableUserEndpoint(svc users.Service) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (interface{}, error) { - req := request.(changeUserStatusReq) - if err := req.validate(); err != nil { - return nil, err - } - if err := svc.DisableUser(ctx, req.token, req.id); err != nil { - return nil, err - } - return deleteRes{}, nil - } -} - -func buildUsersResponse(up users.UserPage) userPageRes { - res := userPageRes{ - pageRes: pageRes{ - Total: up.Total, - Offset: up.Offset, - Limit: up.Limit, - }, - Users: []viewUserRes{}, - } - for _, user := range up.Users { - view := viewUserRes{ - ID: user.ID, - Email: user.Email, - Metadata: user.Metadata, - } - res.Users = append(res.Users, view) - } - return res -} diff --git a/users/api/endpoint_test.go b/users/api/endpoint_test.go deleted file mode 100644 index 3ab1d4db3b..0000000000 --- a/users/api/endpoint_test.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package api_test - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "regexp" - "strings" - "testing" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/internal/apiutil" - 
"github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/users" - "github.com/mainflux/mainflux/users/api" - "github.com/mainflux/mainflux/users/bcrypt" - "github.com/mainflux/mainflux/users/mocks" - "github.com/opentracing/opentracing-go/mocktracer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - contentType = "application/json" - validEmail = "user@example.com" - invalidEmail = "userexample.com" - validPass = "password" - invalidPass = "wrong" - memberRelationKey = "member" - authoritiesObjKey = "authorities" -) - -var ( - user = users.User{Email: validEmail, Password: validPass} - notFoundRes = toJSON(apiutil.ErrorRes{Err: errors.ErrNotFound.Error()}) - unauthRes = toJSON(apiutil.ErrorRes{Err: errors.ErrAuthentication.Error()}) - malformedRes = toJSON(apiutil.ErrorRes{Err: errors.ErrMalformedEntity.Error()}) - weakPassword = toJSON(apiutil.ErrorRes{Err: users.ErrPasswordFormat.Error()}) - unsupportedRes = toJSON(apiutil.ErrorRes{Err: errors.ErrUnsupportedContentType.Error()}) - missingTokRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrBearerToken.Error()}) - missingEmailRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrMissingEmail.Error()}) - missingPassRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrMissingPass.Error()}) - invalidRestPassRes = toJSON(apiutil.ErrorRes{Err: apiutil.ErrInvalidResetPass.Error()}) - passRegex = regexp.MustCompile("^.{8,}$") -) - -type testRequest struct { - client *http.Client - method string - url string - contentType string - token string - body io.Reader -} - -func (tr testRequest) make() (*http.Response, error) { - req, err := http.NewRequest(tr.method, tr.url, tr.body) - if err != nil { - return nil, err - } - if tr.token != "" { - req.Header.Set("Authorization", apiutil.BearerPrefix+tr.token) - } - if tr.contentType != "" { - req.Header.Set("Content-Type", tr.contentType) - } - - 
req.Header.Set("Referer", "http://localhost") - return tr.client.Do(req) -} - -func newService() users.Service { - usersRepo := mocks.NewUserRepository() - hasher := bcrypt.New() - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - email := mocks.NewEmailer() - idProvider := uuid.New() - - return users.New(usersRepo, hasher, auth, email, idProvider, passRegex) -} - -func newServer(svc users.Service) *httptest.Server { - logger := logger.NewMock() - mux := api.MakeHandler(svc, mocktracer.New(), logger) - return httptest.NewServer(mux) -} - -func toJSON(data interface{}) string { - jsonData, _ := json.Marshal(data) - return string(jsonData) -} - -func TestRegister(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - data := toJSON(user) - userNew := toJSON(users.User{Email: "user2@example.com", Password: "password"}) - invalidData := toJSON(users.User{Email: invalidEmail, Password: validPass}) - invalidPasswordData := toJSON(users.User{Email: validEmail, Password: invalidPass}) - invalidFieldData := fmt.Sprintf(`{"email": "%s", "pass": "%s"}`, user.Email, user.Password) - emptyEmailData := `{"email": ""}` - emptyHostData := fmt.Sprintf(`{"email": "%s"}`, user.Email) - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: 0}) - token := tkn.GetValue() - - cases := []struct { - desc string - req string - contentType string - statusCode int - token string - }{ - { - desc: "register new 
user", - req: data, - contentType: contentType, - statusCode: http.StatusCreated, - token: token, - }, - { - desc: "register user with empty token", - req: data, - contentType: contentType, - statusCode: http.StatusUnauthorized, - token: "", - }, - { - desc: "register existing user", - req: data, - contentType: contentType, - statusCode: http.StatusConflict, - token: token, - }, - { - desc: "register user with invalid email address", - req: invalidData, - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register user with weak password", - req: invalidPasswordData, - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register new user with unauthenticated access", - req: userNew, - contentType: contentType, - statusCode: http.StatusUnauthorized, - token: "wrong", - }, - { - desc: "register existing user with unauthenticated access", - req: data, - contentType: contentType, - statusCode: http.StatusUnauthorized, - token: "wrong", - }, - { - desc: "register user with invalid request format", - req: "{", - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register user with empty email request", - req: emptyEmailData, - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register user with empty host request", - req: emptyHostData, - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register user with empty request", - req: "", - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register user with invalid field name", - req: invalidFieldData, - contentType: contentType, - statusCode: http.StatusBadRequest, - token: token, - }, - { - desc: "register user with missing content type", - req: data, - contentType: "", - statusCode: http.StatusUnsupportedMediaType, - token: token, - }, - } - - for _, 
tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPost, - url: fmt.Sprintf("%s/users", ts.URL), - contentType: tc.contentType, - token: tc.token, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - } -} - -func TestLogin(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: 0}) - token := tkn.GetValue() - tokenData := toJSON(map[string]string{"token": token}) - data := toJSON(user) - invalidEmailData := toJSON(users.User{ - Email: invalidEmail, - Password: validPass, - }) - invalidData := toJSON(users.User{ - Email: validEmail, - Password: "invalid_password", - }) - nonexistentData := toJSON(users.User{ - Email: "non-existentuser@example.com", - Password: validPass, - }) - _, err := svc.Register(context.Background(), token, user) - require.Nil(t, err, fmt.Sprintf("register user got unexpected error: %s", err)) - - cases := []struct { - desc string - req string - contentType string - statusCode int - res string - }{ - { - desc: "login with valid credentials", - req: data, - contentType: contentType, - statusCode: http.StatusCreated, - res: tokenData, - }, - { - desc: "login with invalid credentials", - req: invalidData, - contentType: contentType, - statusCode: http.StatusUnauthorized, - res: unauthRes, - }, - { - desc: "login with invalid email address", - req: invalidEmailData, - 
contentType: contentType, - statusCode: http.StatusBadRequest, - res: malformedRes, - }, - { - desc: "login non-existent user", - req: nonexistentData, - contentType: contentType, - statusCode: http.StatusUnauthorized, - res: unauthRes, - }, - { - desc: "login with invalid request format", - req: "{", - contentType: contentType, - statusCode: http.StatusBadRequest, - res: malformedRes, - }, - { - desc: "login with empty JSON request", - req: "{}", - contentType: contentType, - statusCode: http.StatusBadRequest, - res: malformedRes, - }, - { - desc: "login with empty request", - req: "", - contentType: contentType, - statusCode: http.StatusBadRequest, - res: malformedRes, - }, - { - desc: "login with missing content type", - req: data, - contentType: "", - statusCode: http.StatusUnsupportedMediaType, - res: unsupportedRes, - }, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPost, - url: fmt.Sprintf("%s/tokens", ts.URL), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - token := strings.Trim(string(body), "\n") - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, token, fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, token)) - } -} - -func TestUser(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - tkn, _ := 
auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: 0}) - token := tkn.GetValue() - - userID, err := svc.Register(context.Background(), token, user) - require.Nil(t, err, fmt.Sprintf("register user got unexpected error: %s", err)) - - cases := []struct { - desc string - token string - statusCode int - res string - }{ - { - desc: "user info with valid token", - token: token, - statusCode: http.StatusOK, - res: "", - }, - { - desc: "user info with invalid token", - token: "", - statusCode: http.StatusUnauthorized, - res: "", - }, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodGet, - url: fmt.Sprintf("%s/users/%s", ts.URL, userID), - token: tc.token, - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - token := strings.Trim(string(body), "\n") - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, "", fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, token)) - } -} - -func TestPasswordResetRequest(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - data := toJSON(user) - - nonexistentData := toJSON(users.User{ - Email: "non-existentuser@example.com", - Password: validPass, - }) - - expectedExisting := toJSON(struct { - Msg string `json:"msg"` - }{ - api.MailSent, - }) - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: 0}) - 
token := tkn.GetValue() - - _, err := svc.Register(context.Background(), token, user) - require.Nil(t, err, fmt.Sprintf("register user got unexpected error: %s", err)) - - cases := []struct { - desc string - req string - contentType string - statusCode int - res string - }{ - {"password reset request with valid email", data, contentType, http.StatusCreated, expectedExisting}, - {"password reset request with invalid email", nonexistentData, contentType, http.StatusNotFound, notFoundRes}, - {"password reset request with invalid request format", "{", contentType, http.StatusBadRequest, malformedRes}, - {"password reset request with empty email request", "{}", contentType, http.StatusBadRequest, missingEmailRes}, - {"password reset request with empty request", "", contentType, http.StatusBadRequest, malformedRes}, - {"password reset request with missing content type", data, "", http.StatusUnsupportedMediaType, unsupportedRes}, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPost, - url: fmt.Sprintf("%s/password/reset-request", ts.URL), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - } - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - token := strings.Trim(string(body), "\n") - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, token, fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, token)) - } -} - -func TestPasswordReset(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - reqData := struct { - Token string `json:"token,omitempty"` - Password string `json:"password,omitempty"` - ConfPass string `json:"confirm_password,omitempty"` - }{} - - mockAuthzDB := 
map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - - tkn, err := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: 0}) - assert.Nil(t, err, fmt.Sprintf("issue user token error: %s", err)) - - token := tkn.GetValue() - - _, err = svc.Register(context.Background(), token, user) - require.Nil(t, err, fmt.Sprintf("register user got unexpected error: %s", err)) - - reqData.Password = user.Password - reqData.ConfPass = user.Password - reqData.Token = token - reqExisting := toJSON(reqData) - - reqData.Token = "wrong" - - reqNoExist := toJSON(reqData) - - reqData.Token = token - - reqData.ConfPass = invalidPass - reqPassNoMatch := toJSON(reqData) - - reqData.Password = invalidPass - reqPassWeak := toJSON(reqData) - - cases := []struct { - desc string - req string - contentType string - statusCode int - res string - tok string - }{ - {"password reset with valid token", reqExisting, contentType, http.StatusCreated, "{}", token}, - {"password reset with invalid token", reqNoExist, contentType, http.StatusUnauthorized, unauthRes, token}, - {"password reset with confirm password not matching", reqPassNoMatch, contentType, http.StatusBadRequest, invalidRestPassRes, token}, - {"password reset request with invalid request format", "{", contentType, http.StatusBadRequest, malformedRes, token}, - {"password reset request with empty JSON request", "{}", contentType, http.StatusBadRequest, missingPassRes, token}, - {"password reset request with empty request", "", contentType, http.StatusBadRequest, malformedRes, token}, - {"password reset request with missing content type", reqExisting, "", http.StatusUnsupportedMediaType, unsupportedRes, token}, - {"password reset with weak password", reqPassWeak, contentType, http.StatusBadRequest, 
weakPassword, token}, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPut, - url: fmt.Sprintf("%s/password/reset", ts.URL), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - token := strings.Trim(string(body), "\n") - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, token, fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, token)) - } -} - -func TestPasswordChange(t *testing.T) { - svc := newService() - ts := newServer(svc) - defer ts.Close() - client := ts.Client() - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: authoritiesObjKey, Relation: memberRelationKey}) - - auth := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - - tkn, _ := auth.Issue(context.Background(), &mainflux.IssueReq{Id: user.ID, Email: user.Email, Type: 0}) - token := tkn.GetValue() - - reqData := struct { - Token string `json:"token,omitempty"` - Password string `json:"password,omitempty"` - OldPassw string `json:"old_password,omitempty"` - }{} - - _, err := svc.Register(context.Background(), token, user) - require.Nil(t, err, fmt.Sprintf("register user got unexpected error: %s", err)) - - reqData.Password = user.Password - reqData.OldPassw = user.Password - reqData.Token = token - dataResExisting := toJSON(reqData) - - reqNoExist := toJSON(reqData) - - reqData.OldPassw = invalidPass - reqWrongPass := toJSON(reqData) - - reqData.OldPassw = user.Password - reqData.Password = invalidPass - reqWeakPass := toJSON(reqData) - - cases := []struct { - desc string - req string - 
contentType string - statusCode int - res string - tok string - }{ - {"password change with valid token", dataResExisting, contentType, http.StatusCreated, "{}", token}, - {"password change with empty token", reqNoExist, contentType, http.StatusUnauthorized, missingTokRes, ""}, - {"password change with invalid old password", reqWrongPass, contentType, http.StatusUnauthorized, unauthRes, token}, - {"password change with invalid new password", reqWeakPass, contentType, http.StatusBadRequest, weakPassword, token}, - {"password change with empty JSON request", "{}", contentType, http.StatusBadRequest, missingPassRes, token}, - {"password change empty request", "", contentType, http.StatusBadRequest, malformedRes, token}, - {"password change missing content type", dataResExisting, "", http.StatusUnsupportedMediaType, unsupportedRes, token}, - } - - for _, tc := range cases { - req := testRequest{ - client: client, - method: http.MethodPatch, - url: fmt.Sprintf("%s/password", ts.URL), - contentType: tc.contentType, - body: strings.NewReader(tc.req), - token: tc.tok, - } - - res, err := req.make() - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - body, err := ioutil.ReadAll(res.Body) - assert.Nil(t, err, fmt.Sprintf("%s: unexpected error %s", tc.desc, err)) - token := strings.Trim(string(body), "\n") - - assert.Equal(t, tc.statusCode, res.StatusCode, fmt.Sprintf("%s: expected status code %d got %d", tc.desc, tc.statusCode, res.StatusCode)) - assert.Equal(t, tc.res, token, fmt.Sprintf("%s: expected body %s got %s", tc.desc, tc.res, token)) - } -} diff --git a/users/api/logging.go b/users/api/logging.go deleted file mode 100644 index 718f108f85..0000000000 --- a/users/api/logging.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -//go:build !test - -package api - -import ( - "context" - "fmt" - "time" - - log "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/users" -) - -var 
_ users.Service = (*loggingMiddleware)(nil) - -type loggingMiddleware struct { - logger log.Logger - svc users.Service -} - -// LoggingMiddleware adds logging facilities to the core service. -func LoggingMiddleware(svc users.Service, logger log.Logger) users.Service { - return &loggingMiddleware{logger, svc} -} - -func (lm *loggingMiddleware) Register(ctx context.Context, token string, user users.User) (uid string, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method register for user %s took %s to complete", user.Email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - - }(time.Now()) - - return lm.svc.Register(ctx, token, user) -} - -func (lm *loggingMiddleware) Login(ctx context.Context, user users.User) (token string, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method login for user %s took %s to complete", user.Email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.Login(ctx, user) -} - -func (lm *loggingMiddleware) ViewUser(ctx context.Context, token, id string) (u users.User, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method view_user for user %s took %s to complete", u.Email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ViewUser(ctx, token, id) -} - -func (lm *loggingMiddleware) ViewProfile(ctx context.Context, token string) (u users.User, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method view_profile for user %s took %s to complete", u.Email, time.Since(begin)) - if err != nil { - 
lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ViewProfile(ctx, token) -} - -func (lm *loggingMiddleware) ListUsers(ctx context.Context, token string, pm users.PageMetadata) (e users.UserPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_users for token %s took %s to complete", token, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListUsers(ctx, token, pm) -} - -func (lm *loggingMiddleware) UpdateUser(ctx context.Context, token string, u users.User) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method update_user for user %s took %s to complete", u.Email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.UpdateUser(ctx, token, u) -} - -func (lm *loggingMiddleware) GenerateResetToken(ctx context.Context, email, host string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method generate_reset_token for user %s took %s to complete", email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.GenerateResetToken(ctx, email, host) -} - -func (lm *loggingMiddleware) ChangePassword(ctx context.Context, email, password, oldPassword string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method change_password for user %s took %s to complete", email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - 
lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ChangePassword(ctx, email, password, oldPassword) -} - -func (lm *loggingMiddleware) ResetPassword(ctx context.Context, email, password string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method reset_password for user %s took %s to complete", email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ResetPassword(ctx, email, password) -} - -func (lm *loggingMiddleware) SendPasswordReset(ctx context.Context, host, email, token string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method send_password_reset for user %s took %s to complete", email, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.SendPasswordReset(ctx, host, email, token) -} - -func (lm *loggingMiddleware) ListMembers(ctx context.Context, token, groupID string, pm users.PageMetadata) (mp users.UserPage, err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method list_members for group %s took %s to complete", groupID, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.ListMembers(ctx, token, groupID, pm) -} - -func (lm *loggingMiddleware) EnableUser(ctx context.Context, token string, id string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method enable_user for user %s took %s to complete", id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - 
lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.EnableUser(ctx, token, id) -} - -func (lm *loggingMiddleware) DisableUser(ctx context.Context, token string, id string) (err error) { - defer func(begin time.Time) { - message := fmt.Sprintf("Method disable_user for user %s took %s to complete", id, time.Since(begin)) - if err != nil { - lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) - return - } - lm.logger.Info(fmt.Sprintf("%s without errors.", message)) - }(time.Now()) - - return lm.svc.DisableUser(ctx, token, id) -} diff --git a/users/api/metrics.go b/users/api/metrics.go deleted file mode 100644 index 02d8558bef..0000000000 --- a/users/api/metrics.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -//go:build !test - -package api - -import ( - "context" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/mainflux/mainflux/users" -) - -var _ users.Service = (*metricsMiddleware)(nil) - -type metricsMiddleware struct { - counter metrics.Counter - latency metrics.Histogram - svc users.Service -} - -// MetricsMiddleware instruments core service by tracking request count and latency. 
-func MetricsMiddleware(svc users.Service, counter metrics.Counter, latency metrics.Histogram) users.Service { - return &metricsMiddleware{ - counter: counter, - latency: latency, - svc: svc, - } -} - -func (ms *metricsMiddleware) Register(ctx context.Context, token string, user users.User) (string, error) { - defer func(begin time.Time) { - ms.counter.With("method", "register").Add(1) - ms.latency.With("method", "register").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Register(ctx, token, user) -} - -func (ms *metricsMiddleware) Login(ctx context.Context, user users.User) (string, error) { - defer func(begin time.Time) { - ms.counter.With("method", "login").Add(1) - ms.latency.With("method", "login").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.Login(ctx, user) -} - -func (ms *metricsMiddleware) ViewUser(ctx context.Context, token, id string) (users.User, error) { - defer func(begin time.Time) { - ms.counter.With("method", "view_user").Add(1) - ms.latency.With("method", "view_user").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ViewUser(ctx, token, id) -} - -func (ms *metricsMiddleware) ViewProfile(ctx context.Context, token string) (users.User, error) { - defer func(begin time.Time) { - ms.counter.With("method", "view_profile").Add(1) - ms.latency.With("method", "view_profile").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ViewProfile(ctx, token) -} - -func (ms *metricsMiddleware) ListUsers(ctx context.Context, token string, pm users.PageMetadata) (users.UserPage, error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_users").Add(1) - ms.latency.With("method", "list_users").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListUsers(ctx, token, pm) -} - -func (ms *metricsMiddleware) UpdateUser(ctx context.Context, token string, u users.User) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", 
"update_user").Add(1) - ms.latency.With("method", "update_user").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.UpdateUser(ctx, token, u) -} - -func (ms *metricsMiddleware) GenerateResetToken(ctx context.Context, email, host string) error { - defer func(begin time.Time) { - ms.counter.With("method", "generate_reset_token").Add(1) - ms.latency.With("method", "generate_reset_token").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.GenerateResetToken(ctx, email, host) -} - -func (ms *metricsMiddleware) ChangePassword(ctx context.Context, email, password, oldPassword string) error { - defer func(begin time.Time) { - ms.counter.With("method", "change_password").Add(1) - ms.latency.With("method", "change_password").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ChangePassword(ctx, email, password, oldPassword) -} - -func (ms *metricsMiddleware) ResetPassword(ctx context.Context, email, password string) error { - defer func(begin time.Time) { - ms.counter.With("method", "reset_password").Add(1) - ms.latency.With("method", "reset_password").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ResetPassword(ctx, email, password) -} - -func (ms *metricsMiddleware) SendPasswordReset(ctx context.Context, host, email, token string) error { - defer func(begin time.Time) { - ms.counter.With("method", "send_password_reset").Add(1) - ms.latency.With("method", "send_password_reset").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.SendPasswordReset(ctx, host, email, token) -} - -func (ms *metricsMiddleware) ListMembers(ctx context.Context, token, groupID string, pm users.PageMetadata) (users.UserPage, error) { - defer func(begin time.Time) { - ms.counter.With("method", "list_members").Add(1) - ms.latency.With("method", "list_members").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.ListMembers(ctx, token, groupID, pm) -} - -func (ms *metricsMiddleware) 
EnableUser(ctx context.Context, token string, id string) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", "enable_user").Add(1) - ms.latency.With("method", "enable_user").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.EnableUser(ctx, token, id) -} - -func (ms *metricsMiddleware) DisableUser(ctx context.Context, token string, id string) (err error) { - defer func(begin time.Time) { - ms.counter.With("method", "disable_user").Add(1) - ms.latency.With("method", "disable_user").Observe(time.Since(begin).Seconds()) - }(time.Now()) - - return ms.svc.DisableUser(ctx, token, id) -} diff --git a/users/api/requests.go b/users/api/requests.go deleted file mode 100644 index e3e2fcba22..0000000000 --- a/users/api/requests.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/users" -) - -const ( - maxLimitSize = 100 - maxEmailSize = 1024 -) - -type userReq struct { - user users.User -} - -func (req userReq) validate() error { - return req.user.Validate() -} - -type createUserReq struct { - user users.User - token string -} - -func (req createUserReq) validate() error { - return req.user.Validate() -} - -type viewUserReq struct { - token string - id string -} - -func (req viewUserReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - return nil -} - -type listUsersReq struct { - token string - status string - offset uint64 - limit uint64 - email string - metadata users.Metadata -} - -func (req listUsersReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.limit > maxLimitSize || req.limit < 1 { - return apiutil.ErrLimitSize - } - - if len(req.email) > maxEmailSize { - return apiutil.ErrEmailSize - } - if req.status != users.AllStatusKey && - req.status != users.EnabledStatusKey && - req.status != 
users.DisabledStatusKey { - return apiutil.ErrInvalidStatus - } - - return nil -} - -type updateUserReq struct { - token string - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (req updateUserReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - return nil -} - -type passwResetReq struct { - Email string `json:"email"` - Host string `json:"host"` -} - -func (req passwResetReq) validate() error { - if req.Email == "" { - return apiutil.ErrMissingEmail - } - - if req.Host == "" { - return apiutil.ErrMissingHost - } - - return nil -} - -type resetTokenReq struct { - Token string `json:"token"` - Password string `json:"password"` - ConfPass string `json:"confirm_password"` -} - -func (req resetTokenReq) validate() error { - if req.Password == "" { - return apiutil.ErrMissingPass - } - - if req.ConfPass == "" { - return apiutil.ErrMissingConfPass - } - - if req.Token == "" { - return apiutil.ErrBearerToken - } - - if req.Password != req.ConfPass { - return apiutil.ErrInvalidResetPass - } - - return nil -} - -type passwChangeReq struct { - token string - Password string `json:"password"` - OldPassword string `json:"old_password"` -} - -func (req passwChangeReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - if req.OldPassword == "" { - return apiutil.ErrMissingPass - } - return nil -} - -type listMemberGroupReq struct { - token string - status string - offset uint64 - limit uint64 - metadata users.Metadata - id string -} - -func (req listMemberGroupReq) validate() error { - if req.token == "" { - return apiutil.ErrBearerToken - } - - if req.id == "" { - return apiutil.ErrMissingID - } - if req.status != users.AllStatusKey && - req.status != users.EnabledStatusKey && - req.status != users.DisabledStatusKey { - return apiutil.ErrInvalidStatus - } - return nil -} - -type changeUserStatusReq struct { - token string - id string -} - -func (req changeUserStatusReq) validate() error { - if 
req.token == "" { - return apiutil.ErrBearerToken - } - if req.id == "" { - return apiutil.ErrMissingID - } - return nil -} diff --git a/users/api/responses.go b/users/api/responses.go deleted file mode 100644 index 969e4d32b3..0000000000 --- a/users/api/responses.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "fmt" - "net/http" - - "github.com/mainflux/mainflux" -) - -var ( - _ mainflux.Response = (*tokenRes)(nil) - _ mainflux.Response = (*viewUserRes)(nil) - _ mainflux.Response = (*passwChangeRes)(nil) - _ mainflux.Response = (*createUserRes)(nil) - _ mainflux.Response = (*deleteRes)(nil) -) - -// MailSent message response when link is sent -const MailSent = "Email with reset link is sent" - -type pageRes struct { - Total uint64 `json:"total"` - Offset uint64 `json:"offset"` - Limit uint64 `json:"limit"` -} - -type createUserRes struct { - ID string - created bool -} - -func (res createUserRes) Code() int { - if res.created { - return http.StatusCreated - } - - return http.StatusOK -} - -func (res createUserRes) Headers() map[string]string { - if res.created { - return map[string]string{ - "Location": fmt.Sprintf("/users/%s", res.ID), - } - } - - return map[string]string{} -} - -func (res createUserRes) Empty() bool { - return true -} - -type tokenRes struct { - Token string `json:"token,omitempty"` -} - -func (res tokenRes) Code() int { - return http.StatusCreated -} - -func (res tokenRes) Headers() map[string]string { - return map[string]string{} -} - -func (res tokenRes) Empty() bool { - return res.Token == "" -} - -type updateUserRes struct{} - -func (res updateUserRes) Code() int { - return http.StatusOK -} - -func (res updateUserRes) Headers() map[string]string { - return map[string]string{} -} - -func (res updateUserRes) Empty() bool { - return true -} - -type viewUserRes struct { - ID string `json:"id"` - Email string `json:"email"` - Metadata map[string]interface{} 
`json:"metadata,omitempty"` -} - -func (res viewUserRes) Code() int { - return http.StatusOK -} - -func (res viewUserRes) Headers() map[string]string { - return map[string]string{} -} - -func (res viewUserRes) Empty() bool { - return false -} - -type userPageRes struct { - pageRes - Users []viewUserRes `json:"users"` -} - -func (res userPageRes) Code() int { - return http.StatusOK -} - -func (res userPageRes) Headers() map[string]string { - return map[string]string{} -} - -func (res userPageRes) Empty() bool { - return false -} - -type passwResetReqRes struct { - Msg string `json:"msg"` -} - -func (res passwResetReqRes) Code() int { - return http.StatusCreated -} - -func (res passwResetReqRes) Headers() map[string]string { - return map[string]string{} -} - -func (res passwResetReqRes) Empty() bool { - return false -} - -type passwChangeRes struct { -} - -func (res passwChangeRes) Code() int { - return http.StatusCreated -} - -func (res passwChangeRes) Headers() map[string]string { - return map[string]string{} -} - -func (res passwChangeRes) Empty() bool { - return false -} - -type deleteRes struct{} - -func (res deleteRes) Code() int { - return http.StatusNoContent -} - -func (res deleteRes) Headers() map[string]string { - return map[string]string{} -} - -func (res deleteRes) Empty() bool { - return true -} diff --git a/users/api/transport.go b/users/api/transport.go deleted file mode 100644 index 5fb2886db1..0000000000 --- a/users/api/transport.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package api - -import ( - "context" - "encoding/json" - "net/http" - "strings" - - kitot "github.com/go-kit/kit/tracing/opentracing" - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-zoo/bone" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/logger" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - 
"github.com/mainflux/mainflux/users" - opentracing "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -const ( - contentType = "application/json" - offsetKey = "offset" - limitKey = "limit" - emailKey = "email" - metadataKey = "metadata" - statusKey = "status" - defOffset = 0 - defLimit = 10 -) - -// MakeHandler returns a HTTP handler for API endpoints. -func MakeHandler(svc users.Service, tracer opentracing.Tracer, logger logger.Logger) http.Handler { - opts := []kithttp.ServerOption{ - kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, encodeError)), - } - - mux := bone.New() - - mux.Post("/users", kithttp.NewServer( - kitot.TraceServer(tracer, "register")(registrationEndpoint(svc)), - decodeCreateUserReq, - encodeResponse, - opts..., - )) - - mux.Get("/users/profile", kithttp.NewServer( - kitot.TraceServer(tracer, "view_profile")(viewProfileEndpoint(svc)), - decodeViewProfile, - encodeResponse, - opts..., - )) - - mux.Get("/users/:userID", kithttp.NewServer( - kitot.TraceServer(tracer, "view_user")(viewUserEndpoint(svc)), - decodeViewUser, - encodeResponse, - opts..., - )) - - mux.Get("/users", kithttp.NewServer( - kitot.TraceServer(tracer, "list_users")(listUsersEndpoint(svc)), - decodeListUsers, - encodeResponse, - opts..., - )) - - mux.Put("/users", kithttp.NewServer( - kitot.TraceServer(tracer, "update_user")(updateUserEndpoint(svc)), - decodeUpdateUser, - encodeResponse, - opts..., - )) - - mux.Post("/password/reset-request", kithttp.NewServer( - kitot.TraceServer(tracer, "res-req")(passwordResetRequestEndpoint(svc)), - decodePasswordResetRequest, - encodeResponse, - opts..., - )) - - mux.Put("/password/reset", kithttp.NewServer( - kitot.TraceServer(tracer, "reset")(passwordResetEndpoint(svc)), - decodePasswordReset, - encodeResponse, - opts..., - )) - - mux.Patch("/password", kithttp.NewServer( - kitot.TraceServer(tracer, "reset")(passwordChangeEndpoint(svc)), - decodePasswordChange, - 
encodeResponse, - opts..., - )) - - mux.Get("/groups/:groupID", kithttp.NewServer( - kitot.TraceServer(tracer, "list_members")(listMembersEndpoint(svc)), - decodeListMembersRequest, - encodeResponse, - opts..., - )) - - mux.Post("/tokens", kithttp.NewServer( - kitot.TraceServer(tracer, "login")(loginEndpoint(svc)), - decodeCredentials, - encodeResponse, - opts..., - )) - - mux.Post("/users/:userID/enable", kithttp.NewServer( - kitot.TraceServer(tracer, "enable_user")(enableUserEndpoint(svc)), - decodeChangeUserStatus, - encodeResponse, - opts..., - )) - - mux.Post("/users/:userID/disable", kithttp.NewServer( - kitot.TraceServer(tracer, "disable_user")(disableUserEndpoint(svc)), - decodeChangeUserStatus, - encodeResponse, - opts..., - )) - - mux.GetFunc("/health", mainflux.Health("users")) - mux.Handle("/metrics", promhttp.Handler()) - - return mux -} - -func decodeViewUser(_ context.Context, r *http.Request) (interface{}, error) { - req := viewUserReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "userID"), - } - - return req, nil -} - -func decodeViewProfile(_ context.Context, r *http.Request) (interface{}, error) { - req := viewUserReq{token: apiutil.ExtractBearerToken(r)} - - return req, nil -} - -func decodeListUsers(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - e, err := apiutil.ReadStringQuery(r, emailKey, "") - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - - s, err := apiutil.ReadStringQuery(r, statusKey, users.EnabledStatusKey) - if err != nil { - return nil, err - } - req := listUsersReq{ - token: apiutil.ExtractBearerToken(r), - status: s, - offset: o, - limit: l, - email: e, - metadata: m, - } - return req, nil -} - -func 
decodeUpdateUser(_ context.Context, r *http.Request) (interface{}, error) { - req := updateUserReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeCredentials(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - var user users.User - if err := json.NewDecoder(r.Body).Decode(&user); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - user.Email = strings.TrimSpace(user.Email) - return userReq{user}, nil -} - -func decodeCreateUserReq(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - var user users.User - if err := json.NewDecoder(r.Body).Decode(&user); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - user.Email = strings.TrimSpace(user.Email) - req := createUserReq{ - user: user, - token: apiutil.ExtractBearerToken(r), - } - - return req, nil -} - -func decodePasswordResetRequest(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - var req passwResetReq - - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - req.Host = r.Header.Get("Referer") - return req, nil -} - -func decodePasswordReset(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - var req resetTokenReq - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, 
err) - } - - return req, nil -} - -func decodePasswordChange(_ context.Context, r *http.Request) (interface{}, error) { - if !strings.Contains(r.Header.Get("Content-Type"), contentType) { - return nil, errors.ErrUnsupportedContentType - } - - req := passwChangeReq{token: apiutil.ExtractBearerToken(r)} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return nil, errors.Wrap(errors.ErrMalformedEntity, err) - } - - return req, nil -} - -func decodeListMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { - o, err := apiutil.ReadUintQuery(r, offsetKey, defOffset) - if err != nil { - return nil, err - } - - l, err := apiutil.ReadUintQuery(r, limitKey, defLimit) - if err != nil { - return nil, err - } - - m, err := apiutil.ReadMetadataQuery(r, metadataKey, nil) - if err != nil { - return nil, err - } - s, err := apiutil.ReadStringQuery(r, statusKey, users.EnabledStatusKey) - if err != nil { - return nil, err - } - - req := listMemberGroupReq{ - token: apiutil.ExtractBearerToken(r), - status: s, - id: bone.GetValue(r, "groupID"), - offset: o, - limit: l, - metadata: m, - } - return req, nil -} - -func decodeChangeUserStatus(_ context.Context, r *http.Request) (interface{}, error) { - req := changeUserStatusReq{ - token: apiutil.ExtractBearerToken(r), - id: bone.GetValue(r, "userID"), - } - - return req, nil -} - -func encodeResponse(_ context.Context, w http.ResponseWriter, response interface{}) error { - if ar, ok := response.(mainflux.Response); ok { - for k, v := range ar.Headers() { - w.Header().Set(k, v) - } - w.Header().Set("Content-Type", contentType) - w.WriteHeader(ar.Code()) - - if ar.Empty() { - return nil - } - } - - return json.NewEncoder(w).Encode(response) -} - -func encodeError(_ context.Context, err error, w http.ResponseWriter) { - switch { - case errors.Contains(err, errors.ErrInvalidQueryParams), - errors.Contains(err, errors.ErrMalformedEntity), - errors.Contains(err, users.ErrPasswordFormat), - err == 
apiutil.ErrMissingEmail, - err == apiutil.ErrMissingHost, - err == apiutil.ErrMissingPass, - err == apiutil.ErrMissingConfPass, - err == apiutil.ErrLimitSize, - err == apiutil.ErrOffsetSize, - err == apiutil.ErrInvalidResetPass: - w.WriteHeader(http.StatusBadRequest) - case errors.Contains(err, errors.ErrAuthentication), - err == apiutil.ErrBearerToken: - w.WriteHeader(http.StatusUnauthorized) - case errors.Contains(err, errors.ErrAuthorization): - w.WriteHeader(http.StatusForbidden) - case errors.Contains(err, errors.ErrConflict): - w.WriteHeader(http.StatusConflict) - case errors.Contains(err, errors.ErrUnsupportedContentType): - w.WriteHeader(http.StatusUnsupportedMediaType) - case errors.Contains(err, errors.ErrNotFound): - w.WriteHeader(http.StatusNotFound) - - case errors.Contains(err, uuid.ErrGeneratingID), - errors.Contains(err, users.ErrRecoveryToken): - w.WriteHeader(http.StatusInternalServerError) - - case errors.Contains(err, errors.ErrCreateEntity), - errors.Contains(err, errors.ErrUpdateEntity), - errors.Contains(err, errors.ErrViewEntity), - errors.Contains(err, errors.ErrRemoveEntity): - w.WriteHeader(http.StatusInternalServerError) - - default: - w.WriteHeader(http.StatusInternalServerError) - } - - if errorVal, ok := err.(errors.Error); ok { - w.Header().Set("Content-Type", contentType) - if err := json.NewEncoder(w).Encode(apiutil.ErrorRes{Err: errorVal.Msg()}); err != nil { - w.WriteHeader(http.StatusInternalServerError) - } - } -} diff --git a/users/bcrypt/hasher.go b/users/bcrypt/hasher.go deleted file mode 100644 index 590a3831ae..0000000000 --- a/users/bcrypt/hasher.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package bcrypt provides a hasher implementation utilizing bcrypt. 
-package bcrypt - -import ( - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/users" - "golang.org/x/crypto/bcrypt" -) - -const cost int = 10 - -var ( - errHashPassword = errors.New("Generate hash from password failed") - errComparePassword = errors.New("Compare hash and password failed") -) - -var _ users.Hasher = (*bcryptHasher)(nil) - -type bcryptHasher struct{} - -// New instantiates a bcrypt-based hasher implementation. -func New() users.Hasher { - return &bcryptHasher{} -} - -func (bh *bcryptHasher) Hash(pwd string) (string, error) { - hash, err := bcrypt.GenerateFromPassword([]byte(pwd), cost) - if err != nil { - return "", errors.Wrap(errHashPassword, err) - } - - return string(hash), nil -} - -func (bh *bcryptHasher) Compare(plain, hashed string) error { - err := bcrypt.CompareHashAndPassword([]byte(hashed), []byte(plain)) - if err != nil { - return errors.Wrap(errComparePassword, err) - } - return nil -} diff --git a/users/clients/api/endpoints.go b/users/clients/api/endpoints.go new file mode 100644 index 0000000000..7e46add5f2 --- /dev/null +++ b/users/clients/api/endpoints.go @@ -0,0 +1,322 @@ +package api + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/users/clients" +) + +func registrationEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createClientReq) + if err := req.validate(); err != nil { + return createClientRes{}, err + } + client, err := svc.RegisterClient(ctx, req.token, req.client) + if err != nil { + return createClientRes{}, err + } + ucr := createClientRes{ + Client: client, + created: true, + } + + return ucr, nil + } +} + +func viewClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(viewClientReq) + if err := 
req.validate(); err != nil { + return nil, err + } + + client, err := svc.ViewClient(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return viewClientRes{Client: client}, nil + } +} + +func viewProfileEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(viewProfileReq) + if err := req.validate(); err != nil { + return nil, err + } + + client, err := svc.ViewProfile(ctx, req.token) + if err != nil { + return nil, err + } + return viewClientRes{ + Client: client, + }, nil + } +} + +func listClientsEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listClientsReq) + if err := req.validate(); err != nil { + return mfclients.ClientsPage{}, err + } + + pm := mfclients.Page{ + SharedBy: req.sharedBy, + Status: req.status, + Offset: req.offset, + Limit: req.limit, + Owner: req.owner, + Name: req.name, + Tag: req.tag, + Metadata: req.metadata, + Identity: req.identity, + } + page, err := svc.ListClients(ctx, req.token, pm) + if err != nil { + return mfclients.ClientsPage{}, err + } + + res := clientsPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + Clients: []viewClientRes{}, + } + for _, client := range page.Clients { + res.Clients = append(res.Clients, viewClientRes{Client: client}) + } + + return res, nil + } +} + +func listMembersEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listMembersReq) + if err := req.validate(); err != nil { + return memberPageRes{}, err + } + page, err := svc.ListMembers(ctx, req.token, req.groupID, req.Page) + if err != nil { + return memberPageRes{}, err + } + return buildMembersResponse(page), nil + } +} + +func updateClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx 
context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientReq) + if err := req.validate(); err != nil { + return nil, err + } + + client := mfclients.Client{ + ID: req.id, + Name: req.Name, + Metadata: req.Metadata, + } + client, err := svc.UpdateClient(ctx, req.token, client) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func updateClientTagsEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientTagsReq) + if err := req.validate(); err != nil { + return nil, err + } + + client := mfclients.Client{ + ID: req.id, + Tags: req.Tags, + } + client, err := svc.UpdateClientTags(ctx, req.token, client) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func updateClientIdentityEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientIdentityReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.UpdateClientIdentity(ctx, req.token, req.id, req.Identity) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +// Password reset request endpoint. +// When successful password reset link is generated. +// Link is generated using MF_TOKEN_RESET_ENDPOINT env. +// and value from Referer header for host. +// {Referer}+{MF_TOKEN_RESET_ENDPOINT}+{token=TOKEN} +// http://mainflux.com/reset-request?token=xxxxxxxxxxx. +// Email with a link is being sent to the user. 
+// When user clicks on a link it should get the ui with form to +// enter new password, when form is submitted token and new password +// must be sent as PUT request to 'password/reset' passwordResetEndpoint +func passwordResetRequestEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(passwResetReq) + if err := req.validate(); err != nil { + return nil, err + } + if err := svc.GenerateResetToken(ctx, req.Email, req.Host); err != nil { + return nil, err + } + + return passwResetReqRes{Msg: MailSent}, nil + } +} + +// This is endpoint that actually sets new password in password reset flow. +// When user clicks on a link in email finally ends on this endpoint as explained in +// the comment above. +func passwordResetEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(resetTokenReq) + if err := req.validate(); err != nil { + return nil, err + } + if err := svc.ResetSecret(ctx, req.Token, req.Password); err != nil { + return nil, err + } + return passwChangeRes{}, nil + } +} + +func updateClientSecretEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientSecretReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.UpdateClientSecret(ctx, req.token, req.OldSecret, req.NewSecret) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func updateClientOwnerEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateClientOwnerReq) + if err := req.validate(); err != nil { + return nil, err + } + + client := mfclients.Client{ + ID: req.id, + Owner: req.Owner, + } + + client, err := svc.UpdateClientOwner(ctx, req.token, 
client) + if err != nil { + return nil, err + } + return updateClientRes{Client: client}, nil + } +} + +func issueTokenEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(loginClientReq) + if err := req.validate(); err != nil { + return nil, err + } + + token, err := svc.IssueToken(ctx, req.Identity, req.Secret) + if err != nil { + return nil, err + } + return tokenRes{ + AccessToken: token.AccessToken, + RefreshToken: token.RefreshToken, + AccessType: token.AccessType, + }, nil + } +} + +func refreshTokenEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(tokenReq) + if err := req.validate(); err != nil { + return nil, err + } + + token, err := svc.RefreshToken(ctx, req.RefreshToken) + if err != nil { + return nil, err + } + + return tokenRes{ + AccessToken: token.AccessToken, + RefreshToken: token.RefreshToken, + AccessType: token.AccessType, + }, nil + } +} + +func enableClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeClientStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.EnableClient(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return deleteClientRes{Client: client}, nil + } +} + +func disableClientEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeClientStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + client, err := svc.DisableClient(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return deleteClientRes{Client: client}, nil + } +} + +func buildMembersResponse(cp mfclients.MembersPage) memberPageRes { + res := memberPageRes{ + pageRes: pageRes{ + Total: 
cp.Total, + Offset: cp.Offset, + Limit: cp.Limit, + }, + Members: []viewMembersRes{}, + } + for _, client := range cp.Members { + res.Members = append(res.Members, viewMembersRes{Client: client}) + } + return res +} diff --git a/users/clients/api/logging.go b/users/clients/api/logging.go new file mode 100644 index 0000000000..883dc24346 --- /dev/null +++ b/users/clients/api/logging.go @@ -0,0 +1,238 @@ +package api + +import ( + "context" + "fmt" + "time" + + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/jwt" +) + +var _ clients.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger mflog.Logger + svc clients.Service +} + +func LoggingMiddleware(svc clients.Service, logger mflog.Logger) clients.Service { + return &loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) RegisterClient(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method register_client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.RegisterClient(ctx, token, client) +} + +func (lm *loggingMiddleware) IssueToken(ctx context.Context, identity, secret string) (t jwt.Token, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method issue_token of type %s for client %s took %s to complete", t.AccessType, identity, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.IssueToken(ctx, identity, secret) +} + +func (lm *loggingMiddleware) 
RefreshToken(ctx context.Context, refreshToken string) (t jwt.Token, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method refresh_token of type %s for token %s took %s to complete", t.AccessType, refreshToken, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.RefreshToken(ctx, refreshToken) +} + +func (lm *loggingMiddleware) ViewClient(ctx context.Context, token, id string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method view_client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ViewClient(ctx, token, id) +} + +func (lm *loggingMiddleware) ViewProfile(ctx context.Context, token string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method view_profile with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ViewProfile(ctx, token) +} + +func (lm *loggingMiddleware) ListClients(ctx context.Context, token string, pm mfclients.Page) (cp mfclients.ClientsPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_clients %d clients using token %s took %s to complete", cp.Total, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListClients(ctx, token, pm) +} + +func (lm 
*loggingMiddleware) UpdateClient(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_client_name_and_metadata for client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClient(ctx, token, client) +} + +func (lm *loggingMiddleware) UpdateClientTags(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_client_tags for client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClientTags(ctx, token, client) +} +func (lm *loggingMiddleware) UpdateClientIdentity(ctx context.Context, token, id, identity string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_client_identity for client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClientIdentity(ctx, token, id, identity) +} + +func (lm *loggingMiddleware) UpdateClientSecret(ctx context.Context, token, oldSecret, newSecret string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_client_secret for client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + 
lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClientSecret(ctx, token, oldSecret, newSecret) +} + +func (lm *loggingMiddleware) GenerateResetToken(ctx context.Context, email, host string) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method generate_reset_token for email %s took %s to complete", email, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.GenerateResetToken(ctx, email, host) +} + +func (lm *loggingMiddleware) ResetSecret(ctx context.Context, token, secret string) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method reset_secret using token %s took %s to complete", token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ResetSecret(ctx, token, secret) +} + +func (lm *loggingMiddleware) SendPasswordReset(ctx context.Context, host, email, user, token string) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method send_password_reset using token %s took %s to complete", token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.SendPasswordReset(ctx, host, email, user, token) +} + +func (lm *loggingMiddleware) UpdateClientOwner(ctx context.Context, token string, client mfclients.Client) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_client_owner for client with id %s using token %s took %s to complete", c.ID, token, 
time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateClientOwner(ctx, token, client) +} + +func (lm *loggingMiddleware) EnableClient(ctx context.Context, token, id string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method enable_client for client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.EnableClient(ctx, token, id) +} + +func (lm *loggingMiddleware) DisableClient(ctx context.Context, token, id string) (c mfclients.Client, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method disable_client for client with id %s using token %s took %s to complete", c.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.DisableClient(ctx, token, id) +} + +func (lm *loggingMiddleware) ListMembers(ctx context.Context, token, groupID string, cp mfclients.Page) (mp mfclients.MembersPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_members %d members for group with id %s and token %s took %s to complete", mp.Total, groupID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListMembers(ctx, token, groupID, cp) +} + +func (lm *loggingMiddleware) Identify(ctx context.Context, token string) (id string, err error) { + defer func(begin time.Time) { + message := 
fmt.Sprintf("Method identify for token %s with id %s took %s to complete", token, id, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.Identify(ctx, token) +} diff --git a/users/clients/api/metrics.go b/users/clients/api/metrics.go new file mode 100644 index 0000000000..12a0a67147 --- /dev/null +++ b/users/clients/api/metrics.go @@ -0,0 +1,172 @@ +package api + +import ( + "context" + "time" + + "github.com/go-kit/kit/metrics" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/jwt" +) + +var _ clients.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc clients.Service +} + +// MetricsMiddleware returns a new metrics middleware wrapper. +func MetricsMiddleware(svc clients.Service, counter metrics.Counter, latency metrics.Histogram) clients.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) RegisterClient(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "register_client").Add(1) + ms.latency.With("method", "register_client").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.RegisterClient(ctx, token, client) +} + +func (ms *metricsMiddleware) IssueToken(ctx context.Context, identity, secret string) (jwt.Token, error) { + defer func(begin time.Time) { + ms.counter.With("method", "issue_token").Add(1) + ms.latency.With("method", "issue_token").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.IssueToken(ctx, identity, secret) +} + +func (ms *metricsMiddleware) RefreshToken(ctx context.Context, accessToken string) (token jwt.Token, err 
error) { + defer func(begin time.Time) { + ms.counter.With("method", "refresh_token").Add(1) + ms.latency.With("method", "refresh_token").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.RefreshToken(ctx, accessToken) +} + +func (ms *metricsMiddleware) ViewClient(ctx context.Context, token, id string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_client").Add(1) + ms.latency.With("method", "view_client").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewClient(ctx, token, id) +} + +func (ms *metricsMiddleware) ViewProfile(ctx context.Context, token string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_profile").Add(1) + ms.latency.With("method", "view_profile").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewProfile(ctx, token) +} + +func (ms *metricsMiddleware) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_clients").Add(1) + ms.latency.With("method", "list_clients").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListClients(ctx, token, pm) +} + +func (ms *metricsMiddleware) UpdateClient(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_client_name_and_metadata").Add(1) + ms.latency.With("method", "update_client_name_and_metadata").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClient(ctx, token, client) +} + +func (ms *metricsMiddleware) UpdateClientTags(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_client_tags").Add(1) + ms.latency.With("method", "update_client_tags").Observe(time.Since(begin).Seconds()) + }(time.Now()) + 
return ms.svc.UpdateClientTags(ctx, token, client) +} + +func (ms *metricsMiddleware) UpdateClientIdentity(ctx context.Context, token, id, identity string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_client_identity").Add(1) + ms.latency.With("method", "update_client_identity").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClientIdentity(ctx, token, id, identity) +} + +func (ms *metricsMiddleware) UpdateClientSecret(ctx context.Context, token, oldSecret, newSecret string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_client_secret").Add(1) + ms.latency.With("method", "update_client_secret").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClientSecret(ctx, token, oldSecret, newSecret) +} + +func (ms *metricsMiddleware) GenerateResetToken(ctx context.Context, email, host string) error { + defer func(begin time.Time) { + ms.counter.With("method", "generate_reset_token").Add(1) + ms.latency.With("method", "generate_reset_token").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.GenerateResetToken(ctx, email, host) +} + +func (ms *metricsMiddleware) ResetSecret(ctx context.Context, token, secret string) error { + defer func(begin time.Time) { + ms.counter.With("method", "reset_secret").Add(1) + ms.latency.With("method", "reset_secret").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ResetSecret(ctx, token, secret) +} + +func (ms *metricsMiddleware) SendPasswordReset(ctx context.Context, host, email, user, token string) error { + defer func(begin time.Time) { + ms.counter.With("method", "send_password_reset").Add(1) + ms.latency.With("method", "send_password_reset").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.SendPasswordReset(ctx, host, email, user, token) +} + +func (ms *metricsMiddleware) UpdateClientOwner(ctx context.Context, token string, client 
mfclients.Client) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_client_owner").Add(1) + ms.latency.With("method", "update_client_owner").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateClientOwner(ctx, token, client) +} + +func (ms *metricsMiddleware) EnableClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "enable_client").Add(1) + ms.latency.With("method", "enable_client").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.EnableClient(ctx, token, id) +} + +func (ms *metricsMiddleware) DisableClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + defer func(begin time.Time) { + ms.counter.With("method", "disable_client").Add(1) + ms.latency.With("method", "disable_client").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DisableClient(ctx, token, id) +} + +func (ms *metricsMiddleware) ListMembers(ctx context.Context, token, groupID string, pm mfclients.Page) (mp mfclients.MembersPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_members").Add(1) + ms.latency.With("method", "list_members").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListMembers(ctx, token, groupID, pm) +} + +func (ms *metricsMiddleware) Identify(ctx context.Context, token string) (string, error) { + defer func(begin time.Time) { + ms.counter.With("method", "identify").Add(1) + ms.latency.With("method", "identify").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.Identify(ctx, token) +} diff --git a/users/clients/api/requests.go b/users/clients/api/requests.go new file mode 100644 index 0000000000..4847546b9d --- /dev/null +++ b/users/clients/api/requests.go @@ -0,0 +1,259 @@ +package api + +import ( + "github.com/mainflux/mainflux/internal/api" + 
"github.com/mainflux/mainflux/internal/apiutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" +) + +const maxLimitSize = 100 + +type createClientReq struct { + client mfclients.Client + token string +} + +func (req createClientReq) validate() error { + if len(req.client.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + return req.client.Validate() +} + +type viewClientReq struct { + token string + id string +} + +func (req viewClientReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type viewProfileReq struct { + token string +} + +func (req viewProfileReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + return nil +} + +type listClientsReq struct { + token string + status mfclients.Status + offset uint64 + limit uint64 + name string + tag string + identity string + visibility string + owner string + sharedBy string + metadata mfclients.Metadata +} + +func (req listClientsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.limit > maxLimitSize || req.limit < 1 { + return apiutil.ErrLimitSize + } + if req.visibility != "" && + req.visibility != api.AllVisibility && + req.visibility != api.MyVisibility && + req.visibility != api.SharedVisibility { + return apiutil.ErrInvalidVisibilityType + } + return nil +} + +type listMembersReq struct { + mfclients.Page + token string + groupID string +} + +func (req listMembersReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + if req.groupID == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type updateClientReq struct { + token string + id string + Name string `json:"name,omitempty"` + Metadata mfclients.Metadata `json:"metadata,omitempty"` +} + +func (req updateClientReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return 
apiutil.ErrMissingID + } + + return nil +} + +type updateClientTagsReq struct { + id string + token string + Tags []string `json:"tags,omitempty"` +} + +func (req updateClientTagsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type updateClientOwnerReq struct { + id string + token string + Owner string `json:"owner,omitempty"` +} + +func (req updateClientOwnerReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type updateClientIdentityReq struct { + token string + id string + Identity string `json:"identity,omitempty"` +} + +func (req updateClientIdentityReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type updateClientSecretReq struct { + token string + OldSecret string `json:"old_secret,omitempty"` + NewSecret string `json:"new_secret,omitempty"` +} + +func (req updateClientSecretReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + return nil +} + +type changeClientStatusReq struct { + token string + id string +} + +func (req changeClientStatusReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} + +type loginClientReq struct { + Identity string `json:"identity,omitempty"` + Secret string `json:"secret,omitempty"` +} + +func (req loginClientReq) validate() error { + if req.Identity == "" { + return apiutil.ErrMissingIdentity + } + if req.Secret == "" { + return apiutil.ErrMissingSecret + } + return nil +} + +type tokenReq struct { + RefreshToken string `json:"refresh_token,omitempty"` +} + +func (req tokenReq) validate() error { + if req.RefreshToken == "" { + return apiutil.ErrBearerToken + } + return nil +} + +type 
passwResetReq struct { + Email string `json:"email"` + Host string `json:"host"` +} + +func (req passwResetReq) validate() error { + if req.Email == "" { + return apiutil.ErrMissingEmail + } + + if req.Host == "" { + return apiutil.ErrMissingHost + } + + return nil +} + +type resetTokenReq struct { + Token string `json:"token"` + Password string `json:"password"` + ConfPass string `json:"confirm_password"` +} + +func (req resetTokenReq) validate() error { + if req.Password == "" { + return apiutil.ErrMissingPass + } + + if req.ConfPass == "" { + return apiutil.ErrMissingConfPass + } + + if req.Token == "" { + return apiutil.ErrBearerToken + } + + if req.Password != req.ConfPass { + return apiutil.ErrInvalidResetPass + } + + return nil +} diff --git a/users/clients/api/responses.go b/users/clients/api/responses.go new file mode 100644 index 0000000000..3c78bf8370 --- /dev/null +++ b/users/clients/api/responses.go @@ -0,0 +1,202 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/mainflux/mainflux" + mfclients "github.com/mainflux/mainflux/pkg/clients" +) + +// MailSent message response when link is sent +const MailSent = "Email with reset link is sent" + +var ( + _ mainflux.Response = (*tokenRes)(nil) + _ mainflux.Response = (*viewClientRes)(nil) + _ mainflux.Response = (*createClientRes)(nil) + _ mainflux.Response = (*deleteClientRes)(nil) + _ mainflux.Response = (*clientsPageRes)(nil) + _ mainflux.Response = (*viewMembersRes)(nil) + _ mainflux.Response = (*memberPageRes)(nil) +) + +type pageRes struct { + Limit uint64 `json:"limit,omitempty"` + Offset uint64 `json:"offset,omitempty"` + Total uint64 `json:"total,omitempty"` +} + +type createClientRes struct { + mfclients.Client `json:",inline"` + created bool +} + +func (res createClientRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res createClientRes) Headers() map[string]string { + if res.created { + return map[string]string{ + "Location": 
fmt.Sprintf("/users/%s", res.ID), + } + } + + return map[string]string{} +} + +func (res createClientRes) Empty() bool { + return false +} + +type tokenRes struct { + AccessToken string `json:"access_token,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` + AccessType string `json:"access_type,omitempty"` +} + +func (res tokenRes) Code() int { + return http.StatusCreated +} + +func (res tokenRes) Headers() map[string]string { + return map[string]string{} +} + +func (res tokenRes) Empty() bool { + return res.AccessToken == "" || res.RefreshToken == "" +} + +type updateClientRes struct { + mfclients.Client `json:",inline"` +} + +func (res updateClientRes) Code() int { + return http.StatusOK +} + +func (res updateClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res updateClientRes) Empty() bool { + return false +} + +type viewClientRes struct { + mfclients.Client `json:",inline"` +} + +func (res viewClientRes) Code() int { + return http.StatusOK +} + +func (res viewClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewClientRes) Empty() bool { + return false +} + +type clientsPageRes struct { + pageRes + Clients []viewClientRes `json:"users"` +} + +func (res clientsPageRes) Code() int { + return http.StatusOK +} + +func (res clientsPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res clientsPageRes) Empty() bool { + return false +} + +type viewMembersRes struct { + mfclients.Client `json:",inline"` +} + +func (res viewMembersRes) Code() int { + return http.StatusOK +} + +func (res viewMembersRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewMembersRes) Empty() bool { + return false +} + +type memberPageRes struct { + pageRes + Members []viewMembersRes `json:"members"` +} + +func (res memberPageRes) Code() int { + return http.StatusOK +} + +func (res memberPageRes) Headers() map[string]string { + return 
map[string]string{} +} + +func (res memberPageRes) Empty() bool { + return false +} + +type deleteClientRes struct { + mfclients.Client `json:",inline"` +} + +func (res deleteClientRes) Code() int { + return http.StatusOK +} + +func (res deleteClientRes) Headers() map[string]string { + return map[string]string{} +} + +func (res deleteClientRes) Empty() bool { + return false +} + +type passwResetReqRes struct { + Msg string `json:"msg"` +} + +func (res passwResetReqRes) Code() int { + return http.StatusCreated +} + +func (res passwResetReqRes) Headers() map[string]string { + return map[string]string{} +} + +func (res passwResetReqRes) Empty() bool { + return false +} + +type passwChangeRes struct { +} + +func (res passwChangeRes) Code() int { + return http.StatusCreated +} + +func (res passwChangeRes) Headers() map[string]string { + return map[string]string{} +} + +func (res passwChangeRes) Empty() bool { + return false +} diff --git a/users/clients/api/transport.go b/users/clients/api/transport.go new file mode 100644 index 0000000000..0d8abebf16 --- /dev/null +++ b/users/clients/api/transport.go @@ -0,0 +1,421 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + kithttp "github.com/go-kit/kit/transport/http" + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + mflog "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/clients" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" +) + +// MakeClientsHandler returns a HTTP handler for API endpoints. 
+func MakeClientsHandler(svc clients.Service, mux *bone.Mux, logger mflog.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + + mux.Post("/users", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("register_client"))(registrationEndpoint(svc)), + decodeCreateClientReq, + api.EncodeResponse, + opts..., + )) + + mux.Get("/users/profile", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("view_profile"))(viewProfileEndpoint(svc)), + decodeViewProfile, + api.EncodeResponse, + opts..., + )) + + mux.Get("/users/:id", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("view_client"))(viewClientEndpoint(svc)), + decodeViewClient, + api.EncodeResponse, + opts..., + )) + + mux.Get("/users", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_clients"))(listClientsEndpoint(svc)), + decodeListClients, + api.EncodeResponse, + opts..., + )) + + mux.Get("/groups/:id/members", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_members"))(listMembersEndpoint(svc)), + decodeListMembersRequest, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/users/secret", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_client_secret"))(updateClientSecretEndpoint(svc)), + decodeUpdateClientSecret, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/users/:id", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_client_name_and_metadata"))(updateClientEndpoint(svc)), + decodeUpdateClient, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/users/:id/tags", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_client_tags"))(updateClientTagsEndpoint(svc)), + decodeUpdateClientTags, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/users/:id/identity", kithttp.NewServer( + 
otelkit.EndpointMiddleware(otelkit.WithOperation("update_client_identity"))(updateClientIdentityEndpoint(svc)), + decodeUpdateClientIdentity, + api.EncodeResponse, + opts..., + )) + + mux.Post("/password/reset-request", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("password_reset_req"))(passwordResetRequestEndpoint(svc)), + decodePasswordResetRequest, + api.EncodeResponse, + opts..., + )) + + mux.Put("/password/reset", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("password_reset"))(passwordResetEndpoint(svc)), + decodePasswordReset, + api.EncodeResponse, + opts..., + )) + + mux.Patch("/users/:id/owner", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_client_owner"))(updateClientOwnerEndpoint(svc)), + decodeUpdateClientOwner, + api.EncodeResponse, + opts..., + )) + + mux.Post("/users/tokens/issue", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("issue_token"))(issueTokenEndpoint(svc)), + decodeCredentials, + api.EncodeResponse, + opts..., + )) + + mux.Post("/users/tokens/refresh", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("refresh_token"))(refreshTokenEndpoint(svc)), + decodeRefreshToken, + api.EncodeResponse, + opts..., + )) + + mux.Post("/users/:id/enable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("enable_client"))(enableClientEndpoint(svc)), + decodeChangeClientStatus, + api.EncodeResponse, + opts..., + )) + + mux.Post("/users/:id/disable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("disable_client"))(disableClientEndpoint(svc)), + decodeChangeClientStatus, + api.EncodeResponse, + opts..., + )) + + mux.GetFunc("/health", mainflux.Health("users")) + mux.Handle("/metrics", promhttp.Handler()) + + return mux +} + +func decodeViewClient(_ context.Context, r *http.Request) (interface{}, error) { + req := viewClientReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, 
"id"), + } + + return req, nil +} + +func decodeViewProfile(_ context.Context, r *http.Request) (interface{}, error) { + req := viewProfileReq{token: apiutil.ExtractBearerToken(r)} + + return req, nil +} + +func decodeListClients(_ context.Context, r *http.Request) (interface{}, error) { + var sid, oid string + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefClientStatus) + if err != nil { + return nil, err + } + o, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + l, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + m, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + + n, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + i, err := apiutil.ReadStringQuery(r, api.IdentityKey, "") + if err != nil { + return nil, err + } + t, err := apiutil.ReadStringQuery(r, api.TagKey, "") + if err != nil { + return nil, err + } + visibility, err := apiutil.ReadStringQuery(r, api.VisibilityKey, api.MyVisibility) + if err != nil { + return nil, err + } + switch visibility { + case api.MyVisibility: + oid = api.MyVisibility + case api.SharedVisibility: + sid = api.MyVisibility + case api.AllVisibility: + sid = api.MyVisibility + oid = api.MyVisibility + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listClientsReq{ + token: apiutil.ExtractBearerToken(r), + status: st, + offset: o, + limit: l, + metadata: m, + name: n, + identity: i, + tag: t, + sharedBy: sid, + owner: oid, + } + return req, nil +} + +func decodeUpdateClient(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "id"), + } + if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientTags(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientTagsReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "id"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientIdentity(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientIdentityReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "id"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientSecret(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientSecretReq{ + token: apiutil.ExtractBearerToken(r), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodePasswordResetRequest(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + var req passwResetReq + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + req.Host = r.Header.Get("Referer") + return req, nil +} + +func decodePasswordReset(_ context.Context, r 
*http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + var req resetTokenReq + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeUpdateClientOwner(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateClientOwnerReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "id"), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeCredentials(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := loginClientReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return req, nil +} + +func decodeRefreshToken(_ context.Context, r *http.Request) (interface{}, error) { + req := tokenReq{RefreshToken: apiutil.ExtractBearerToken(r)} + + return req, nil +} +func decodeCreateClientReq(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + var c mfclients.Client + if err := json.NewDecoder(r.Body).Decode(&c); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + req := createClientReq{ + client: c, + token: apiutil.ExtractBearerToken(r), + } + + return req, nil +} + +func decodeChangeClientStatus(_ context.Context, r *http.Request) (interface{}, error) { + req := changeClientStatusReq{ + token: apiutil.ExtractBearerToken(r), + id: 
 bone.GetValue(r, "id"), + } + + return req, nil +} + +func decodeListMembersRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefClientStatus) + if err != nil { + return nil, err + } + o, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + l, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + m, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + n, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + i, err := apiutil.ReadStringQuery(r, api.IdentityKey, "") + if err != nil { + return nil, err + } + t, err := apiutil.ReadStringQuery(r, api.TagKey, "") + if err != nil { + return nil, err + } + oid, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listMembersReq{ + token: apiutil.ExtractBearerToken(r), + Page: mfclients.Page{ + Status: st, + Offset: o, + Limit: l, + Metadata: m, + Identity: i, + Name: n, + Tag: t, + Owner: oid, + }, + groupID: bone.GetValue(r, "id"), + } + return req, nil +} diff --git a/users/clients/clients.go b/users/clients/clients.go new file mode 100644 index 0000000000..b64822b829 --- /dev/null +++ b/users/clients/clients.go @@ -0,0 +1,66 @@ +package clients + +import ( + "context" + + "github.com/mainflux/mainflux/pkg/clients" +) + +// ClientService specifies an API that must be fulfilled by the domain service +// implementation, and all of its decorators (e.g. logging & metrics). +type ClientService interface { + // RegisterClient creates new client. In case of the failed registration, a + // non-nil error value is returned. 
+ RegisterClient(ctx context.Context, token string, client clients.Client) (clients.Client, error) + + // LoginClient authenticates the client given its credentials. Successful + // authentication generates new access token. Failed invocations are + // identified by the non-nil error values in the response. + + // ViewClient retrieves client info for a given client ID and an authorized token. + ViewClient(ctx context.Context, token, id string) (clients.Client, error) + + // ViewProfile retrieves client info for a given token. + ViewProfile(ctx context.Context, token string) (clients.Client, error) + + // ListClients retrieves clients list for a valid auth token. + ListClients(ctx context.Context, token string, pm clients.Page) (clients.ClientsPage, error) + + // ListMembers retrieves everything that is assigned to a group identified by groupID. + ListMembers(ctx context.Context, token, groupID string, pm clients.Page) (clients.MembersPage, error) + + // UpdateClient updates the client's name and metadata. + UpdateClient(ctx context.Context, token string, client clients.Client) (clients.Client, error) + + // UpdateClientTags updates the client's tags. + UpdateClientTags(ctx context.Context, token string, client clients.Client) (clients.Client, error) + + // UpdateClientIdentity updates the client's identity + UpdateClientIdentity(ctx context.Context, token, id, identity string) (clients.Client, error) + + // GenerateResetToken email where mail will be sent. + // host is used for generating reset link. + GenerateResetToken(ctx context.Context, email, host string) error + + // UpdateClientSecret updates the client's secret + UpdateClientSecret(ctx context.Context, token, oldSecret, newSecret string) (clients.Client, error) + + // ResetSecret change users secret in reset flow. + // token can be authentication token or secret reset token. + ResetSecret(ctx context.Context, resetToken, secret string) error + + // SendPasswordReset sends reset password link to email. 
+ SendPasswordReset(ctx context.Context, host, email, user, token string) error + + // UpdateClientOwner updates the client's owner. + UpdateClientOwner(ctx context.Context, token string, client clients.Client) (clients.Client, error) + + // EnableClient logically enables the client identified with the provided ID + EnableClient(ctx context.Context, token, id string) (clients.Client, error) + + // DisableClient logically disables the client identified with the provided ID + DisableClient(ctx context.Context, token, id string) (clients.Client, error) + + // Identify returns the client email and id from the given token + Identify(ctx context.Context, tkn string) (string, error) +} diff --git a/users/clients/emailer.go b/users/clients/emailer.go new file mode 100644 index 0000000000..16c3f0f6f3 --- /dev/null +++ b/users/clients/emailer.go @@ -0,0 +1,9 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package clients + +// Emailer is a wrapper around the email agent. +type Emailer interface { + SendPasswordReset(To []string, host, user, token string) error +} diff --git a/users/clients/emailer/emailer.go b/users/clients/emailer/emailer.go new file mode 100644 index 0000000000..68db20cab9 --- /dev/null +++ b/users/clients/emailer/emailer.go @@ -0,0 +1,28 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 +package emailer + +import ( + "fmt" + + "github.com/mainflux/mainflux/internal/email" + "github.com/mainflux/mainflux/users/clients" +) + +var _ clients.Emailer = (*emailer)(nil) + +type emailer struct { + resetURL string + agent *email.Agent +} + +// New creates a new emailer utility +func New(url string, c *email.Config) (clients.Emailer, error) { + e, err := email.New(c) + return &emailer{resetURL: url, agent: e}, err +} + +func (e *emailer) SendPasswordReset(To []string, host, user, token string) error { + url := fmt.Sprintf("%s%s?token=%s", host, e.resetURL, token) + return e.agent.Send(To, "", "Password Reset Request", "", user, url, 
"") +} diff --git a/users/clients/hasher.go b/users/clients/hasher.go new file mode 100644 index 0000000000..fd5dfff314 --- /dev/null +++ b/users/clients/hasher.go @@ -0,0 +1,12 @@ +package clients + +// Hasher specifies an API for generating hashes of an arbitrary textual +// content. +type Hasher interface { + // Hash generates the hashed string from plain-text. + Hash(string) (string, error) + + // Compare compares plain-text version to the hashed one. An error should + // indicate failed comparison. + Compare(string, string) error +} diff --git a/users/clients/mocks/authn.go b/users/clients/mocks/authn.go new file mode 100644 index 0000000000..8619796993 --- /dev/null +++ b/users/clients/mocks/authn.go @@ -0,0 +1,82 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "context" + + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" + "google.golang.org/grpc" +) + +var _ policies.AuthServiceClient = (*authServiceMock)(nil) + +type SubjectSet struct { + Subject string + Relation []string +} + +type authServiceMock struct { + users map[string]string + authz map[string][]SubjectSet +} + +func (svc authServiceMock) ListPolicies(ctx context.Context, in *policies.ListPoliciesReq, opts ...grpc.CallOption) (*policies.ListPoliciesRes, error) { + res := policies.ListPoliciesRes{} + for key := range svc.authz { + res.Objects = append(res.Objects, key) + } + return &res, nil +} + +// NewAuthService creates mock of users service. 
+func NewAuthService(users map[string]string, authzDB map[string][]SubjectSet) policies.AuthServiceClient { + return &authServiceMock{users, authzDB} +} + +func (svc authServiceMock) Identify(ctx context.Context, in *policies.Token, opts ...grpc.CallOption) (*policies.UserIdentity, error) { + if id, ok := svc.users[in.Value]; ok { + return &policies.UserIdentity{Id: id}, nil + } + return nil, errors.ErrAuthentication +} + +func (svc authServiceMock) Issue(ctx context.Context, in *policies.IssueReq, opts ...grpc.CallOption) (*policies.Token, error) { + if id, ok := svc.users[in.GetEmail()]; ok { + switch in.Type { + default: + return &policies.Token{Value: id}, nil + } + } + return nil, errors.ErrAuthentication +} + +func (svc authServiceMock) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { + for _, policy := range svc.authz[req.GetSub()] { + for _, r := range policy.Relation { + if r == req.GetAct() && policy.Subject == req.GetSub() { + return &policies.AuthorizeRes{Authorized: true}, nil + } + } + } + return &policies.AuthorizeRes{Authorized: false}, nil +} + +func (svc authServiceMock) AddPolicy(ctx context.Context, in *policies.AddPolicyReq, opts ...grpc.CallOption) (*policies.AddPolicyRes, error) { + if len(in.GetAct()) == 0 || in.GetObj() == "" || in.GetSub() == "" { + return &policies.AddPolicyRes{}, errors.ErrMalformedEntity + } + + svc.authz[in.GetSub()] = append(svc.authz[in.GetSub()], SubjectSet{Subject: in.GetSub(), Relation: in.GetAct()}) + return &policies.AddPolicyRes{Authorized: true}, nil +} + +func (svc authServiceMock) DeletePolicy(ctx context.Context, in *policies.DeletePolicyReq, opts ...grpc.CallOption) (*policies.DeletePolicyRes, error) { + if in.GetObj() == "" || in.GetSub() == "" { + return &policies.DeletePolicyRes{}, errors.ErrMalformedEntity + } + delete(svc.authz, in.GetSub()) + return &policies.DeletePolicyRes{Deleted: true}, nil +} diff --git 
a/users/clients/mocks/clients.go b/users/clients/mocks/clients.go new file mode 100644 index 0000000000..ef16040b16 --- /dev/null +++ b/users/clients/mocks/clients.go @@ -0,0 +1,138 @@ +package mocks + +import ( + "context" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/stretchr/testify/mock" +) + +const WrongID = "wrongID" + +var _ mfclients.Repository = (*ClientRepository)(nil) + +type ClientRepository struct { + mock.Mock +} + +func (m *ClientRepository) ChangeStatus(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + if client.Status != mfclients.EnabledStatus && client.Status != mfclients.DisabledStatus { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) Members(ctx context.Context, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + ret := m.Called(ctx, groupID, pm) + if groupID == WrongID { + return mfclients.MembersPage{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.MembersPage), ret.Error(1) +} + +func (m *ClientRepository) RetrieveAll(ctx context.Context, pm mfclients.Page) (mfclients.ClientsPage, error) { + ret := m.Called(ctx, pm) + + return ret.Get(0).(mfclients.ClientsPage), ret.Error(1) +} + +func (m *ClientRepository) RetrieveByID(ctx context.Context, id string) (mfclients.Client, error) { + ret := m.Called(ctx, id) + + if id == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) RetrieveByIdentity(ctx context.Context, identity string) (mfclients.Client, error) { + ret := m.Called(ctx, identity) + + if identity == "" { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), 
ret.Error(1) +} + +func (m *ClientRepository) Save(ctx context.Context, clients ...mfclients.Client) ([]mfclients.Client, error) { + client := clients[0] + ret := m.Called(ctx, client) + if client.Owner == WrongID { + return []mfclients.Client{}, errors.ErrMalformedEntity + } + if client.Credentials.Secret == "" { + return []mfclients.Client{}, errors.ErrMalformedEntity + } + + return clients, ret.Error(1) +} + +func (m *ClientRepository) Update(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateIdentity(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + if client.Credentials.Identity == "" { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateSecret(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + if client.Credentials.Secret == "" { + return mfclients.Client{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateTags(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + return ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (m *ClientRepository) UpdateOwner(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + ret := m.Called(ctx, client) + + if client.ID == WrongID { + return mfclients.Client{}, errors.ErrNotFound + } + + return 
ret.Get(0).(mfclients.Client), ret.Error(1) +} + +func (*ClientRepository) RetrieveBySecret(ctx context.Context, key string) (mfclients.Client, error) { + return mfclients.Client{}, nil +} diff --git a/users/clients/mocks/email.go b/users/clients/mocks/email.go new file mode 100644 index 0000000000..acc852871d --- /dev/null +++ b/users/clients/mocks/email.go @@ -0,0 +1,20 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "github.com/mainflux/mainflux/users/clients" +) + +type emailerMock struct { +} + +// NewEmailer provides emailer instance for the test +func NewEmailer() clients.Emailer { + return &emailerMock{} +} + +func (e *emailerMock) SendPasswordReset([]string, string, string, string) error { + return nil +} diff --git a/users/clients/mocks/hasher.go b/users/clients/mocks/hasher.go new file mode 100644 index 0000000000..105733df81 --- /dev/null +++ b/users/clients/mocks/hasher.go @@ -0,0 +1,34 @@ +// Copyright (c) Mainflux +// SPDX-License-Identifier: Apache-2.0 + +package mocks + +import ( + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/clients" +) + +var _ clients.Hasher = (*hasherMock)(nil) + +type hasherMock struct{} + +// NewHasher creates "no-op" hasher for test purposes. This implementation will +// return secrets without changing them. 
+func NewHasher() clients.Hasher { + return &hasherMock{} +} + +func (hm *hasherMock) Hash(pwd string) (string, error) { + if pwd == "" { + return "", errors.ErrMalformedEntity + } + return pwd, nil +} + +func (hm *hasherMock) Compare(plain, hashed string) error { + if plain != hashed { + return errors.ErrAuthentication + } + + return nil +} diff --git a/users/clients/postgres/clients.go b/users/clients/postgres/clients.go new file mode 100644 index 0000000000..e263580e62 --- /dev/null +++ b/users/clients/postgres/clients.go @@ -0,0 +1,490 @@ +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/jackc/pgtype" // required for SQL access + "github.com/mainflux/mainflux/internal/postgres" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/groups" +) + +var _ mfclients.Repository = (*clientRepo)(nil) + +type clientRepo struct { + db postgres.Database +} + +// NewClientRepo instantiates a PostgreSQL +// implementation of Clients repository. 
+func NewClientRepo(db postgres.Database) mfclients.Repository { + return &clientRepo{ + db: db, + } +} + +func (clientRepo) RetrieveBySecret(ctx context.Context, key string) (mfclients.Client, error) { + return mfclients.Client{}, nil +} + +func (repo clientRepo) Save(ctx context.Context, c ...mfclients.Client) ([]mfclients.Client, error) { + q := `INSERT INTO clients (id, name, tags, owner_id, identity, secret, metadata, created_at, status, role) + VALUES (:id, :name, :tags, :owner_id, :identity, :secret, :metadata, :created_at, :status, :role) + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at` + dbc, err := toDBClient(c[0]) + if err != nil { + return []mfclients.Client{}, errors.Wrap(errors.ErrCreateEntity, err) + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbc) + if err != nil { + return []mfclients.Client{}, postgres.HandleError(err, errors.ErrCreateEntity) + } + + defer row.Close() + row.Next() + dbc = dbClient{} + if err := row.StructScan(&dbc); err != nil { + return []mfclients.Client{}, err + } + + client, err := toClient(dbc) + if err != nil { + return []mfclients.Client{}, err + } + + return []mfclients.Client{client}, nil +} + +func (repo clientRepo) RetrieveByID(ctx context.Context, id string) (mfclients.Client, error) { + q := `SELECT id, name, tags, COALESCE(owner_id, '') AS owner_id, identity, secret, metadata, created_at, updated_at, updated_by, status + FROM clients WHERE id = :id` + + dbc := dbClient{ + ID: id, + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbc) + if err != nil { + if err == sql.ErrNoRows { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + } + return mfclients.Client{}, errors.Wrap(errors.ErrViewEntity, err) + } + + defer row.Close() + row.Next() + dbc = dbClient{} + if err := row.StructScan(&dbc); err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + } + + return toClient(dbc) +} + +func (repo clientRepo) 
RetrieveByIdentity(ctx context.Context, identity string) (mfclients.Client, error) { + q := `SELECT id, name, tags, COALESCE(owner_id, '') AS owner_id, identity, secret, metadata, created_at, updated_at, updated_by, status + FROM clients WHERE identity = :identity AND status = :status` + + dbc := dbClient{ + Identity: identity, + Status: mfclients.EnabledStatus, + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbc) + if err != nil { + if err == sql.ErrNoRows { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + } + return mfclients.Client{}, errors.Wrap(errors.ErrViewEntity, err) + } + + defer row.Close() + row.Next() + dbc = dbClient{} + if err := row.StructScan(&dbc); err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, err) + } + + return toClient(dbc) +} + +func (repo clientRepo) RetrieveAll(ctx context.Context, pm mfclients.Page) (mfclients.ClientsPage, error) { + query, err := pageQuery(pm) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + q := fmt.Sprintf(`SELECT c.id, c.name, c.tags, c.identity, c.metadata, COALESCE(c.owner_id, '') AS owner_id, c.status, + c.created_at, c.updated_at, COALESCE(c.updated_by, '') AS updated_by FROM clients c %s ORDER BY c.created_at LIMIT :limit OFFSET :offset;`, query) + + dbPage, err := toDBClientsPage(pm) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + defer rows.Close() + + var items []mfclients.Client + for rows.Next() { + dbc := dbClient{} + if err := rows.StructScan(&dbc); err != nil { + return mfclients.ClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + c, err := toClient(dbc) + if err != nil { + return mfclients.ClientsPage{}, err + } + + items = append(items, c) + } + cq := fmt.Sprintf(`SELECT 
COUNT(*) FROM clients c %s;`, query) + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mfclients.ClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + page := mfclients.ClientsPage{ + Clients: items, + Page: mfclients.Page{ + Total: total, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + + return page, nil +} + +func (repo clientRepo) Members(ctx context.Context, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + emq, err := pageQuery(pm) + if err != nil { + return mfclients.MembersPage{}, err + } + + aq := "" + // If not admin, the client needs to have a g_list action on the group + if pm.Subject != "" { + aq = `AND EXISTS (SELECT 1 FROM policies WHERE policies.subject = :subject AND :action=ANY(actions))` + } + q := fmt.Sprintf(`SELECT c.id, c.name, c.tags, c.metadata, c.identity, c.status, + c.created_at, c.updated_at FROM clients c + INNER JOIN policies ON c.id=policies.subject %s AND policies.object = :group_id %s + ORDER BY c.created_at LIMIT :limit OFFSET :offset;`, emq, aq) + dbPage, err := toDBClientsPage(pm) + if err != nil { + return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + dbPage.GroupID = groupID + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembers, err) + } + defer rows.Close() + + var items []mfclients.Client + for rows.Next() { + dbc := dbClient{} + if err := rows.StructScan(&dbc); err != nil { + return mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembers, err) + } + + c, err := toClient(dbc) + if err != nil { + return mfclients.MembersPage{}, err + } + + items = append(items, c) + } + cq := fmt.Sprintf(`SELECT COUNT(*) FROM clients c INNER JOIN policies ON c.id=policies.subject %s AND policies.object = :group_id;`, emq) + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return 
mfclients.MembersPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembers, err) + } + + page := mfclients.MembersPage{ + Members: items, + Page: mfclients.Page{ + Total: total, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + return page, nil +} + +func (repo clientRepo) Update(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + var query []string + var upq string + if client.Name != "" { + query = append(query, "name = :name,") + } + if client.Metadata != nil { + query = append(query, "metadata = :metadata,") + } + if len(query) > 0 { + upq = strings.Join(query, " ") + } + client.Status = mfclients.EnabledStatus + q := fmt.Sprintf(`UPDATE clients SET %s updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by`, + upq) + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateTags(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + client.Status = mfclients.EnabledStatus + q := `UPDATE clients SET tags = :tags, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateIdentity(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + q := `UPDATE clients SET identity = :identity, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateSecret(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + q := `UPDATE clients SET secret = :secret, updated_at 
= :updated_at, updated_by = :updated_by + WHERE identity = :identity AND status = :status + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) UpdateOwner(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + q := `UPDATE clients SET owner_id = :owner_id, updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) ChangeStatus(ctx context.Context, client mfclients.Client) (mfclients.Client, error) { + q := `UPDATE clients SET status = :status WHERE id = :id + RETURNING id, name, tags, identity, metadata, COALESCE(owner_id, '') AS owner_id, status, created_at, updated_at, updated_by` + + return repo.update(ctx, client, q) +} + +func (repo clientRepo) update(ctx context.Context, client mfclients.Client, query string) (mfclients.Client, error) { + dbc, err := toDBClient(client) + if err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + row, err := repo.db.NamedQueryContext(ctx, query, dbc) + if err != nil { + return mfclients.Client{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mfclients.Client{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbc = dbClient{} + if err := row.StructScan(&dbc); err != nil { + return mfclients.Client{}, err + } + + return toClient(dbc) +} + +type dbClient struct { + ID string `db:"id"` + Name string `db:"name,omitempty"` + Tags pgtype.TextArray `db:"tags,omitempty"` + Identity string `db:"identity"` + Owner *string `db:"owner_id,omitempty"` // nullable + Secret string `db:"secret"` + Metadata []byte `db:"metadata,omitempty"` + CreatedAt time.Time 
`db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy *string `db:"updated_by,omitempty"` + Groups []groups.Group `db:"groups,omitempty"` + Status mfclients.Status `db:"status"` + Role mfclients.Role `db:"role"` +} + +func toDBClient(c mfclients.Client) (dbClient, error) { + data := []byte("{}") + if len(c.Metadata) > 0 { + b, err := json.Marshal(c.Metadata) + if err != nil { + return dbClient{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + var tags pgtype.TextArray + if err := tags.Set(c.Tags); err != nil { + return dbClient{}, err + } + var owner *string + if c.Owner != "" { + owner = &c.Owner + } + var updatedBy *string + if c.UpdatedBy != "" { + updatedBy = &c.UpdatedBy + } + var updatedAt sql.NullTime + if !c.UpdatedAt.IsZero() { + updatedAt = sql.NullTime{Time: c.UpdatedAt, Valid: true} + } + + return dbClient{ + ID: c.ID, + Name: c.Name, + Tags: tags, + Owner: owner, + Identity: c.Credentials.Identity, + Secret: c.Credentials.Secret, + Metadata: data, + CreatedAt: c.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + Status: c.Status, + Role: c.Role, + }, nil +} + +func toClient(c dbClient) (mfclients.Client, error) { + var metadata mfclients.Metadata + if c.Metadata != nil { + if err := json.Unmarshal([]byte(c.Metadata), &metadata); err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + } + var tags []string + for _, e := range c.Tags.Elements { + tags = append(tags, e.String) + } + var owner string + if c.Owner != nil { + owner = *c.Owner + } + var updatedBy string + if c.UpdatedBy != nil { + updatedBy = *c.UpdatedBy + } + var updatedAt time.Time + if c.UpdatedAt.Valid { + updatedAt = c.UpdatedAt.Time + } + + return mfclients.Client{ + ID: c.ID, + Name: c.Name, + Tags: tags, + Owner: owner, + Credentials: mfclients.Credentials{ + Identity: c.Identity, + Secret: c.Secret, + }, + Metadata: metadata, + CreatedAt: c.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: 
updatedBy, + Status: c.Status, + }, nil +} + +func pageQuery(pm mfclients.Page) (string, error) { + mq, _, err := postgres.CreateMetadataQuery("", pm.Metadata) + if err != nil { + return "", errors.Wrap(errors.ErrViewEntity, err) + } + var query []string + var emq string + if mq != "" { + query = append(query, mq) + } + if pm.Identity != "" { + query = append(query, "c.identity = :identity") + } + if pm.Name != "" { + query = append(query, "c.name = :name") + } + if pm.Tag != "" { + query = append(query, ":tag = ANY(c.tags)") + } + if pm.Status != mfclients.AllStatus { + query = append(query, "c.status = :status") + } + // For listing clients that the specified client owns but not sharedby + if pm.Owner != "" && pm.SharedBy == "" { + query = append(query, "c.owner_id = :owner_id") + } + + // For listing clients that the specified client owns and that are shared with the specified client + if pm.Owner != "" && pm.SharedBy != "" { + query = append(query, "(c.owner_id = :owner_id OR EXISTS (SELECT 1 FROM policies WHERE subject = :shared_by AND :action=ANY(actions) AND object = policies.object))") + } + // For listing clients that the specified client is shared with + if pm.SharedBy != "" && pm.Owner == "" { + query = append(query, "c.owner_id != :shared_by AND (policies.object IN (SELECT object FROM policies WHERE subject = :shared_by AND :action=ANY(actions)))") + } + if len(query) > 0 { + emq = fmt.Sprintf("WHERE %s", strings.Join(query, " AND ")) + if strings.Contains(emq, "policies") { + emq = fmt.Sprintf("LEFT JOIN policies ON policies.subject = c.id %s", emq) + } + } + return emq, nil + +} + +func toDBClientsPage(pm mfclients.Page) (dbClientsPage, error) { + _, data, err := postgres.CreateMetadataQuery("", pm.Metadata) + if err != nil { + return dbClientsPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + return dbClientsPage{ + Name: pm.Name, + Identity: pm.Identity, + Metadata: data, + Owner: pm.Owner, + Total: pm.Total, + Offset: pm.Offset, + Limit: 
pm.Limit, + Status: pm.Status, + Tag: pm.Tag, + Subject: pm.Subject, + Action: pm.Action, + SharedBy: pm.SharedBy, + }, nil +} + +type dbClientsPage struct { + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` + Name string `db:"name"` + Owner string `db:"owner_id"` + Identity string `db:"identity"` + Metadata []byte `db:"metadata"` + Tag string `db:"tag"` + Status mfclients.Status `db:"status"` + GroupID string `db:"group_id"` + SharedBy string `db:"shared_by"` + Subject string `db:"subject"` + Action string `db:"action"` +} diff --git a/users/clients/postgres/clients_test.go b/users/clients/postgres/clients_test.go new file mode 100644 index 0000000000..24bc97a0e9 --- /dev/null +++ b/users/clients/postgres/clients_test.go @@ -0,0 +1,1056 @@ +package postgres_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/mainflux/mainflux/internal/postgres" + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + cpostgres "github.com/mainflux/mainflux/users/clients/postgres" + gpostgres "github.com/mainflux/mainflux/users/groups/postgres" + "github.com/mainflux/mainflux/users/policies" + ppostgres "github.com/mainflux/mainflux/users/policies/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + maxNameSize = 254 +) + +var ( + idProvider = uuid.New() + invalidName = strings.Repeat("m", maxNameSize+10) + password = "$tr0ngPassw0rd" + clientIdentity = "client-identity@example.com" + clientName = "client name" + wrongName = "wrong-name" + wrongID = "wrong-id" +) + +func TestClientsSave(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + 
cases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "add new client successfully", + client: mfclients.Client{ + ID: uid, + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: clientIdentity, + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "add new client with an owner", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: uid, + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: "withowner-client@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "add client with duplicate client identity", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: clientIdentity, + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrConflict, + }, + { + desc: "add client with invalid client id", + client: mfclients.Client{ + ID: invalidName, + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: "invalidid-client@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "add client with invalid client name", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: invalidName, + Credentials: mfclients.Credentials{ + Identity: "invalidname-client@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "add client with invalid client owner", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: invalidName, + Credentials: mfclients.Credentials{ + Identity: "invalidowner-client@example.com", + 
Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "add client with invalid client identity", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: invalidName, + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "add client with a missing client identity", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Credentials: mfclients.Credentials{ + Identity: "", + Secret: password, + }, + Metadata: mfclients.Metadata{}, + }, + err: nil, + }, + { + desc: "add client with a missing client secret", + client: mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Credentials: mfclients.Credentials{ + Identity: "missing-client-secret@example.com", + Secret: "", + }, + Metadata: mfclients.Metadata{}, + }, + err: nil, + }, + } + for _, tc := range cases { + rClient, err := repo.Save(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + rClient[0].Credentials.Secret = tc.client.Credentials.Secret + assert.Equal(t, tc.client, rClient[0], fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.client, rClient)) + } + } +} + +func TestClientsRetrieveByID(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: clientIdentity, + Secret: password, + }, + Status: mfclients.EnabledStatus, + } + + clients, err := repo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client = clients[0] + 
+ cases := map[string]struct { + ID string + err error + }{ + "retrieve existing client": {client.ID, nil}, + "retrieve non-existing client": {wrongID, errors.ErrNotFound}, + } + + for desc, tc := range cases { + cli, err := repo.RetrieveByID(context.Background(), tc.ID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + if err == nil { + assert.Equal(t, client.ID, cli.ID, fmt.Sprintf("retrieve client by ID : client ID : expected %s got %s\n", client.ID, cli.ID)) + assert.Equal(t, client.Name, cli.Name, fmt.Sprintf("retrieve client by ID : client Name : expected %s got %s\n", client.Name, cli.Name)) + assert.Equal(t, client.Credentials.Identity, cli.Credentials.Identity, fmt.Sprintf("retrieve client by ID : client Identity : expected %s got %s\n", client.Credentials.Identity, cli.Credentials.Identity)) + assert.Equal(t, client.Status, cli.Status, fmt.Sprintf("retrieve client by ID : client Status : expected %d got %d\n", client.Status, cli.Status)) + } + } +} + +func TestClientsRetrieveByIdentity(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: clientName, + Credentials: mfclients.Credentials{ + Identity: clientIdentity, + Secret: password, + }, + Status: mfclients.EnabledStatus, + } + + _, err := repo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + identity string + err error + }{ + "retrieve existing client": {clientIdentity, nil}, + "retrieve non-existing client": {wrongID, errors.ErrNotFound}, + } + + for desc, tc := range cases { + _, err := repo.RetrieveByIdentity(context.Background(), tc.identity) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } +} + +func 
TestClientsRetrieveAll(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + grepo := gpostgres.NewGroupRepo(database) + prepo := ppostgres.NewPolicyRepo(database) + + var nClients = uint64(200) + var ownerID string + + meta := mfclients.Metadata{ + "admin": "true", + } + wrongMeta := mfclients.Metadata{ + "admin": "false", + } + var expectedClients = []mfclients.Client{} + + var sharedGroup = mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "shared-group", + } + _, err := grepo.Save(context.Background(), sharedGroup) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + for i := uint64(0); i < nClients; i++ { + identity := fmt.Sprintf("TestRetrieveAll%d@example.com", i) + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: identity, + Credentials: mfclients.Credentials{ + Identity: identity, + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + if i == 1 { + ownerID = client.ID + } + if i%10 == 0 { + client.Owner = ownerID + client.Metadata = meta + client.Tags = []string{"Test"} + } + if i%50 == 0 { + client.Status = mfclients.DisabledStatus + } + _, err := repo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client.Credentials.Secret = "" + expectedClients = append(expectedClients, client) + var policy = policies.Policy{ + Subject: client.ID, + Object: sharedGroup.ID, + Actions: []string{"c_list"}, + } + err = prepo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + } + + cases := map[string]struct { + size uint64 + pm mfclients.Page + response []mfclients.Client + }{ + "retrieve all clients empty page": { + pm: mfclients.Page{}, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + 
Status: mfclients.AllStatus, + }, + response: expectedClients, + size: 200, + }, + "retrieve all clients with limit": { + pm: mfclients.Page{ + Offset: 0, + Limit: 50, + Status: mfclients.AllStatus, + }, + response: expectedClients[0:50], + size: 50, + }, + "retrieve all clients with offset": { + pm: mfclients.Page{ + Offset: 50, + Limit: nClients, + Status: mfclients.AllStatus, + }, + response: expectedClients[50:200], + size: 150, + }, + "retrieve all clients with limit and offset": { + pm: mfclients.Page{ + Offset: 50, + Limit: 50, + Status: mfclients.AllStatus, + }, + response: expectedClients[50:100], + size: 50, + }, + "retrieve all clients with limit and offset not full": { + pm: mfclients.Page{ + Offset: 170, + Limit: 50, + Status: mfclients.AllStatus, + }, + response: expectedClients[170:200], + size: 30, + }, + "retrieve all clients by metadata": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Metadata: meta, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[0], expectedClients[10], expectedClients[20], expectedClients[30], expectedClients[40], expectedClients[50], expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 20, + }, + "retrieve clients by wrong metadata": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Metadata: wrongMeta, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by name": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Name: "TestRetrieveAll3@example.com", + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[3]}, + size: 1, + }, + "retrieve clients by wrong name": { 
+ pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Name: wrongName, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by owner": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Owner: ownerID, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[10], expectedClients[20], expectedClients[30], expectedClients[40], expectedClients[50], expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 19, + }, + "retrieve clients by wrong owner": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Owner: wrongID, + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by disabled status": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Status: mfclients.DisabledStatus, + }, + response: []mfclients.Client{expectedClients[0], expectedClients[50], expectedClients[100], expectedClients[150]}, + size: 4, + }, + "retrieve all clients by combined status": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Status: mfclients.AllStatus, + }, + response: expectedClients, + size: 200, + }, + "retrieve clients by the wrong status": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Status: 10, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by tags": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Tag: "Test", + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{expectedClients[0], expectedClients[10], expectedClients[20], expectedClients[30], 
expectedClients[40], expectedClients[50], expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 20, + }, + "retrieve clients by wrong tags": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + Tag: "wrongTags", + Status: mfclients.AllStatus, + }, + response: []mfclients.Client{}, + size: 0, + }, + "retrieve all clients by sharedby": { + pm: mfclients.Page{ + Offset: 0, + Limit: nClients, + Total: nClients, + SharedBy: expectedClients[0].ID, + Status: mfclients.AllStatus, + Action: "c_list", + }, + response: []mfclients.Client{expectedClients[10], expectedClients[20], expectedClients[30], expectedClients[40], expectedClients[50], expectedClients[60], + expectedClients[70], expectedClients[80], expectedClients[90], expectedClients[100], expectedClients[110], expectedClients[120], expectedClients[130], + expectedClients[140], expectedClients[150], expectedClients[160], expectedClients[170], expectedClients[180], expectedClients[190], + }, + size: 19, + }, + } + for desc, tc := range cases { + page, err := repo.RetrieveAll(context.Background(), tc.pm) + size := uint64(len(page.Clients)) + assert.ElementsMatch(t, page.Clients, tc.response, fmt.Sprintf("%s: expected %v got %v\n", desc, tc.response, page.Clients)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) + } +} + +func TestClientsUpdateMetadata(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + 
Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{ + "name": "enabled-client", + }, + Tags: []string{"enabled", "tag1"}, + Status: mfclients.EnabledStatus, + } + + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client", + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{ + "name": "disabled-client", + }, + Tags: []string{"disabled", "tag1"}, + Status: mfclients.DisabledStatus, + } + + clients1, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client with metadata: expected %v got %s\n", nil, err)) + clients2, err := repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + client1 = clients1[0] + client2 = clients2[0] + + ucases := []struct { + desc string + update string + client mfclients.Client + err error + }{ + { + desc: "update metadata for enabled client", + update: "metadata", + client: mfclients.Client{ + ID: client1.ID, + Metadata: mfclients.Metadata{ + "update": "metadata", + }, + }, + err: nil, + }, + { + desc: "update metadata for disabled client", + update: "metadata", + client: mfclients.Client{ + ID: client2.ID, + Metadata: mfclients.Metadata{ + "update": "metadata", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update name for enabled client", + update: "name", + client: mfclients.Client{ + ID: client1.ID, + Name: "updated name", + }, + err: nil, + }, + { + desc: "update name for disabled client", + update: "name", + client: mfclients.Client{ + ID: client2.ID, + Name: "updated name", + }, + err: errors.ErrNotFound, + }, + { + desc: "update name and metadata for enabled client", + update: "both", + client: mfclients.Client{ + ID: client1.ID, + Name: "updated name and 
metadata", + Metadata: mfclients.Metadata{ + "update": "name and metadata", + }, + }, + err: nil, + }, + { + desc: "update name and metadata for a disabled client", + update: "both", + client: mfclients.Client{ + ID: client2.ID, + Name: "updated name and metadata", + Metadata: mfclients.Metadata{ + "update": "name and metadata", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update metadata for invalid client", + update: "metadata", + client: mfclients.Client{ + ID: wrongID, + Metadata: mfclients.Metadata{ + "update": "metadata", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update name for invalid client", + update: "name", + client: mfclients.Client{ + ID: wrongID, + Name: "updated name", + }, + err: errors.ErrNotFound, + }, + { + desc: "update name and metadata for invalid client", + update: "both", + client: mfclients.Client{ + ID: client2.ID, + Name: "updated name and metadata", + Metadata: mfclients.Metadata{ + "update": "name and metadata", + }, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.Update(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + if tc.client.Name != "" { + assert.Equal(t, expected.Name, tc.client.Name, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected.Name, tc.client.Name)) + } + if tc.client.Metadata != nil { + assert.Equal(t, expected.Metadata, tc.client.Metadata, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected.Metadata, tc.client.Metadata)) + } + + } + } +} + +func TestClientsUpdateTags(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client-with-tags", + Credentials: mfclients.Credentials{ + Identity: "client1-update-tags@example.com", + Secret: password, 
+ }, + Tags: []string{"test", "enabled"}, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client-with-tags", + Credentials: mfclients.Credentials{ + Identity: "client2-update-tags@example.com", + Secret: password, + }, + Tags: []string{"test", "disabled"}, + Status: mfclients.DisabledStatus, + } + + clients1, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client with tags: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, client1.ID, fmt.Sprintf("add new client with tags: expected %v got %s\n", nil, err)) + } + client1 = clients1[0] + clients2, err := repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client with tags: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, client2.ID, fmt.Sprintf("add new disabled client with tags: expected %v got %s\n", nil, err)) + } + client2 = clients2[0] + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update tags for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Tags: []string{"updated"}, + }, + err: nil, + }, + { + desc: "update tags for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Tags: []string{"updated"}, + }, + err: errors.ErrNotFound, + }, + { + desc: "update tags for invalid client", + client: mfclients.Client{ + ID: wrongID, + Tags: []string{"updated"}, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.UpdateTags(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.client.Tags, expected.Tags, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Tags, expected.Tags)) + } + } +} + +func 
TestClientsUpdateSecret(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: password, + }, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client", + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: password, + }, + Status: mfclients.DisabledStatus, + } + + rClients1, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, rClients1[0].ID, fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + } + rClients2, err := repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, rClients2[0].ID, fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + } + + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update secret for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: "newpassword", + }, + }, + err: nil, + }, + { + desc: "update secret for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: "newpassword", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update secret for invalid client", + client: mfclients.Client{ + ID: wrongID, + Credentials: mfclients.Credentials{ + Identity: 
"client3-update@example.com", + Secret: "newpassword", + }, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + _, err := repo.UpdateSecret(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + c, err := repo.RetrieveByIdentity(context.Background(), tc.client.Credentials.Identity) + require.Nil(t, err, fmt.Sprintf("retrieve client by id during update of secret unexpected error: %s", err)) + assert.Equal(t, tc.client.Credentials.Secret, c.Credentials.Secret, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Credentials.Secret, c.Credentials.Secret)) + } + } +} + +func TestClientsUpdateIdentity(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + Credentials: mfclients.Credentials{ + Identity: "client1-update@example.com", + Secret: password, + }, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client", + Credentials: mfclients.Credentials{ + Identity: "client2-update@example.com", + Secret: password, + }, + Status: mfclients.DisabledStatus, + } + + rClients1, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, rClients1[0].ID, fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + } + rClients2, err := repo.Save(context.Background(), client2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, rClients2[0].ID, fmt.Sprintf("add new disabled client: expected %v got %s\n", nil, err)) + 
} + + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update identity for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Credentials: mfclients.Credentials{ + Identity: "client1-updated@example.com", + }, + }, + err: nil, + }, + { + desc: "update identity for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Credentials: mfclients.Credentials{ + Identity: "client2-updated@example.com", + }, + }, + err: errors.ErrNotFound, + }, + { + desc: "update identity for invalid client", + client: mfclients.Client{ + ID: wrongID, + Credentials: mfclients.Credentials{ + Identity: "client3-updated@example.com", + }, + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.UpdateIdentity(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.client.Credentials.Identity, expected.Credentials.Identity, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Credentials.Identity, expected.Credentials.Identity)) + } + } +} + +func TestClientsUpdateOwner(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client-with-owner", + Credentials: mfclients.Credentials{ + Identity: "client1-update-owner@example.com", + Secret: password, + }, + Owner: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "disabled-client-with-owner", + Credentials: mfclients.Credentials{ + Identity: "client2-update-owner@example.com", + Secret: password, + }, + Owner: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.DisabledStatus, + } + + clients1, err := 
repo.Save(context.Background(), client1) + client1 = clients1[0] + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client with owner: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client1.ID, client1.ID, fmt.Sprintf("add new client with owner: expected %v got %s\n", nil, err)) + } + clients2, err := repo.Save(context.Background(), client2) + client2 = clients2[0] + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled client with owner: expected %v got %s\n", nil, err)) + if err == nil { + assert.Equal(t, client2.ID, client2.ID, fmt.Sprintf("add new disabled client with owner: expected %v got %s\n", nil, err)) + } + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "update owner for enabled client", + client: mfclients.Client{ + ID: client1.ID, + Owner: testsutil.GenerateUUID(t, idProvider), + }, + err: nil, + }, + { + desc: "update owner for disabled client", + client: mfclients.Client{ + ID: client2.ID, + Owner: testsutil.GenerateUUID(t, idProvider), + }, + err: errors.ErrNotFound, + }, + { + desc: "update owner for invalid client", + client: mfclients.Client{ + ID: wrongID, + Owner: testsutil.GenerateUUID(t, idProvider), + }, + err: errors.ErrNotFound, + }, + } + for _, tc := range ucases { + expected, err := repo.UpdateOwner(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.client.Owner, expected.Owner, fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.client.Owner, expected.Owner)) + } + } +} + +func TestClientsChangeStatus(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := cpostgres.NewClientRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "enabled-client", + Credentials: mfclients.Credentials{ + Identity: 
"client1-update@example.com", + Secret: password, + }, + Status: mfclients.EnabledStatus, + } + + clients1, err := repo.Save(context.Background(), client1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new client: expected %v got %s\n", nil, err)) + client1 = clients1[0] + + ucases := []struct { + desc string + client mfclients.Client + err error + }{ + { + desc: "change client status for an enabled client", + client: mfclients.Client{ + ID: client1.ID, + Status: 0, + }, + err: nil, + }, + { + desc: "change client status for a disabled client", + client: mfclients.Client{ + ID: client1.ID, + Status: 1, + }, + err: nil, + }, + { + desc: "change client status for non-existing client", + client: mfclients.Client{ + ID: "invalid", + Status: 2, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range ucases { + expected, err := repo.ChangeStatus(context.Background(), tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.Equal(t, tc.client.Status, expected.Status, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.client.Status, expected.Status)) + } + } +} diff --git a/users/clients/postgres/doc.go b/users/clients/postgres/doc.go new file mode 100644 index 0000000000..bf560bea28 --- /dev/null +++ b/users/clients/postgres/doc.go @@ -0,0 +1 @@ +package postgres diff --git a/users/clients/postgres/setup_test.go b/users/clients/postgres/setup_test.go new file mode 100644 index 0000000000..e377f955a8 --- /dev/null +++ b/users/clients/postgres/setup_test.go @@ -0,0 +1,93 @@ +// Package postgres_test contains tests for PostgreSQL repository +// implementations. 
+package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/jmoiron/sqlx" + pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/postgres" + upostgres "github.com/mainflux/mainflux/users/postgres" + dockertest "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.1-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgClient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgClient.SetupDB(dbConfig, *upostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + database = postgres.NewDatabase(db, tracer) + + code := 
m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/users/clients/service.go b/users/clients/service.go new file mode 100644 index 0000000000..6f2695cc89 --- /dev/null +++ b/users/clients/service.go @@ -0,0 +1,439 @@ +package clients + +import ( + "context" + "regexp" + "time" + + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/apiutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/jwt" + "github.com/mainflux/mainflux/users/policies" +) + +const ( + MyKey = "mine" + clientsObjectKey = "clients" + updateRelationKey = "c_update" + listRelationKey = "c_list" + deleteRelationKey = "c_delete" + entityType = "client" +) + +var ( + // ErrMissingResetToken indicates malformed or missing reset token + // for resetting password. + ErrMissingResetToken = errors.New("missing reset token") + + // ErrRecoveryToken indicates error in generating password recovery token. + ErrRecoveryToken = errors.New("failed to generate password recovery token") + + // ErrGetToken indicates error in getting signed token. + ErrGetToken = errors.New("failed to fetch signed token") + + // ErrPasswordFormat indicates weak password. + ErrPasswordFormat = errors.New("password does not meet the requirements") +) + +// Service unites Clients and Group services. +type Service interface { + ClientService + jwt.TokenService +} + +type service struct { + clients mfclients.Repository + policies policies.PolicyRepository + idProvider mainflux.IDProvider + hasher Hasher + tokens jwt.TokenRepository + email Emailer + passRegex *regexp.Regexp +} + +// NewService returns a new Clients service implementation.
+func NewService(c mfclients.Repository, p policies.PolicyRepository, t jwt.TokenRepository, e Emailer, h Hasher, idp mainflux.IDProvider, pr *regexp.Regexp) Service { + return service{ + clients: c, + policies: p, + hasher: h, + tokens: t, + email: e, + idProvider: idp, + passRegex: pr, + } +} + +func (svc service) RegisterClient(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + clientID, err := svc.idProvider.ID() + if err != nil { + return mfclients.Client{}, err + } + + // We don't check the error currently since we can register client with empty token + ownerID, _ := svc.Identify(ctx, token) + if ownerID != "" && cli.Owner == "" { + cli.Owner = ownerID + } + if cli.Credentials.Secret == "" { + return mfclients.Client{}, apiutil.ErrMissingSecret + } + hash, err := svc.hasher.Hash(cli.Credentials.Secret) + if err != nil { + return mfclients.Client{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + cli.Credentials.Secret = hash + if cli.Status != mfclients.DisabledStatus && cli.Status != mfclients.EnabledStatus { + return mfclients.Client{}, apiutil.ErrInvalidStatus + } + if cli.Role != mfclients.UserRole && cli.Role != mfclients.AdminRole { + return mfclients.Client{}, apiutil.ErrInvalidRole + } + cli.ID = clientID + cli.CreatedAt = time.Now() + + client, err := svc.clients.Save(ctx, cli) + if err != nil { + return mfclients.Client{}, err + } + + return client[0], nil +} + +func (svc service) IssueToken(ctx context.Context, identity, secret string) (jwt.Token, error) { + dbUser, err := svc.clients.RetrieveByIdentity(ctx, identity) + if err != nil { + return jwt.Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + if err := svc.hasher.Compare(secret, dbUser.Credentials.Secret); err != nil { + return jwt.Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + + claims := jwt.Claims{ + ClientID: dbUser.ID, + Email: dbUser.Credentials.Identity, + } + + return svc.tokens.Issue(ctx, claims) +} + +func (svc service) 
RefreshToken(ctx context.Context, accessToken string) (jwt.Token, error) { + claims, err := svc.tokens.Parse(ctx, accessToken) + if err != nil { + return jwt.Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + if claims.Type != jwt.RefreshToken { + return jwt.Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + if _, err := svc.clients.RetrieveByID(ctx, claims.ClientID); err != nil { + return jwt.Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + + return svc.tokens.Issue(ctx, claims) +} + +func (svc service) ViewClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + ir, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if ir == id { + return svc.clients.RetrieveByID(ctx, id) + } + + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: ir, Object: id, Actions: []string{listRelationKey}}); err != nil { + return mfclients.Client{}, err + } + + return svc.clients.RetrieveByID(ctx, id) +} + +func (svc service) ViewProfile(ctx context.Context, token string) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + return svc.clients.RetrieveByID(ctx, id) +} + +func (svc service) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.ClientsPage{}, err + } + + if pm.SharedBy == MyKey { + pm.SharedBy = id + } + if pm.Owner == MyKey { + pm.Owner = id + } + pm.Action = "c_list" + + // If the user is admin, fetch all clients from database.
+ p := policies.Policy{Subject: id, Object: clientsObjectKey, Actions: []string{listRelationKey}} + if err := svc.authorize(ctx, clientsObjectKey, p); err == nil { + pm.SharedBy = "" + pm.Owner = "" + pm.Action = "" + } + + clients, err := svc.clients.RetrieveAll(ctx, pm) + if err != nil { + return mfclients.ClientsPage{}, err + } + for i, client := range clients.Clients { + if client.ID == id { + clients.Clients = append(clients.Clients[:i], clients.Clients[i+1:]...) + if clients.Total != 0 { + clients.Total = clients.Total - 1 + } + } + } + + return clients, nil +} + +func (svc service) UpdateClient(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: id, Object: cli.ID, Actions: []string{updateRelationKey}}); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: cli.ID, + Name: cli.Name, + Metadata: cli.Metadata, + UpdatedAt: time.Now(), + UpdatedBy: id, + } + + return svc.clients.Update(ctx, client) +} + +func (svc service) UpdateClientTags(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: id, Object: cli.ID, Actions: []string{updateRelationKey}}); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: cli.ID, + Tags: cli.Tags, + UpdatedAt: time.Now(), + UpdatedBy: id, + } + + return svc.clients.UpdateTags(ctx, client) +} + +func (svc service) UpdateClientIdentity(ctx context.Context, token, clientID, identity string) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: id, 
Object: clientID, Actions: []string{updateRelationKey}}); err != nil { + return mfclients.Client{}, err + } + + cli := mfclients.Client{ + ID: id, + Credentials: mfclients.Credentials{ + Identity: identity, + }, + UpdatedAt: time.Now(), + UpdatedBy: id, + } + return svc.clients.UpdateIdentity(ctx, cli) +} + +func (svc service) GenerateResetToken(ctx context.Context, email, host string) error { + client, err := svc.clients.RetrieveByIdentity(ctx, email) + if err != nil || client.Credentials.Identity == "" { + return errors.ErrNotFound + } + claims := jwt.Claims{ + ClientID: client.ID, + Email: client.Credentials.Identity, + } + t, err := svc.tokens.Issue(ctx, claims) + if err != nil { + return errors.Wrap(ErrRecoveryToken, err) + } + return svc.SendPasswordReset(ctx, host, email, client.Name, t.AccessToken) +} + +func (svc service) ResetSecret(ctx context.Context, resetToken, secret string) error { + id, err := svc.Identify(ctx, resetToken) + if err != nil { + return errors.Wrap(errors.ErrAuthentication, err) + } + c, err := svc.clients.RetrieveByID(ctx, id) + if err != nil { + return err + } + if c.Credentials.Identity == "" { + return errors.ErrNotFound + } + if !svc.passRegex.MatchString(secret) { + return ErrPasswordFormat + } + secret, err = svc.hasher.Hash(secret) + if err != nil { + return err + } + c = mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: c.Credentials.Identity, + Secret: secret, + }, + UpdatedAt: time.Now(), + UpdatedBy: id, + } + if _, err := svc.clients.UpdateSecret(ctx, c); err != nil { + return err + } + return nil +} + +func (svc service) UpdateClientSecret(ctx context.Context, token, oldSecret, newSecret string) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if !svc.passRegex.MatchString(newSecret) { + return mfclients.Client{}, ErrPasswordFormat + } + dbClient, err := svc.clients.RetrieveByID(ctx, id) + if err != nil { + return 
mfclients.Client{}, err + } + if _, err := svc.IssueToken(ctx, dbClient.Credentials.Identity, oldSecret); err != nil { + return mfclients.Client{}, errors.ErrAuthentication + } + newSecret, err = svc.hasher.Hash(newSecret) + if err != nil { + return mfclients.Client{}, err + } + dbClient.Credentials.Secret = newSecret + dbClient.UpdatedAt = time.Now() + dbClient.UpdatedBy = id + + return svc.clients.UpdateSecret(ctx, dbClient) +} + +func (svc service) SendPasswordReset(_ context.Context, host, email, user, token string) error { + to := []string{email} + return svc.email.SendPasswordReset(to, host, user, token) +} + +func (svc service) UpdateClientOwner(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: id, Object: cli.ID, Actions: []string{updateRelationKey}}); err != nil { + return mfclients.Client{}, err + } + + client := mfclients.Client{ + ID: cli.ID, + Owner: cli.Owner, + UpdatedAt: time.Now(), + UpdatedBy: id, + } + + return svc.clients.UpdateOwner(ctx, client) +} + +func (svc service) EnableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + client := mfclients.Client{ + ID: id, + UpdatedAt: time.Now(), + Status: mfclients.EnabledStatus, + } + client, err := svc.changeClientStatus(ctx, token, client) + if err != nil { + return mfclients.Client{}, errors.Wrap(mfclients.ErrEnableClient, err) + } + + return client, nil +} + +func (svc service) DisableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + client := mfclients.Client{ + ID: id, + UpdatedAt: time.Now(), + Status: mfclients.DisabledStatus, + } + client, err := svc.changeClientStatus(ctx, token, client) + if err != nil { + return mfclients.Client{}, errors.Wrap(mfclients.ErrDisableClient, err) + } + + return client, nil +} + +func (svc service) ListMembers(ctx 
context.Context, token, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.MembersPage{}, err + } + // If the user is admin, fetch all members from the database. + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: id, Object: clientsObjectKey, Actions: []string{listRelationKey}}); err == nil { + return svc.clients.Members(ctx, groupID, pm) + } + pm.Subject = id + pm.Action = "g_list" + + return svc.clients.Members(ctx, groupID, pm) +} + +func (svc service) changeClientStatus(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + id, err := svc.Identify(ctx, token) + if err != nil { + return mfclients.Client{}, err + } + if err := svc.authorize(ctx, entityType, policies.Policy{Subject: id, Object: client.ID, Actions: []string{deleteRelationKey}}); err != nil { + return mfclients.Client{}, err + } + dbClient, err := svc.clients.RetrieveByID(ctx, client.ID) + if err != nil { + return mfclients.Client{}, err + } + if dbClient.Status == client.Status { + return mfclients.Client{}, mfclients.ErrStatusAlreadyAssigned + } + client.UpdatedBy = id + return svc.clients.ChangeStatus(ctx, client) +} + +func (svc service) authorize(ctx context.Context, entityType string, p policies.Policy) error { + if err := p.Validate(); err != nil { + return err + } + if err := svc.policies.CheckAdmin(ctx, p.Subject); err == nil { + return nil + } + return svc.policies.Evaluate(ctx, entityType, p) +} + +func (svc service) Identify(ctx context.Context, tkn string) (string, error) { + claims, err := svc.tokens.Parse(ctx, tkn) + if err != nil { + return "", errors.Wrap(errors.ErrAuthentication, err) + } + if claims.Type != jwt.AccessToken { + return "", errors.ErrAuthentication + } + + return claims.ClientID, nil +} diff --git a/users/clients/service_test.go b/users/clients/service_test.go new file mode 100644 index 0000000000..1bbe6b5a9a --- /dev/null 
+++ b/users/clients/service_test.go @@ -0,0 +1,1436 @@ +package clients_test + +import ( + context "context" + fmt "fmt" + "regexp" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/hasher" + "github.com/mainflux/mainflux/users/jwt" + pmocks "github.com/mainflux/mainflux/users/policies/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() + phasher = hasher.New() + secret = "strongsecret" + validCMetadata = mfclients.Metadata{"role": "client"} + client = mfclients.Client{ + ID: testsutil.GenerateUUID(&testing.T{}, idProvider), + Name: "clientname", + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{Identity: "clientidentity", Secret: secret}, + Metadata: validCMetadata, + Status: mfclients.EnabledStatus, + } + inValidToken = "invalidToken" + withinDuration = 5 * time.Second + passRegex = regexp.MustCompile("^.{8,}$") + accessDuration = time.Minute * 1 + refreshDuration = time.Minute * 10 +) + +func TestRegisterClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + cases := []struct { + desc string + client mfclients.Client + token string + err error + }{ + { + desc: "register new client", + client: client, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + err: nil, + }, + { + desc: "register 
existing client", + client: client, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + err: errors.ErrConflict, + }, + { + desc: "register a new enabled client with name", + client: mfclients.Client{ + Name: "clientWithName", + Credentials: mfclients.Credentials{ + Identity: "newclientwithname@example.com", + Secret: secret, + }, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new disabled client with name", + client: mfclients.Client{ + Name: "clientWithName", + Credentials: mfclients.Credentials{ + Identity: "newclientwithname@example.com", + Secret: secret, + }, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new enabled client with tags", + client: mfclients.Client{ + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{ + Identity: "newclientwithtags@example.com", + Secret: secret, + }, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new disabled client with tags", + client: mfclients.Client{ + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{ + Identity: "newclientwithtags@example.com", + Secret: secret, + }, + Status: mfclients.DisabledStatus, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new enabled client with metadata", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithmetadata@example.com", + Secret: secret, + }, + Metadata: validCMetadata, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, 
idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new disabled client with metadata", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithmetadata@example.com", + Secret: secret, + }, + Metadata: validCMetadata, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new disabled client", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithvalidstatus@example.com", + Secret: secret, + }, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new client with valid disabled status", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithvalidstatus@example.com", + Secret: secret, + }, + Status: mfclients.DisabledStatus, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new client with all fields", + client: mfclients.Client{ + Name: "newclientwithallfields", + Tags: []string{"tag1", "tag2"}, + Credentials: mfclients.Credentials{ + Identity: "newclientwithallfields@example.com", + Secret: secret, + }, + Metadata: mfclients.Metadata{ + "name": "newclientwithallfields", + }, + Status: mfclients.EnabledStatus, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new client with missing identity", + client: mfclients.Client{ + Name: "clientWithMissingIdentity", + Credentials: mfclients.Credentials{ + Secret: secret, + }, + }, + err: errors.ErrMalformedEntity, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new client with invalid owner", + client: mfclients.Client{ + Owner: mocks.WrongID, + 
Credentials: mfclients.Credentials{ + Identity: "newclientwithinvalidowner@example.com", + Secret: secret, + }, + }, + err: errors.ErrMalformedEntity, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new client with empty secret", + client: mfclients.Client{ + Owner: testsutil.GenerateUUID(t, idProvider), + Credentials: mfclients.Credentials{ + Identity: "newclientwithemptysecret@example.com", + }, + }, + err: apiutil.ErrMissingSecret, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + { + desc: "register a new client with invalid status", + client: mfclients.Client{ + Credentials: mfclients.Credentials{ + Identity: "newclientwithinvalidstatus@example.com", + Secret: secret, + }, + Status: mfclients.AllStatus, + }, + err: apiutil.ErrInvalidStatus, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("Save", context.Background(), mock.Anything).Return(&mfclients.Client{}, tc.err) + registerTime := time.Now() + expected, err := svc.RegisterClient(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.NotEmpty(t, expected.ID, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, expected.ID)) + assert.WithinDuration(t, expected.CreatedAt, registerTime, withinDuration, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected.CreatedAt, registerTime)) + tc.client.ID = expected.ID + tc.client.CreatedAt = expected.CreatedAt + tc.client.UpdatedAt = expected.UpdatedAt + tc.client.Credentials.Secret = expected.Credentials.Secret + tc.client.Owner = expected.Owner + tc.client.UpdatedBy = expected.UpdatedBy + assert.Equal(t, tc.client, expected, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, 
tc.client, expected)) + ok := repoCall.Parent.AssertCalled(t, "Save", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + } + repoCall.Unset() + } +} + +func TestViewClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + cases := []struct { + desc string + token string + clientID string + response mfclients.Client + err error + }{ + { + desc: "view client successfully", + response: client, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + clientID: client.ID, + err: nil, + }, + { + desc: "view client with an invalid token", + response: mfclients.Client{}, + token: inValidToken, + clientID: "", + err: errors.ErrAuthentication, + }, + { + desc: "view client with valid token and invalid client id", + response: mfclients.Client{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + clientID: mocks.WrongID, + err: errors.ErrNotFound, + }, + { + desc: "view client with an invalid token and invalid client id", + response: mfclients.Client{}, + token: inValidToken, + clientID: mocks.WrongID, + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("Evaluate", context.Background(), "client", mock.Anything).Return(nil) + repoCall1 := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall2 := cRepo.On("RetrieveByID", context.Background(), tc.clientID).Return(tc.response, tc.err) + rClient, err := svc.ViewClient(context.Background(), tc.token, tc.clientID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, rClient, 
fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, rClient)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.clientID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + repoCall2.Unset() + repoCall1.Unset() + repoCall.Unset() + } +} + +func TestListClients(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + var nClients = uint64(200) + var aClients = []mfclients.Client{} + var OwnerID = testsutil.GenerateUUID(t, idProvider) + for i := uint64(1); i < nClients; i++ { + identity := fmt.Sprintf("TestListClients_%d@example.com", i) + client := mfclients.Client{ + Name: identity, + Credentials: mfclients.Credentials{ + Identity: identity, + Secret: "password", + }, + Tags: []string{"tag1", "tag2"}, + Metadata: mfclients.Metadata{"role": "client"}, + } + if i%50 == 0 { + client.Owner = OwnerID + client.Owner = testsutil.GenerateUUID(t, idProvider) + } + aClients = append(aClients, client) + } + + cases := []struct { + desc string + token string + page mfclients.Page + response mfclients.ClientsPage + size uint64 + err error + }{ + { + desc: "list clients with authorized token", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + + page: mfclients.Page{ + Status: mfclients.AllStatus, + }, + size: 0, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + err: nil, + }, + { + desc: "list clients with an invalid token", + token: 
inValidToken, + page: mfclients.Page{ + Status: mfclients.AllStatus, + }, + size: 0, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrAuthentication, + }, + { + desc: "list clients that are shared with me", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that are shared with me with a specific name", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Name: "TestListClients3", + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that are shared with me with an invalid name", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Name: "notpresentclient", + Status: mfclients.EnabledStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + size: 0, + }, + { + desc: "list clients that I own", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + Status: mfclients.EnabledStatus, + }, + 
response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own with a specific name", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + Name: "TestListClients3", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own with an invalid name", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + Name: "notpresentclient", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + size: 0, + }, + { + desc: "list clients that I own and are shared with me", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Owner: clients.MyKey, + SharedBy: clients.MyKey, + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own and are shared with me with a specific name", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Owner: clients.MyKey, + 
Name: "TestListClients3", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 4, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{aClients[0], aClients[50], aClients[100], aClients[150]}, + }, + size: 4, + }, + { + desc: "list clients that I own and are shared with me with an invalid name", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + SharedBy: clients.MyKey, + Owner: clients.MyKey, + Name: "notpresentclient", + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Clients: []mfclients.Client{}, + }, + size: 0, + }, + { + desc: "list clients with offset and limit", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Status: mfclients.AllStatus, + }, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: nClients - 6, + Offset: 0, + Limit: 0, + }, + Clients: aClients[6:nClients], + }, + size: nClients - 6, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(tc.err) + repoCall1 := cRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, tc.err) + page, err := svc.ListClients(context.Background(), tc.token, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "RetrieveAll", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("RetrieveAll was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClient(t 
*testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + client1 := client + client2 := client + client1.Name = "Updated client" + client2.Metadata = mfclients.Metadata{"role": "test"} + + cases := []struct { + desc string + client mfclients.Client + response mfclients.Client + token string + err error + }{ + { + desc: "update client name with valid token", + client: client1, + response: client1, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + err: nil, + }, + { + desc: "update client name with invalid token", + client: client1, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + { + desc: "update client name with invalid ID", + client: mfclients.Client{ + ID: mocks.WrongID, + Name: "Updated Client", + }, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + { + desc: "update client metadata with valid token", + client: client2, + response: client2, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + err: nil, + }, + { + desc: "update client metadata with invalid token", + client: client2, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := cRepo.On("Update", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClient(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, 
fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Update", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientTags(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + client.Tags = []string{"updated"} + + cases := []struct { + desc string + client mfclients.Client + response mfclients.Client + token string + err error + }{ + { + desc: "update client tags with valid token", + client: client, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + response: client, + err: nil, + }, + { + desc: "update client tags with invalid token", + client: client, + token: "non-existent", + response: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + { + desc: "update client name with invalid ID", + client: mfclients.Client{ + ID: mocks.WrongID, + Name: "Updated name", + }, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := cRepo.On("UpdateTags", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClientTags(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, 
tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "UpdateTags", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateTags was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientIdentity(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + client2 := client + client2.Credentials.Identity = "updated@example.com" + + cases := []struct { + desc string + identity string + response mfclients.Client + token string + id string + err error + }{ + { + desc: "update client identity with valid token", + identity: "updated@example.com", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + id: client.ID, + response: client2, + err: nil, + }, + { + desc: "update client identity with invalid id", + identity: "updated@example.com", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + id: mocks.WrongID, + response: mfclients.Client{}, + err: errors.ErrNotFound, + }, + { + desc: "update client identity with invalid token", + identity: "updated@example.com", + token: "non-existent", + id: client2.ID, + response: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := cRepo.On("UpdateIdentity", context.Background(), mock.Anything).Return(tc.response, tc.err) 
+ updatedClient, err := svc.UpdateClientIdentity(context.Background(), tc.token, tc.id, tc.identity) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "UpdateIdentity", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateIdentity was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientOwner(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + client.Owner = "newowner@mail.com" + + cases := []struct { + desc string + client mfclients.Client + response mfclients.Client + token string + err error + }{ + { + desc: "update client owner with valid token", + client: client, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + response: client, + err: nil, + }, + { + desc: "update client owner with invalid token", + client: client, + token: "non-existent", + response: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + { + desc: "update client owner with invalid ID", + client: mfclients.Client{ + ID: mocks.WrongID, + Owner: "updatedowner@mail.com", + }, + response: mfclients.Client{}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := 
cRepo.On("UpdateOwner", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClientOwner(context.Background(), tc.token, tc.client) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "UpdateOwner", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateOwner was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestUpdateClientSecret(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + rClient := client + rClient.Credentials.Secret, _ = phasher.Hash(client.Credentials.Secret) + + repoCall := cRepo.On("RetrieveByIdentity", context.Background(), client.Credentials.Identity).Return(rClient, nil) + token, err := svc.IssueToken(context.Background(), client.Credentials.Identity, client.Credentials.Secret) + assert.Nil(t, err, fmt.Sprintf("Issue token expected nil got %s\n", err)) + repoCall.Unset() + + cases := []struct { + desc string + oldSecret string + newSecret string + token string + response mfclients.Client + err error + }{ + { + desc: "update client secret with valid token", + oldSecret: client.Credentials.Secret, + newSecret: "newSecret", + token: token.AccessToken, + response: rClient, + err: nil, + }, + { + desc: "update client secret with invalid token", + oldSecret: client.Credentials.Secret, + newSecret: 
"newPassword", + token: "non-existent", + response: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + { + desc: "update client secret with wrong old secret", + oldSecret: "oldSecret", + newSecret: "newSecret", + token: token.AccessToken, + response: mfclients.Client{}, + err: apiutil.ErrInvalidSecret, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByID", context.Background(), client.ID).Return(tc.response, tc.err) + repoCall1 := cRepo.On("RetrieveByIdentity", context.Background(), client.Credentials.Identity).Return(tc.response, tc.err) + repoCall2 := cRepo.On("UpdateSecret", context.Background(), mock.Anything).Return(tc.response, tc.err) + updatedClient, err := svc.UpdateClientSecret(context.Background(), tc.token, tc.oldSecret, tc.newSecret) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, updatedClient, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, updatedClient)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.response.ID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByIdentity", context.Background(), tc.response.Credentials.Identity) + assert.True(t, ok, fmt.Sprintf("RetrieveByIdentity was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "UpdateSecret", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("UpdateSecret was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } +} + +func TestEnableClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + enabledClient1 
:= mfclients.Client{ID: testsutil.GenerateUUID(t, idProvider), Credentials: mfclients.Credentials{Identity: "client1@example.com", Secret: "password"}, Status: mfclients.EnabledStatus} + disabledClient1 := mfclients.Client{ID: testsutil.GenerateUUID(t, idProvider), Credentials: mfclients.Credentials{Identity: "client3@example.com", Secret: "password"}, Status: mfclients.DisabledStatus} + endisabledClient1 := disabledClient1 + endisabledClient1.Status = mfclients.EnabledStatus + + cases := []struct { + desc string + id string + token string + client mfclients.Client + response mfclients.Client + err error + }{ + { + desc: "enable disabled client", + id: disabledClient1.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + client: disabledClient1, + response: endisabledClient1, + err: nil, + }, + { + desc: "enable enabled client", + id: enabledClient1.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + client: enabledClient1, + response: enabledClient1, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "enable non-existing client", + id: mocks.WrongID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + client: mfclients.Client{}, + response: mfclients.Client{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveByID", context.Background(), tc.id).Return(tc.client, tc.err) + repoCall2 := cRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.EnableClient(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + 
assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("ChangeStatus was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } + + cases2 := []struct { + desc string + status mfclients.Status + size uint64 + response mfclients.ClientsPage + }{ + { + desc: "list enabled clients", + status: mfclients.EnabledStatus, + size: 2, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1, endisabledClient1}, + }, + }, + { + desc: "list disabled clients", + status: mfclients.DisabledStatus, + size: 1, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{disabledClient1}, + }, + }, + { + desc: "list enabled and disabled clients", + status: mfclients.AllStatus, + size: 3, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1, disabledClient1, endisabledClient1}, + }, + }, + } + + for _, tc := range cases2 { + pm := mfclients.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + Action: "c_list", + } + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListClients(context.Background(), testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Clients)) + assert.Equal(t, tc.size, size, 
fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestDisableClient(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + enabledClient1 := mfclients.Client{ID: testsutil.GenerateUUID(t, idProvider), Credentials: mfclients.Credentials{Identity: "client1@example.com", Secret: "password"}, Status: mfclients.EnabledStatus} + disabledClient1 := mfclients.Client{ID: testsutil.GenerateUUID(t, idProvider), Credentials: mfclients.Credentials{Identity: "client3@example.com", Secret: "password"}, Status: mfclients.DisabledStatus} + disenabledClient1 := enabledClient1 + disenabledClient1.Status = mfclients.DisabledStatus + + cases := []struct { + desc string + id string + token string + client mfclients.Client + response mfclients.Client + err error + }{ + { + desc: "disable enabled client", + id: enabledClient1.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + client: enabledClient1, + response: disenabledClient1, + err: nil, + }, + { + desc: "disable disabled client", + id: disabledClient1.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + client: disabledClient1, + response: mfclients.Client{}, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "disable non-existing client", + id: mocks.WrongID, + client: mfclients.Client{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + response: mfclients.Client{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := 
cRepo.On("RetrieveByID", context.Background(), tc.id).Return(tc.client, tc.err) + repoCall2 := cRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.DisableClient(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("ChangeStatus was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } + + cases2 := []struct { + desc string + status mfclients.Status + size uint64 + response mfclients.ClientsPage + }{ + { + desc: "list enabled clients", + status: mfclients.EnabledStatus, + size: 1, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1}, + }, + }, + { + desc: "list disabled clients", + status: mfclients.DisabledStatus, + size: 2, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{disenabledClient1, disabledClient1}, + }, + }, + { + desc: "list enabled and disabled clients", + status: mfclients.AllStatus, + size: 3, + response: mfclients.ClientsPage{ + Page: mfclients.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Clients: []mfclients.Client{enabledClient1, disabledClient1, disenabledClient1}, + }, + }, + } + + for _, tc := range cases2 { + pm := mfclients.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + Action: "c_list", + 
} + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := cRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListClients(context.Background(), testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Clients)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestListMembers(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + var nClients = uint64(10) + var aClients = []mfclients.Client{} + for i := uint64(1); i < nClients; i++ { + identity := fmt.Sprintf("member_%d@example.com", i) + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: identity, + Credentials: mfclients.Credentials{ + Identity: identity, + Secret: "password", + }, + Tags: []string{"tag1", "tag2"}, + Metadata: mfclients.Metadata{"role": "client"}, + } + aClients = append(aClients, client) + } + validID := testsutil.GenerateUUID(t, idProvider) + validToken := testsutil.GenerateValidToken(t, validID, svc, cRepo, phasher) + + cases := []struct { + desc string + token string + groupID string + page mfclients.Page + response mfclients.MembersPage + err error + }{ + { + desc: "list clients with authorized token", + token: validToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: mfclients.Page{ + Subject: validID, + Action: "g_list", + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + Members: []mfclients.Client{}, + }, + err: nil, + }, + { + 
desc: "list clients with offset and limit", + token: validToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: mfclients.Page{ + Offset: 6, + Limit: nClients, + Status: mfclients.AllStatus, + Subject: validID, + Action: "g_list", + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: nClients - 6 - 1, + }, + Members: aClients[6 : nClients-1], + }, + }, + { + desc: "list clients with an invalid token", + token: inValidToken, + groupID: testsutil.GenerateUUID(t, idProvider), + page: mfclients.Page{ + Subject: validID, + Action: "g_list", + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrAuthentication, + }, + { + desc: "list clients with an invalid id", + token: validToken, + groupID: mocks.WrongID, + page: mfclients.Page{ + Subject: validID, + Action: "g_list", + }, + response: mfclients.MembersPage{ + Page: mfclients.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), validID).Return(nil) + repoCall1 := cRepo.On("Members", context.Background(), tc.groupID, tc.page).Return(tc.response, tc.err) + page, err := svc.ListMembers(context.Background(), tc.token, tc.groupID, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), validID) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Members", context.Background(), tc.groupID, tc.page) + assert.True(t, ok, fmt.Sprintf("Members was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestIssueToken(t *testing.T) { + cRepo := 
new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + rClient := client + rClient2 := client + rClient3 := client + rClient.Credentials.Secret, _ = phasher.Hash(client.Credentials.Secret) + rClient2.Credentials.Secret = "wrongsecret" + rClient3.Credentials.Secret, _ = phasher.Hash("wrongsecret") + + cases := []struct { + desc string + client mfclients.Client + rClient mfclients.Client + err error + }{ + { + desc: "issue token for an existing client", + client: client, + rClient: rClient, + err: nil, + }, + { + desc: "issue token for a non-existing client", + client: client, + rClient: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + { + desc: "issue token for a client with wrong secret", + client: rClient2, + rClient: rClient3, + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := cRepo.On("RetrieveByIdentity", context.Background(), tc.client.Credentials.Identity).Return(tc.rClient, tc.err) + token, err := svc.IssueToken(context.Background(), tc.client.Credentials.Identity, tc.client.Credentials.Secret) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.NotEmpty(t, token.AccessToken, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, token.AccessToken)) + assert.NotEmpty(t, token.RefreshToken, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, token.RefreshToken)) + ok := repoCall.Parent.AssertCalled(t, "RetrieveByIdentity", context.Background(), tc.client.Credentials.Identity) + assert.True(t, ok, fmt.Sprintf("RetrieveByIdentity was not called on %s", tc.desc)) + } + repoCall.Unset() + } +} + +func TestRefreshToken(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + 
tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + svc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + + rClient := client + rClient.Credentials.Secret, _ = phasher.Hash(client.Credentials.Secret) + + repoCall := cRepo.On("RetrieveByIdentity", context.Background(), client.Credentials.Identity).Return(rClient, nil) + token, err := svc.IssueToken(context.Background(), client.Credentials.Identity, client.Credentials.Secret) + assert.Nil(t, err, fmt.Sprintf("Issue token expected nil got %s\n", err)) + repoCall.Unset() + + cases := []struct { + desc string + token string + client mfclients.Client + err error + }{ + { + desc: "refresh token with refresh token for an existing client", + token: token.RefreshToken, + client: client, + err: nil, + }, + { + desc: "refresh token with refresh token for a non-existing client", + token: token.RefreshToken, + client: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + { + desc: "refresh token with access token for an existing client", + token: token.AccessToken, + client: client, + err: errors.ErrAuthentication, + }, + { + desc: "refresh token with access token for a non-existing client", + token: token.AccessToken, + client: mfclients.Client{}, + err: errors.ErrAuthentication, + }, + { + desc: "refresh token with invalid token for an existing client", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), svc, cRepo, phasher), + client: client, + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall1 := cRepo.On("RetrieveByIdentity", context.Background(), tc.client.Credentials.Identity).Return(tc.client, nil) + repoCall2 := cRepo.On("RetrieveByID", context.Background(), mock.Anything).Return(tc.client, tc.err) + token, err := svc.RefreshToken(context.Background(), tc.token) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, 
err)) + if err == nil { + assert.NotEmpty(t, token.AccessToken, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, token.AccessToken)) + assert.NotEmpty(t, token.RefreshToken, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, token.RefreshToken)) + ok := repoCall1.Parent.AssertCalled(t, "RetrieveByIdentity", context.Background(), tc.client.Credentials.Identity) + assert.True(t, ok, fmt.Sprintf("RetrieveByIdentity was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.client.ID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + repoCall1.Unset() + repoCall2.Unset() + } +} diff --git a/users/clients/tracing/tracing.go b/users/clients/tracing/tracing.go new file mode 100644 index 0000000000..beb2cf7956 --- /dev/null +++ b/users/clients/tracing/tracing.go @@ -0,0 +1,151 @@ +package tracing + +import ( + "context" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/jwt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ clients.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + svc clients.Service +} + +func TracingMiddleware(svc clients.Service, tracer trace.Tracer) clients.Service { + return &tracingMiddleware{tracer, svc} +} + +func (tm *tracingMiddleware) RegisterClient(ctx context.Context, token string, client mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_register_client", trace.WithAttributes(attribute.String("identity", client.Credentials.Identity))) + defer span.End() + + return tm.svc.RegisterClient(ctx, token, client) +} + +func (tm *tracingMiddleware) IssueToken(ctx context.Context, identity, secret string) (jwt.Token, error) { + ctx, span := tm.tracer.Start(ctx, "svc_issue_token", trace.WithAttributes(attribute.String("identity", identity))) + 
defer span.End() + + return tm.svc.IssueToken(ctx, identity, secret) +} + +func (tm *tracingMiddleware) RefreshToken(ctx context.Context, accessToken string) (jwt.Token, error) { + ctx, span := tm.tracer.Start(ctx, "svc_refresh_token", trace.WithAttributes(attribute.String("access_token", accessToken))) + defer span.End() + + return tm.svc.RefreshToken(ctx, accessToken) +} +func (tm *tracingMiddleware) ViewClient(ctx context.Context, token string, id string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_client", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + return tm.svc.ViewClient(ctx, token, id) +} + +func (tm *tracingMiddleware) ListClients(ctx context.Context, token string, pm mfclients.Page) (mfclients.ClientsPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_clients") + defer span.End() + return tm.svc.ListClients(ctx, token, pm) +} + +func (tm *tracingMiddleware) UpdateClient(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_name_and_metadata", trace.WithAttributes(attribute.String("Name", cli.Name))) + defer span.End() + + return tm.svc.UpdateClient(ctx, token, cli) +} + +func (tm *tracingMiddleware) UpdateClientTags(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_tags", trace.WithAttributes(attribute.StringSlice("Tags", cli.Tags))) + defer span.End() + + return tm.svc.UpdateClientTags(ctx, token, cli) +} +func (tm *tracingMiddleware) UpdateClientIdentity(ctx context.Context, token, id, identity string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_identity", trace.WithAttributes(attribute.String("Identity", identity))) + defer span.End() + + return tm.svc.UpdateClientIdentity(ctx, token, id, identity) + +} + +func (tm *tracingMiddleware) UpdateClientSecret(ctx context.Context, 
token, oldSecret, newSecret string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_secret") + defer span.End() + + return tm.svc.UpdateClientSecret(ctx, token, oldSecret, newSecret) + +} + +func (tm *tracingMiddleware) GenerateResetToken(ctx context.Context, email, host string) error { + ctx, span := tm.tracer.Start(ctx, "svc_generate_reset_token") + defer span.End() + + return tm.svc.GenerateResetToken(ctx, email, host) + +} + +func (tm *tracingMiddleware) ResetSecret(ctx context.Context, token, secret string) error { + ctx, span := tm.tracer.Start(ctx, "svc_reset_secret") + defer span.End() + + return tm.svc.ResetSecret(ctx, token, secret) + +} + +func (tm *tracingMiddleware) SendPasswordReset(ctx context.Context, host, email, user, token string) error { + ctx, span := tm.tracer.Start(ctx, "svc_send_password_reset") + defer span.End() + + return tm.svc.SendPasswordReset(ctx, host, email, user, token) + +} + +func (tm *tracingMiddleware) ViewProfile(ctx context.Context, token string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_profile") + defer span.End() + + return tm.svc.ViewProfile(ctx, token) + +} + +func (tm *tracingMiddleware) UpdateClientOwner(ctx context.Context, token string, cli mfclients.Client) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_client_owner", trace.WithAttributes(attribute.StringSlice("Tags", cli.Tags))) + defer span.End() + + return tm.svc.UpdateClientOwner(ctx, token, cli) +} + +func (tm *tracingMiddleware) EnableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_enable_client", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.svc.EnableClient(ctx, token, id) +} + +func (tm *tracingMiddleware) DisableClient(ctx context.Context, token, id string) (mfclients.Client, error) { + ctx, span := tm.tracer.Start(ctx, "svc_disable_client", 
trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.svc.DisableClient(ctx, token, id) +} + +func (tm *tracingMiddleware) ListMembers(ctx context.Context, token, groupID string, pm mfclients.Page) (mfclients.MembersPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_members") + defer span.End() + + return tm.svc.ListMembers(ctx, token, groupID, pm) + +} + +func (tm *tracingMiddleware) Identify(ctx context.Context, token string) (string, error) { + ctx, span := tm.tracer.Start(ctx, "svc_identify", trace.WithAttributes(attribute.String("token", token))) + defer span.End() + + return tm.svc.Identify(ctx, token) +} diff --git a/users/emailer.go b/users/emailer.go deleted file mode 100644 index 5476afaf91..0000000000 --- a/users/emailer.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package users - -// Emailer wrapper around the email -type Emailer interface { - SendPasswordReset(To []string, host, token string) error -} diff --git a/users/emailer/emailer.go b/users/emailer/emailer.go deleted file mode 100644 index 9a4cf3526d..0000000000 --- a/users/emailer/emailer.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 -package emailer - -import ( - "fmt" - - "github.com/mainflux/mainflux/internal/email" - "github.com/mainflux/mainflux/users" -) - -var _ users.Emailer = (*emailer)(nil) - -type emailer struct { - resetURL string - agent *email.Agent -} - -// New creates new emailer utility -func New(url string, c *email.Config) (users.Emailer, error) { - e, err := email.New(c) - return &emailer{resetURL: url, agent: e}, err -} - -func (e *emailer) SendPasswordReset(To []string, host string, token string) error { - url := fmt.Sprintf("%s%s?token=%s", host, e.resetURL, token) - return e.agent.Send(To, "", "Password reset", "", url, "") -} diff --git a/users/groups/api/endpoints.go b/users/groups/api/endpoints.go new file mode 100644 
index 0000000000..029490b1db --- /dev/null +++ b/users/groups/api/endpoints.go @@ -0,0 +1,210 @@ +package api + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/users/groups" +) + +func createGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createGroupReq) + if err := req.validate(); err != nil { + return createGroupRes{}, err + } + + group, err := svc.CreateGroup(ctx, req.token, req.Group) + if err != nil { + return createGroupRes{}, err + } + + return createGroupRes{created: true, Group: group}, nil + } +} + +func viewGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(groupReq) + if err := req.validate(); err != nil { + return viewGroupRes{}, err + } + + group, err := svc.ViewGroup(ctx, req.token, req.id) + if err != nil { + return viewGroupRes{}, err + } + + return viewGroupRes{Group: group}, nil + } +} + +func updateGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updateGroupReq) + if err := req.validate(); err != nil { + return updateGroupRes{}, err + } + + group := mfgroups.Group{ + ID: req.id, + Name: req.Name, + Description: req.Description, + Metadata: req.Metadata, + } + + group, err := svc.UpdateGroup(ctx, req.token, group) + if err != nil { + return updateGroupRes{}, err + } + + return updateGroupRes{Group: group}, nil + } +} + +func enableGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeGroupStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + group, err := svc.EnableGroup(ctx, req.token, req.id) + if err != nil { + return nil, err 
+ } + return changeStatusRes{Group: group}, nil + } +} + +func disableGroupEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(changeGroupStatusReq) + if err := req.validate(); err != nil { + return nil, err + } + group, err := svc.DisableGroup(ctx, req.token, req.id) + if err != nil { + return nil, err + } + return changeStatusRes{Group: group}, nil + } +} + +func listGroupsEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listGroupsReq) + if err := req.validate(); err != nil { + return groupPageRes{}, err + } + page, err := svc.ListGroups(ctx, req.token, req.GroupsPage) + if err != nil { + return groupPageRes{}, err + } + + if req.tree { + return buildGroupsResponseTree(page), nil + } + + return buildGroupsResponse(page), nil + } +} + +func listMembershipsEndpoint(svc groups.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listMembershipReq) + if err := req.validate(); err != nil { + return membershipPageRes{}, err + } + + page, err := svc.ListMemberships(ctx, req.token, req.clientID, req.GroupsPage) + if err != nil { + return membershipPageRes{}, err + } + + res := membershipPageRes{ + pageRes: pageRes{ + Total: page.Total, + Offset: page.Offset, + Limit: page.Limit, + }, + Memberships: []viewMembershipRes{}, + } + for _, g := range page.Memberships { + res.Memberships = append(res.Memberships, viewMembershipRes{Group: g}) + } + + return res, nil + } +} + +func buildGroupsResponseTree(page mfgroups.GroupsPage) groupPageRes { + groupsMap := map[string]*mfgroups.Group{} + // Parents' map keeps its array of children. 
+ parentsMap := map[string][]*mfgroups.Group{} + for i := range page.Groups { + if _, ok := groupsMap[page.Groups[i].ID]; !ok { + groupsMap[page.Groups[i].ID] = &page.Groups[i] + parentsMap[page.Groups[i].ID] = make([]*mfgroups.Group, 0) + } + } + + for _, group := range groupsMap { + if children, ok := parentsMap[group.Parent]; ok { + children = append(children, group) + parentsMap[group.Parent] = children + } + } + + res := groupPageRes{ + pageRes: pageRes{ + Limit: page.Limit, + Offset: page.Offset, + Total: page.Total, + Level: page.Level, + }, + Groups: []viewGroupRes{}, + } + + for _, group := range groupsMap { + if children, ok := parentsMap[group.ID]; ok { + group.Children = children + } + + } + + for _, group := range groupsMap { + view := toViewGroupRes(*group) + if children, ok := parentsMap[group.Parent]; len(children) == 0 || !ok { + res.Groups = append(res.Groups, view) + } + } + + return res +} + +func toViewGroupRes(group mfgroups.Group) viewGroupRes { + view := viewGroupRes{ + Group: group, + } + return view +} + +func buildGroupsResponse(gp mfgroups.GroupsPage) groupPageRes { + res := groupPageRes{ + pageRes: pageRes{ + Total: gp.Total, + Level: gp.Level, + }, + Groups: []viewGroupRes{}, + } + + for _, group := range gp.Groups { + view := viewGroupRes{ + Group: group, + } + res.Groups = append(res.Groups, view) + } + + return res +} diff --git a/users/groups/api/logging.go b/users/groups/api/logging.go new file mode 100644 index 0000000000..36b22ba0a4 --- /dev/null +++ b/users/groups/api/logging.go @@ -0,0 +1,106 @@ +package api + +import ( + "context" + "fmt" + "time" + + mflog "github.com/mainflux/mainflux/logger" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/users/groups" +) + +var _ groups.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger mflog.Logger + svc groups.Service +} + +func LoggingMiddleware(svc groups.Service, logger mflog.Logger) groups.Service { + return 
&loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) CreateGroup(ctx context.Context, token string, group mfgroups.Group) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method create_group for group %s with id %s using token %s took %s to complete", g.Name, g.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.CreateGroup(ctx, token, group) +} + +func (lm *loggingMiddleware) UpdateGroup(ctx context.Context, token string, group mfgroups.Group) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_group for group %s with id %s using token %s took %s to complete", g.Name, g.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdateGroup(ctx, token, group) +} + +func (lm *loggingMiddleware) ViewGroup(ctx context.Context, token, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method view_group for group %s with id %s using token %s took %s to complete", g.Name, g.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ViewGroup(ctx, token, id) +} + +func (lm *loggingMiddleware) ListGroups(ctx context.Context, token string, gp mfgroups.GroupsPage) (cg mfgroups.GroupsPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_groups %d groups using token %s took %s to complete", cg.Total, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) 
+ return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListGroups(ctx, token, gp) +} + +func (lm *loggingMiddleware) EnableGroup(ctx context.Context, token, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method enable_group for group with id %s using token %s took %s to complete", g.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.EnableGroup(ctx, token, id) +} + +func (lm *loggingMiddleware) DisableGroup(ctx context.Context, token, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method disable_group for group with id %s using token %s took %s to complete", g.ID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.DisableGroup(ctx, token, id) +} + +func (lm *loggingMiddleware) ListMemberships(ctx context.Context, token, clientID string, cp mfgroups.GroupsPage) (mp mfgroups.MembershipsPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_memberships for client with id %s using token %s took %s to complete", clientID, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListMemberships(ctx, token, clientID, cp) +} diff --git a/users/groups/api/metrics.go b/users/groups/api/metrics.go new file mode 100644 index 0000000000..a1a12c4926 --- /dev/null +++ b/users/groups/api/metrics.go @@ -0,0 +1,83 @@ +package api + +import ( + "context" + "time" + + "github.com/go-kit/kit/metrics" + mfgroups 
"github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/users/groups" +) + +var _ groups.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc groups.Service +} + +// MetricsMiddleware returns a new metrics middleware wrapper. +func MetricsMiddleware(svc groups.Service, counter metrics.Counter, latency metrics.Histogram) groups.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) CreateGroup(ctx context.Context, token string, g mfgroups.Group) (mfgroups.Group, error) { + defer func(begin time.Time) { + ms.counter.With("method", "create_group").Add(1) + ms.latency.With("method", "create_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.CreateGroup(ctx, token, g) +} + +func (ms *metricsMiddleware) UpdateGroup(ctx context.Context, token string, group mfgroups.Group) (rGroup mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_group").Add(1) + ms.latency.With("method", "update_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdateGroup(ctx, token, group) +} + +func (ms *metricsMiddleware) ViewGroup(ctx context.Context, token, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "view_group").Add(1) + ms.latency.With("method", "view_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ViewGroup(ctx, token, id) +} + +func (ms *metricsMiddleware) ListGroups(ctx context.Context, token string, gp mfgroups.GroupsPage) (cg mfgroups.GroupsPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_groups").Add(1) + ms.latency.With("method", "list_groups").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListGroups(ctx, token, gp) +} + +func (ms *metricsMiddleware) EnableGroup(ctx 
context.Context, token string, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "enable_group").Add(1) + ms.latency.With("method", "enable_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.EnableGroup(ctx, token, id) +} + +func (ms *metricsMiddleware) DisableGroup(ctx context.Context, token string, id string) (g mfgroups.Group, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "disable_group").Add(1) + ms.latency.With("method", "disable_group").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DisableGroup(ctx, token, id) +} + +func (ms *metricsMiddleware) ListMemberships(ctx context.Context, token, clientID string, gp mfgroups.GroupsPage) (mp mfgroups.MembershipsPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_memberships").Add(1) + ms.latency.With("method", "list_memberships").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListMemberships(ctx, token, clientID, gp) +} diff --git a/users/groups/api/requests.go b/users/groups/api/requests.go new file mode 100644 index 0000000000..3e7e30fe3c --- /dev/null +++ b/users/groups/api/requests.go @@ -0,0 +1,111 @@ +package api + +import ( + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +type createGroupReq struct { + mfgroups.Group + token string +} + +func (req createGroupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if len(req.Name) > api.MaxNameSize || req.Name == "" { + return apiutil.ErrNameSize + } + + return nil +} + +type updateGroupReq struct { + token string + id string + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +func (req updateGroupReq) validate() error { + if req.token == "" { + 
return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + if len(req.Name) > api.MaxNameSize { + return apiutil.ErrNameSize + } + return nil +} + +type listGroupsReq struct { + mfgroups.GroupsPage + token string + // - `true` - result is JSON tree representing groups hierarchy, + // - `false` - result is JSON array of groups. + tree bool +} + +func (req listGroupsReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.Level < mfgroups.MinLevel || req.Level > mfgroups.MaxLevel { + return apiutil.ErrInvalidLevel + } + + return nil +} + +type listMembershipReq struct { + mfgroups.GroupsPage + token string + clientID string +} + +func (req listMembershipReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.clientID == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type groupReq struct { + token string + id string +} + +func (req groupReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + + return nil +} + +type changeGroupStatusReq struct { + token string + id string +} + +func (req changeGroupStatusReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + if req.id == "" { + return apiutil.ErrMissingID + } + return nil +} diff --git a/users/groups/api/responses.go b/users/groups/api/responses.go new file mode 100644 index 0000000000..ffb2ead7f4 --- /dev/null +++ b/users/groups/api/responses.go @@ -0,0 +1,151 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/mainflux/mainflux" + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +var ( + _ mainflux.Response = (*viewMembershipRes)(nil) + _ mainflux.Response = (*membershipPageRes)(nil) + _ mainflux.Response = (*createGroupRes)(nil) + _ mainflux.Response = (*groupPageRes)(nil) + _ mainflux.Response = (*changeStatusRes)(nil) + _ mainflux.Response = (*viewGroupRes)(nil) + _ 
mainflux.Response = (*updateGroupRes)(nil) +) + +type viewMembershipRes struct { + mfgroups.Group `json:",inline"` +} + +func (res viewMembershipRes) Code() int { + return http.StatusOK +} + +func (res viewMembershipRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewMembershipRes) Empty() bool { + return false +} + +type membershipPageRes struct { + pageRes + Memberships []viewMembershipRes `json:"memberships"` +} + +func (res membershipPageRes) Code() int { + return http.StatusOK +} + +func (res membershipPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res membershipPageRes) Empty() bool { + return false +} + +type viewGroupRes struct { + mfgroups.Group `json:",inline"` +} + +func (res viewGroupRes) Code() int { + return http.StatusOK +} + +func (res viewGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewGroupRes) Empty() bool { + return false +} + +type createGroupRes struct { + mfgroups.Group `json:",inline"` + created bool +} + +func (res createGroupRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res createGroupRes) Headers() map[string]string { + if res.created { + return map[string]string{ + "Location": fmt.Sprintf("/groups/%s", res.ID), + } + } + + return map[string]string{} +} + +func (res createGroupRes) Empty() bool { + return false +} + +type groupPageRes struct { + pageRes + Groups []viewGroupRes `json:"groups"` +} + +type pageRes struct { + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Total uint64 `json:"total,omitempty"` + Level uint64 `json:"level,omitempty"` +} + +func (res groupPageRes) Code() int { + return http.StatusOK +} + +func (res groupPageRes) Headers() map[string]string { + return map[string]string{} +} + +func (res groupPageRes) Empty() bool { + return false +} + +type updateGroupRes struct { + mfgroups.Group `json:",inline"` +} + +func (res updateGroupRes) Code() 
int { + return http.StatusOK +} + +func (res updateGroupRes) Headers() map[string]string { + return map[string]string{} +} + +func (res updateGroupRes) Empty() bool { + return false +} + +type changeStatusRes struct { + mfgroups.Group `json:",inline"` +} + +func (res changeStatusRes) Code() int { + return http.StatusOK +} + +func (res changeStatusRes) Headers() map[string]string { + return map[string]string{} +} + +func (res changeStatusRes) Empty() bool { + return false +} diff --git a/users/groups/api/transport.go b/users/groups/api/transport.go new file mode 100644 index 0000000000..11741d86e7 --- /dev/null +++ b/users/groups/api/transport.go @@ -0,0 +1,377 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + kithttp "github.com/go-kit/kit/transport/http" + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/logger" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/users/groups" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" +) + +// MakeGroupsHandler returns a HTTP handler for API endpoints. 
+func MakeGroupsHandler(svc groups.Service, mux *bone.Mux, logger logger.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + mux.Post("/groups", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("create_group"))(createGroupEndpoint(svc)), + decodeGroupCreate, + api.EncodeResponse, + opts..., + )) + + mux.Get("/groups/:groupID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("view_group"))(viewGroupEndpoint(svc)), + decodeGroupRequest, + api.EncodeResponse, + opts..., + )) + + mux.Put("/groups/:groupID", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_group"))(updateGroupEndpoint(svc)), + decodeGroupUpdate, + api.EncodeResponse, + opts..., + )) + + mux.Get("/users/:groupID/memberships", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_memberships"))(listMembershipsEndpoint(svc)), + decodeListMembershipRequest, + api.EncodeResponse, + opts..., + )) + + mux.Get("/groups", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_groups"))(listGroupsEndpoint(svc)), + decodeListGroupsRequest, + api.EncodeResponse, + opts..., + )) + + mux.Get("/groups/:groupID/children", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_children"))(listGroupsEndpoint(svc)), + decodeListChildrenRequest, + api.EncodeResponse, + opts..., + )) + + mux.Get("/groups/:groupID/parents", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_parents"))(listGroupsEndpoint(svc)), + decodeListParentsRequest, + api.EncodeResponse, + opts..., + )) + + mux.Post("/groups/:groupID/enable", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("enable_group"))(enableGroupEndpoint(svc)), + decodeChangeGroupStatus, + api.EncodeResponse, + opts..., + )) + + mux.Post("/groups/:groupID/disable", kithttp.NewServer( + 
otelkit.EndpointMiddleware(otelkit.WithOperation("disable_group"))(disableGroupEndpoint(svc)), + decodeChangeGroupStatus, + api.EncodeResponse, + opts..., + )) + + return mux +} + +func decodeListMembershipRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return nil, err + } + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, err + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + parentID, err := apiutil.ReadStringQuery(r, api.ParentKey, "") + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + dir, err := apiutil.ReadNumQuery[int64](r, api.DirKey, -1) + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listMembershipReq{ + token: apiutil.ExtractBearerToken(r), + clientID: bone.GetValue(r, "groupID"), + GroupsPage: mfgroups.GroupsPage{ + Level: level, + ID: parentID, + Page: mfgroups.Page{ + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Name: name, + Metadata: meta, + Status: st, + }, + Direction: dir, + }, + } + return req, nil + +} + +func decodeListGroupsRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return nil, err + } + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, err + 
} + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + parentID, err := apiutil.ReadStringQuery(r, api.ParentKey, "") + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, err + } + dir, err := apiutil.ReadNumQuery[int64](r, api.DirKey, -1) + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + GroupsPage: mfgroups.GroupsPage{ + Level: level, + ID: parentID, + Page: mfgroups.Page{ + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Name: name, + Metadata: meta, + Status: st, + }, + Direction: dir, + }, + } + return req, nil +} + +func decodeListParentsRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return nil, err + } + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, err + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, 
err + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + GroupsPage: mfgroups.GroupsPage{ + Level: level, + ID: bone.GetValue(r, "groupID"), + Page: mfgroups.Page{ + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Name: name, + Metadata: meta, + Status: st, + }, + Direction: 1, + }, + } + return req, nil +} + +func decodeListChildrenRequest(_ context.Context, r *http.Request) (interface{}, error) { + s, err := apiutil.ReadStringQuery(r, api.StatusKey, api.DefGroupStatus) + if err != nil { + return nil, err + } + level, err := apiutil.ReadNumQuery[uint64](r, api.LevelKey, api.DefLevel) + if err != nil { + return nil, err + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + name, err := apiutil.ReadStringQuery(r, api.NameKey, "") + if err != nil { + return nil, err + } + meta, err := apiutil.ReadMetadataQuery(r, api.MetadataKey, nil) + if err != nil { + return nil, err + } + tree, err := apiutil.ReadBoolQuery(r, api.TreeKey, false) + if err != nil { + return nil, err + } + st, err := mfclients.ToStatus(s) + if err != nil { + return nil, err + } + req := listGroupsReq{ + token: apiutil.ExtractBearerToken(r), + tree: tree, + GroupsPage: mfgroups.GroupsPage{ + Level: level, + ID: bone.GetValue(r, "groupID"), + Page: mfgroups.Page{ + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Name: name, + Metadata: meta, + Status: st, + }, + Direction: -1, + }, + } + return req, 
nil +} + +func decodeGroupCreate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + var g mfgroups.Group + if err := json.NewDecoder(r.Body).Decode(&g); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + req := createGroupReq{ + Group: g, + token: apiutil.ExtractBearerToken(r), + } + + return req, nil +} + +func decodeGroupUpdate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + req := updateGroupReq{ + id: bone.GetValue(r, "groupID"), + token: apiutil.ExtractBearerToken(r), + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + return req, nil +} + +func decodeGroupRequest(_ context.Context, r *http.Request) (interface{}, error) { + req := groupReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "groupID"), + } + return req, nil +} + +func decodeChangeGroupStatus(_ context.Context, r *http.Request) (interface{}, error) { + req := changeGroupStatusReq{ + token: apiutil.ExtractBearerToken(r), + id: bone.GetValue(r, "groupID"), + } + return req, nil +} diff --git a/users/groups/groups.go b/users/groups/groups.go new file mode 100644 index 0000000000..918a5daff7 --- /dev/null +++ b/users/groups/groups.go @@ -0,0 +1,30 @@ +package groups + +import ( + "context" + + "github.com/mainflux/mainflux/pkg/groups" +) + +type GroupService interface { + // CreateGroup creates new group. + CreateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) + + // UpdateGroup updates the group identified by the provided ID. + UpdateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) + + // ViewGroup retrieves data about the group identified by ID. 
+ ViewGroup(ctx context.Context, token, id string) (groups.Group, error) + + // ListGroups retrieves groups. + ListGroups(ctx context.Context, token string, gm groups.GroupsPage) (groups.GroupsPage, error) + + // ListMemberships retrieves everything that is assigned to a group identified by clientID. + ListMemberships(ctx context.Context, token, clientID string, gm groups.GroupsPage) (groups.MembershipsPage, error) + + // EnableGroup logically enables the group identified with the provided ID. + EnableGroup(ctx context.Context, token, id string) (groups.Group, error) + + // DisableGroup logically disables the group identified with the provided ID. + DisableGroup(ctx context.Context, token, id string) (groups.Group, error) +} diff --git a/users/groups/mocks/groups.go b/users/groups/mocks/groups.go new file mode 100644 index 0000000000..2ac974a984 --- /dev/null +++ b/users/groups/mocks/groups.go @@ -0,0 +1,77 @@ +package mocks + +import ( + "context" + + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/stretchr/testify/mock" +) + +const WrongID = "wrongID" + +var _ mfgroups.Repository = (*GroupRepository)(nil) + +type GroupRepository struct { + mock.Mock +} + +func (m *GroupRepository) ChangeStatus(ctx context.Context, group mfgroups.Group) (mfgroups.Group, error) { + ret := m.Called(ctx, group) + + if group.ID == WrongID { + return mfgroups.Group{}, errors.ErrNotFound + } + if group.Status != mfclients.EnabledStatus && group.Status != mfclients.DisabledStatus { + return mfgroups.Group{}, errors.ErrMalformedEntity + } + + return ret.Get(0).(mfgroups.Group), ret.Error(1) +} + +func (m *GroupRepository) Memberships(ctx context.Context, clientID string, gm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + ret := m.Called(ctx, clientID, gm) + + if clientID == WrongID { + return mfgroups.MembershipsPage{}, errors.ErrNotFound + } + + return 
ret.Get(0).(mfgroups.MembershipsPage), ret.Error(1) +} + +func (m *GroupRepository) RetrieveAll(ctx context.Context, gm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + ret := m.Called(ctx, gm) + + return ret.Get(0).(mfgroups.GroupsPage), ret.Error(1) +} + +func (m *GroupRepository) RetrieveByID(ctx context.Context, id string) (mfgroups.Group, error) { + ret := m.Called(ctx, id) + if id == WrongID { + return mfgroups.Group{}, errors.ErrNotFound + } + + return ret.Get(0).(mfgroups.Group), ret.Error(1) +} + +func (m *GroupRepository) Save(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + ret := m.Called(ctx, g) + if g.Parent == WrongID { + return mfgroups.Group{}, errors.ErrCreateEntity + } + if g.Owner == WrongID { + return mfgroups.Group{}, errors.ErrCreateEntity + } + + return g, ret.Error(1) +} + +func (m *GroupRepository) Update(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + ret := m.Called(ctx, g) + if g.ID == WrongID { + return mfgroups.Group{}, errors.ErrNotFound + } + + return ret.Get(0).(mfgroups.Group), ret.Error(1) +} diff --git a/users/groups/postgres/doc.go b/users/groups/postgres/doc.go new file mode 100644 index 0000000000..bf560bea28 --- /dev/null +++ b/users/groups/postgres/doc.go @@ -0,0 +1 @@ +package postgres diff --git a/users/groups/postgres/groups.go b/users/groups/postgres/groups.go new file mode 100644 index 0000000000..2ddc2ddc27 --- /dev/null +++ b/users/groups/postgres/groups.go @@ -0,0 +1,440 @@ +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/mainflux/mainflux/internal/postgres" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" +) + +var _ mfgroups.Repository = (*groupRepository)(nil) + +type groupRepository struct { + db postgres.Database +} + +// NewGroupRepo instantiates a PostgreSQL implementation of group +// 
repository. +func NewGroupRepo(db postgres.Database) mfgroups.Repository { + return &groupRepository{ + db: db, + } +} + +// TODO - check parent group write access. +func (repo groupRepository) Save(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + q := `INSERT INTO groups (name, description, id, owner_id, parent_id, metadata, created_at, status) + VALUES (:name, :description, :id, :owner_id, :parent_id, :metadata, :created_at, :status) + RETURNING id, name, description, owner_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, status;` + + dbg, err := toDBGroup(g) + if err != nil { + return mfgroups.Group{}, err + } + row, err := repo.db.NamedQueryContext(ctx, q, dbg) + if err != nil { + return mfgroups.Group{}, postgres.HandleError(err, errors.ErrCreateEntity) + } + + defer row.Close() + row.Next() + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mfgroups.Group{}, err + } + + return toGroup(dbg) +} + +func (repo groupRepository) RetrieveByID(ctx context.Context, id string) (mfgroups.Group, error) { + q := `SELECT id, name, owner_id, COALESCE(parent_id, '') AS parent_id, description, metadata, created_at, updated_at, updated_by, status FROM groups + WHERE id = :id` + + dbg := dbGroup{ + ID: id, + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbg) + if err != nil { + if err == sql.ErrNoRows { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, err) + } + return mfgroups.Group{}, errors.Wrap(errors.ErrViewEntity, err) + } + + defer row.Close() + row.Next() + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, err) + } + + return toGroup(dbg) + +} + +func (repo groupRepository) RetrieveAll(ctx context.Context, gm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + var q string + query, err := buildQuery(gm) + if err != nil { + return mfgroups.GroupsPage{}, err + } + + if gm.ID != "" { + q = buildHierachy(gm) + } + if gm.ID == "" { 
+ q = `SELECT g.id, g.owner_id, COALESCE(g.parent_id, '') AS parent_id, g.name, g.description, + g.metadata, g.created_at, g.updated_at, g.updated_by, g.status FROM groups g` + } + q = fmt.Sprintf("%s %s ORDER BY g.updated_at LIMIT :limit OFFSET :offset;", q, query) + + dbPage, err := toDBGroupPage(gm) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + defer rows.Close() + + var items []mfgroups.Group + for rows.Next() { + dbg := dbGroup{} + if err := rows.StructScan(&dbg); err != nil { + return mfgroups.GroupsPage{}, err + } + group, err := toGroup(dbg) + if err != nil { + return mfgroups.GroupsPage{}, err + } + items = append(items, group) + } + + cq := "SELECT COUNT(*) FROM groups g" + if query != "" { + cq = fmt.Sprintf(" %s %s", cq, query) + } + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mfgroups.GroupsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveAll, err) + } + + page := gm + page.Groups = items + page.Total = total + + return page, nil +} + +func (repo groupRepository) Memberships(ctx context.Context, clientID string, gm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + var q string + query, err := buildQuery(gm) + if err != nil { + return mfgroups.MembershipsPage{}, err + } + if gm.ID != "" { + q = buildHierachy(gm) + } + if gm.ID == "" { + q = `SELECT g.id, g.owner_id, COALESCE(g.parent_id, '') AS parent_id, g.name, g.description, + g.metadata, g.created_at, g.updated_at, g.updated_by, g.status FROM groups g` + } + aq := "" + // If not admin, the client needs to have a g_list action on the group + if gm.Subject != "" { + aq = `AND policies.object IN (SELECT object FROM policies WHERE subject = :subject AND :action=ANY(actions))` + } + q = fmt.Sprintf(`%s INNER JOIN policies ON 
g.id=policies.object %s AND policies.subject = :client_id %s + ORDER BY g.updated_at LIMIT :limit OFFSET :offset;`, q, query, aq) + + dbPage, err := toDBGroupPage(gm) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + dbPage.ClientID = clientID + rows, err := repo.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + defer rows.Close() + + var items []mfgroups.Group + for rows.Next() { + dbg := dbGroup{} + if err := rows.StructScan(&dbg); err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + group, err := toGroup(dbg) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + items = append(items, group) + } + + cq := fmt.Sprintf(`SELECT COUNT(*) FROM groups g INNER JOIN policies + ON g.id=policies.object %s AND policies.subject = :client_id`, query) + + total, err := postgres.Total(ctx, repo.db, cq, dbPage) + if err != nil { + return mfgroups.MembershipsPage{}, errors.Wrap(postgres.ErrFailedToRetrieveMembership, err) + } + page := mfgroups.MembershipsPage{ + Memberships: items, + Page: mfgroups.Page{ + Total: total, + }, + } + + return page, nil +} + +func (repo groupRepository) Update(ctx context.Context, g mfgroups.Group) (mfgroups.Group, error) { + var query []string + var upq string + if g.Name != "" { + query = append(query, "name = :name,") + } + if g.Description != "" { + query = append(query, "description = :description,") + } + if g.Metadata != nil { + query = append(query, "metadata = :metadata,") + } + if len(query) > 0 { + upq = strings.Join(query, " ") + } + g.Status = mfclients.EnabledStatus + q := fmt.Sprintf(`UPDATE groups SET %s updated_at = :updated_at, updated_by = :updated_by + WHERE id = :id AND status = :status + RETURNING id, name, description, owner_id, 
COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status`, upq) + + dbu, err := toDBGroup(g) + if err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + + row, err := repo.db.NamedQueryContext(ctx, q, dbu) + if err != nil { + return mfgroups.Group{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbu = dbGroup{} + if err := row.StructScan(&dbu); err != nil { + return mfgroups.Group{}, errors.Wrap(err, errors.ErrUpdateEntity) + } + return toGroup(dbu) +} + +func (repo groupRepository) ChangeStatus(ctx context.Context, group mfgroups.Group) (mfgroups.Group, error) { + qc := `UPDATE groups SET status = :status WHERE id = :id + RETURNING id, name, description, owner_id, COALESCE(parent_id, '') AS parent_id, metadata, created_at, updated_at, updated_by, status` + + dbg, err := toDBGroup(group) + if err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrUpdateEntity, err) + } + row, err := repo.db.NamedQueryContext(ctx, qc, dbg) + if err != nil { + return mfgroups.Group{}, postgres.HandleError(err, errors.ErrUpdateEntity) + } + + defer row.Close() + if ok := row.Next(); !ok { + return mfgroups.Group{}, errors.Wrap(errors.ErrNotFound, row.Err()) + } + dbg = dbGroup{} + if err := row.StructScan(&dbg); err != nil { + return mfgroups.Group{}, errors.Wrap(err, errors.ErrUpdateEntity) + } + + return toGroup(dbg) +} + +func buildHierachy(gm mfgroups.GroupsPage) string { + query := "" + switch { + case gm.Direction >= 0: // ancestors + query = `WITH RECURSIVE groups_cte as ( + SELECT id, COALESCE(parent_id, '') AS parent_id, owner_id, name, description, metadata, created_at, updated_at, updated_by, status, 0 as level from groups WHERE id = :id + UNION SELECT x.id, COALESCE(x.parent_id, '') AS parent_id, x.owner_id, x.name, x.description, x.metadata, x.created_at, 
x.updated_at, x.updated_by, x.status, level - 1 from groups x + INNER JOIN groups_cte a ON a.parent_id = x.id + ) SELECT * FROM groups_cte g` + + case gm.Direction < 0: // descendants + query = `WITH RECURSIVE groups_cte as ( + SELECT id, COALESCE(parent_id, '') AS parent_id, owner_id, name, description, metadata, created_at, updated_at, updated_by, status, 0 as level, CONCAT('', '', id) as path from groups WHERE id = :id + UNION SELECT x.id, COALESCE(x.parent_id, '') AS parent_id, x.owner_id, x.name, x.description, x.metadata, x.created_at, x.updated_at, x.updated_by, x.status, level + 1, CONCAT(path, '.', x.id) as path from groups x + INNER JOIN groups_cte d ON d.id = x.parent_id + ) SELECT * FROM groups_cte g` + } + return query +} +func buildQuery(gm mfgroups.GroupsPage) (string, error) { + queries := []string{} + + if gm.Name != "" { + queries = append(queries, "g.name = :name") + } + if gm.Status != mfclients.AllStatus { + queries = append(queries, "g.status = :status") + } + + if gm.Subject != "" { + queries = append(queries, "(g.owner_id = :owner_id OR id IN (SELECT object as id FROM policies WHERE subject = :subject AND :action=ANY(actions)))") + } + if len(gm.Metadata) > 0 { + queries = append(queries, "'g.metadata @> :metadata'") + } + if len(queries) > 0 { + return fmt.Sprintf("WHERE %s", strings.Join(queries, " AND ")), nil + } + return "", nil +} + +type dbGroup struct { + ID string `db:"id"` + ParentID *string `db:"parent_id,omitempty"` + OwnerID string `db:"owner_id,omitempty"` + Name string `db:"name"` + Description string `db:"description,omitempty"` + Level int `db:"level"` + Path string `db:"path,omitempty"` + Metadata []byte `db:"metadata,omitempty"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy *string `db:"updated_by,omitempty"` + Status mfclients.Status `db:"status"` +} + +func toDBGroup(g mfgroups.Group) (dbGroup, error) { + data := []byte("{}") + if len(g.Metadata) > 0 { + b, err 
:= json.Marshal(g.Metadata) + if err != nil { + return dbGroup{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + var parentID *string + if g.Parent != "" { + parentID = &g.Parent + } + var updatedAt sql.NullTime + if !g.UpdatedAt.IsZero() { + updatedAt = sql.NullTime{Time: g.UpdatedAt, Valid: true} + } + var updatedBy *string + if g.UpdatedBy != "" { + updatedBy = &g.UpdatedBy + } + return dbGroup{ + ID: g.ID, + Name: g.Name, + ParentID: parentID, + OwnerID: g.Owner, + Description: g.Description, + Metadata: data, + Path: g.Path, + CreatedAt: g.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + Status: g.Status, + }, nil +} + +func toGroup(g dbGroup) (mfgroups.Group, error) { + var metadata mfclients.Metadata + if g.Metadata != nil { + if err := json.Unmarshal([]byte(g.Metadata), &metadata); err != nil { + return mfgroups.Group{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + } + var parentID string + if g.ParentID != nil { + parentID = *g.ParentID + } + var updatedAt time.Time + if g.UpdatedAt.Valid { + updatedAt = g.UpdatedAt.Time + } + var updatedBy string + if g.UpdatedBy != nil { + updatedBy = *g.UpdatedBy + } + + return mfgroups.Group{ + ID: g.ID, + Name: g.Name, + Parent: parentID, + Owner: g.OwnerID, + Description: g.Description, + Metadata: metadata, + Level: g.Level, + Path: g.Path, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + CreatedAt: g.CreatedAt, + Status: g.Status, + }, nil +} + +func toDBGroupPage(pm mfgroups.GroupsPage) (dbGroupPage, error) { + level := mfgroups.MaxLevel + if pm.Level < mfgroups.MaxLevel { + level = pm.Level + } + data := []byte("{}") + if len(pm.Metadata) > 0 { + b, err := json.Marshal(pm.Metadata) + if err != nil { + return dbGroupPage{}, errors.Wrap(errors.ErrMalformedEntity, err) + } + data = b + } + return dbGroupPage{ + ID: pm.ID, + Name: pm.Name, + Metadata: data, + Path: pm.Path, + Level: level, + Total: pm.Total, + Offset: pm.Offset, + Limit: pm.Limit, + ParentID: pm.ID, + OwnerID: 
pm.OwnerID, + Subject: pm.Subject, + Action: pm.Action, + Status: pm.Status, + }, nil +} + +type dbGroupPage struct { + ClientID string `db:"client_id"` + ID string `db:"id"` + Name string `db:"name"` + ParentID string `db:"parent_id"` + OwnerID string `db:"owner_id"` + Metadata []byte `db:"metadata"` + Path string `db:"path"` + Level uint64 `db:"level"` + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` + Subject string `db:"subject"` + Action string `db:"action"` + Status mfclients.Status `db:"status"` +} diff --git a/users/groups/postgres/groups_test.go b/users/groups/postgres/groups_test.go new file mode 100644 index 0000000000..327e974c89 --- /dev/null +++ b/users/groups/postgres/groups_test.go @@ -0,0 +1,594 @@ +package postgres_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/postgres" + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + cpostgres "github.com/mainflux/mainflux/users/clients/postgres" + gpostgres "github.com/mainflux/mainflux/users/groups/postgres" + "github.com/mainflux/mainflux/users/policies" + ppostgres "github.com/mainflux/mainflux/users/policies/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + maxNameSize = 254 + maxDescSize = 1024 + maxLevel = uint64(5) + groupName = "group" + description = "description" +) + +var ( + wrongID = "wrong-id" + invalidName = strings.Repeat("m", maxNameSize+10) + validDesc = strings.Repeat("m", 100) + invalidDesc = strings.Repeat("m", maxDescSize+1) + metadata = mfclients.Metadata{ + "admin": "true", + } + password = "$tr0ngPassw0rd" + idProvider = uuid.New() +) + +func TestGroupSave(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := 
gpostgres.NewGroupRepo(database) + + usrID := testsutil.GenerateUUID(t, idProvider) + grpID := testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + group mfgroups.Group + err error + }{ + { + desc: "create new group successfully", + group: mfgroups.Group{ + ID: grpID, + Name: groupName, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create a new group with an existing name", + group: mfgroups.Group{ + ID: grpID, + Name: groupName, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrConflict, + }, + { + desc: "create group with an invalid name", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: invalidName, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create a group with invalid ID", + group: mfgroups.Group{ + ID: usrID, + Name: "withInvalidDescription", + Description: invalidDesc, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create group with description", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "withDescription", + Description: validDesc, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create group with invalid description", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "withInvalidDescription", + Description: invalidDesc, + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create group with parent", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Parent: grpID, + Name: "withParent", + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create a group with an invalid parent", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Parent: invalidName, + Name: "withInvalidParent", + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create a group with an owner", + 
group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: usrID, + Name: "withOwner", + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create a group with an invalid owner", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Owner: invalidName, + Name: "withInvalidOwner", + Status: mfclients.EnabledStatus, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "create a group with metadata", + group: mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "withMetadata", + Metadata: metadata, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + } + + for _, tc := range cases { + _, err := groupRepo.Save(context.Background(), tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + +} + +func TestGroupRetrieveByID(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewGroupRepo(database) + + uid := testsutil.GenerateUUID(t, idProvider) + group1 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: groupName + "TestGroupRetrieveByID1", + Owner: uid, + Status: mfclients.EnabledStatus, + } + + _, err := groupRepo.Save(context.Background(), group1) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + retrieved, err := groupRepo.RetrieveByID(context.Background(), group1.ID) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + assert.True(t, retrieved.ID == group1.ID, fmt.Sprintf("Save group, ID: expected %s got %s\n", group1.ID, retrieved.ID)) + + // Round to milliseconds as otherwise saving and retrieving from DB + // adds rounding error. 
+ creationTime := time.Now().UTC().Round(time.Millisecond) + group2 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: groupName + "TestGroupRetrieveByID", + Owner: uid, + Parent: group1.ID, + CreatedAt: creationTime, + UpdatedAt: creationTime, + Description: description, + Metadata: metadata, + Status: mfclients.EnabledStatus, + } + + _, err = groupRepo.Save(context.Background(), group2) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + + retrieved, err = groupRepo.RetrieveByID(context.Background(), group2.ID) + require.Nil(t, err, fmt.Sprintf("got unexpected error: %s", err)) + assert.True(t, retrieved.ID == group2.ID, fmt.Sprintf("Save group, ID: expected %s got %s\n", group2.ID, retrieved.ID)) + assert.True(t, retrieved.CreatedAt.Equal(creationTime), fmt.Sprintf("Save group, CreatedAt: expected %s got %s\n", creationTime, retrieved.CreatedAt)) + assert.True(t, retrieved.Parent == group1.ID, fmt.Sprintf("Save group, Level: expected %s got %s\n", group1.ID, retrieved.Parent)) + assert.True(t, retrieved.Description == description, fmt.Sprintf("Save group, Description: expected %v got %v\n", retrieved.Description, description)) + + retrieved, err = groupRepo.RetrieveByID(context.Background(), testsutil.GenerateUUID(t, idProvider)) + assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("Retrieve group: expected %s got %s\n", errors.ErrNotFound, err)) +} + +func TestGroupRetrieveAll(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewGroupRepo(database) + + var nGroups = uint64(200) + var ownerID = testsutil.GenerateUUID(t, idProvider) + var parentID string + for i := uint64(0); i < nGroups; i++ { + creationTime := time.Now().UTC() + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: fmt.Sprintf("%s-%d", groupName, i), + Description: fmt.Sprintf("%s-description-%d", groupName, i), + CreatedAt: creationTime, + UpdatedAt: creationTime, + 
Status: mfclients.EnabledStatus, + } + if i == 1 { + parentID = group.ID + } + if i%10 == 0 { + group.Owner = ownerID + group.Parent = parentID + } + if i%50 == 0 { + group.Status = mfclients.DisabledStatus + } + _, err := groupRepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s\n", err)) + parentID = group.ID + } + + cases := map[string]struct { + Size uint64 + Metadata mfgroups.GroupsPage + }{ + "retrieve all groups": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Limit: nGroups, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: nGroups, + }, + "retrieve all groups with offset": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 50, + Limit: nGroups, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: nGroups - 50, + }, + "retrieve all groups with limit": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 0, + Limit: 50, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 50, + }, + "retrieve all groups with offset and limit": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 50, + Limit: 50, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 50, + }, + "retrieve all groups with offset greater than limit": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 250, + Limit: nGroups, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 0, + }, + "retrieve all groups with owner id": { + Metadata: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Limit: nGroups, + Subject: ownerID, + OwnerID: ownerID, + Status: mfclients.AllStatus, + }, + Level: maxLevel, + }, + Size: 20, + }, + } + + for desc, tc := range cases { + page, err := groupRepo.RetrieveAll(context.Background(), tc.Metadata) + size := len(page.Groups) + assert.Equal(t, tc.Size, uint64(size), 
fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.Size, size)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) + } +} + +func TestGroupUpdate(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + groupRepo := gpostgres.NewGroupRepo(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + creationTime := time.Now().UTC() + updateTime := time.Now().UTC() + groupID := testsutil.GenerateUUID(t, idProvider) + + group := mfgroups.Group{ + ID: groupID, + Name: groupName + "TestGroupUpdate", + Owner: uid, + CreatedAt: creationTime, + UpdatedAt: creationTime, + Description: description, + Metadata: metadata, + Status: mfclients.EnabledStatus, + } + updatedName := groupName + "Updated" + updatedMetadata := mfclients.Metadata{"admin": "false"} + updatedDescription := description + "updated" + _, err := groupRepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) + + retrieved, err := groupRepo.RetrieveByID(context.Background(), group.ID) + require.Nil(t, err, fmt.Sprintf("group save got unexpected error: %s", err)) + + cases := []struct { + desc string + groupUpdate mfgroups.Group + groupExpected mfgroups.Group + err error + }{ + { + desc: "update group name for existing id", + groupUpdate: mfgroups.Group{ + ID: groupID, + Name: updatedName, + UpdatedAt: updateTime, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + Metadata: retrieved.Metadata, + Description: retrieved.Description, + }, + err: nil, + }, + { + desc: "update group metadata for existing id", + groupUpdate: mfgroups.Group{ + ID: groupID, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + Description: retrieved.Description, + }, + err: nil, + }, + { + desc: "update group description for existing id", + groupUpdate: mfgroups.Group{ + ID: groupID, + UpdatedAt: updateTime, + 
Description: updatedDescription, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + Description: updatedDescription, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + }, + err: nil, + }, + { + desc: "update group name and metadata for existing id", + groupUpdate: mfgroups.Group{ + ID: groupID, + Name: updatedName, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + }, + groupExpected: mfgroups.Group{ + Name: updatedName, + UpdatedAt: updateTime, + Metadata: updatedMetadata, + Description: updatedDescription, + }, + err: nil, + }, + { + desc: "update group for invalid name", + groupUpdate: mfgroups.Group{ + ID: groupID, + Name: invalidName, + }, + err: errors.ErrMalformedEntity, + }, + { + desc: "update group for invalid description", + groupUpdate: mfgroups.Group{ + ID: groupID, + Description: invalidDesc, + }, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + updated, err := groupRepo.Update(context.Background(), tc.groupUpdate) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.True(t, updated.Name == tc.groupExpected.Name, fmt.Sprintf("%s:Name: expected %s got %s\n", tc.desc, tc.groupExpected.Name, updated.Name)) + assert.True(t, updated.Description == tc.groupExpected.Description, fmt.Sprintf("%s:Description: expected %s got %s\n", tc.desc, tc.groupExpected.Description, updated.Description)) + assert.True(t, updated.Metadata["admin"] == tc.groupExpected.Metadata["admin"], fmt.Sprintf("%s:Metadata: expected %d got %d\n", tc.desc, tc.groupExpected.Metadata["admin"], updated.Metadata["admin"])) + } + } +} + +func TestClientsMemberships(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + crepo := cpostgres.NewClientRepo(database) + grepo := gpostgres.NewGroupRepo(database) + prepo := ppostgres.NewPolicyRepo(database) + + clientA := mfclients.Client{ + ID: testsutil.GenerateUUID(t, 
idProvider), + Name: "client-memberships", + Credentials: mfclients.Credentials{ + Identity: "client-memberships1@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + clientB := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "client-memberships", + Credentials: mfclients.Credentials{ + Identity: "client-memberships2@example.com", + Secret: password, + }, + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "group-membership", + Metadata: mfclients.Metadata{}, + Status: mfclients.EnabledStatus, + } + + policyA := policies.Policy{ + Subject: clientA.ID, + Object: group.ID, + Actions: []string{"g_list"}, + } + policyB := policies.Policy{ + Subject: clientB.ID, + Object: group.ID, + Actions: []string{"g_list"}, + } + + _, err := crepo.Save(context.Background(), clientA) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save client: expected %v got %s\n", nil, err)) + _, err = crepo.Save(context.Background(), clientB) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save client: expected %v got %s\n", nil, err)) + _, err = grepo.Save(context.Background(), group) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save group: expected %v got %s\n", nil, err)) + err = prepo.Save(context.Background(), policyA) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save policy: expected %v got %s\n", nil, err)) + err = prepo.Save(context.Background(), policyB) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("save policy: expected %v got %s\n", nil, err)) + + cases := map[string]struct { + ID string + err error + }{ + "retrieve membership for existing client": {clientA.ID, nil}, + "retrieve membership for non-existing client": {wrongID, nil}, + } + + for desc, tc := range cases { + mp, err := grepo.Memberships(context.Background(), tc.ID, mfgroups.GroupsPage{Page: 
mfgroups.Page{Total: 10, Offset: 0, Limit: 10, Status: mfclients.AllStatus, Subject: clientB.ID, Action: "g_list"}}) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + if tc.ID == clientA.ID { + assert.ElementsMatch(t, mp.Memberships, []mfgroups.Group{group}, fmt.Sprintf("%s: expected %v got %v\n", desc, []mfgroups.Group{group}, mp.Memberships)) + } + } +} + +func TestGroupChangeStatus(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + dbMiddleware := postgres.NewDatabase(db, tracer) + repo := gpostgres.NewGroupRepo(dbMiddleware) + + group1 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "active-group", + Status: mfclients.EnabledStatus, + } + group2 := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "inactive-group", + Status: mfclients.DisabledStatus, + } + + group1, err := repo.Save(context.Background(), group1) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new group: expected %v got %s\n", nil, err)) + group2, err = repo.Save(context.Background(), group2) + assert.True(t, errors.Contains(err, nil), fmt.Sprintf("add new disabled group: expected %v got %s\n", nil, err)) + + cases := []struct { + desc string + group mfgroups.Group + err error + }{ + { + desc: "change group status for an active group", + group: mfgroups.Group{ + ID: group1.ID, + Status: mfclients.DisabledStatus, + }, + err: nil, + }, + { + desc: "change group status for a inactive group", + group: mfgroups.Group{ + ID: group2.ID, + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "change group status for an invalid group", + group: mfgroups.Group{ + ID: "invalid", + Status: mfclients.DisabledStatus, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + expected, err := repo.ChangeStatus(context.Background(), tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + 
if err == nil { + assert.Equal(t, tc.group.Status, expected.Status, fmt.Sprintf("%s: expected %d got %d\n", tc.desc, tc.group.Status, expected.Status)) + } + } +} diff --git a/users/groups/postgres/setup_test.go b/users/groups/postgres/setup_test.go new file mode 100644 index 0000000000..e377f955a8 --- /dev/null +++ b/users/groups/postgres/setup_test.go @@ -0,0 +1,93 @@ +// Package postgres_test contains tests for PostgreSQL repository +// implementations. +package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/jmoiron/sqlx" + pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/postgres" + upostgres "github.com/mainflux/mainflux/users/postgres" + dockertest "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.1-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != 
nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgClient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgClient.SetupDB(dbConfig, *upostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + database = postgres.NewDatabase(db, tracer) + + code := m.Run() + + // Defers will not be run when using os.Exit + db.Close() + if err := pool.Purge(container); err != nil { + log.Fatalf("Could not purge container: %s", err) + } + + os.Exit(code) +} diff --git a/users/groups/service.go b/users/groups/service.go new file mode 100644 index 0000000000..94277573fc --- /dev/null +++ b/users/groups/service.go @@ -0,0 +1,205 @@ +package groups + +import ( + "context" + "time" + + "github.com/mainflux/mainflux" + "github.com/mainflux/mainflux/internal/apiutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/users/jwt" + "github.com/mainflux/mainflux/users/policies" +) + +// Possible token types are access and refresh tokens. +const ( + RefreshToken = "refresh" + AccessToken = "access" + MyKey = "mine" + groupsObjectKey = "groups" + updateRelationKey = "g_update" + listRelationKey = "g_list" + deleteRelationKey = "g_delete" + entityType = "group" +) + +// Service unites Clients and Group services. +type Service interface { + GroupService +} + +type service struct { + groups groups.Repository + policies policies.PolicyRepository + tokens jwt.TokenRepository + idProvider mainflux.IDProvider +} + +// NewService returns a new Clients service implementation. 
+func NewService(g groups.Repository, p policies.PolicyRepository, t jwt.TokenRepository, idp mainflux.IDProvider) Service { + return service{ + groups: g, + policies: p, + tokens: t, + idProvider: idp, + } +} + +func (svc service) CreateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) { + ownerID, err := svc.identify(ctx, token) + if err != nil { + return groups.Group{}, err + } + groupID, err := svc.idProvider.ID() + if err != nil { + return groups.Group{}, err + } + if g.Status != mfclients.EnabledStatus && g.Status != mfclients.DisabledStatus { + return groups.Group{}, apiutil.ErrInvalidStatus + } + if g.Owner == "" { + g.Owner = ownerID + } + + g.ID = groupID + g.CreatedAt = time.Now() + + return svc.groups.Save(ctx, g) +} + +func (svc service) ViewGroup(ctx context.Context, token string, id string) (groups.Group, error) { + if err := svc.authorizeByToken(ctx, entityType, policies.Policy{Subject: token, Object: id, Actions: []string{listRelationKey}}); err != nil { + return groups.Group{}, err + } + + return svc.groups.RetrieveByID(ctx, id) +} + +func (svc service) ListGroups(ctx context.Context, token string, gm groups.GroupsPage) (groups.GroupsPage, error) { + id, err := svc.identify(ctx, token) + if err != nil { + return groups.GroupsPage{}, err + } + // If the user is admin, fetch all groups from the database. + if err := svc.authorizeByID(ctx, entityType, policies.Policy{Subject: id, Object: groupsObjectKey, Actions: []string{listRelationKey}}); err == nil { + return svc.groups.RetrieveAll(ctx, gm) + } + gm.Subject = id + gm.OwnerID = id + gm.Action = listRelationKey + return svc.groups.RetrieveAll(ctx, gm) +} + +func (svc service) ListMemberships(ctx context.Context, token, clientID string, gm groups.GroupsPage) (groups.MembershipsPage, error) { + id, err := svc.identify(ctx, token) + if err != nil { + return groups.MembershipsPage{}, err + } + // If the user is admin, fetch all members from the database. 
+ if err := svc.authorizeByID(ctx, entityType, policies.Policy{Subject: id, Object: groupsObjectKey, Actions: []string{listRelationKey}}); err == nil { + return svc.groups.Memberships(ctx, clientID, gm) + } + + gm.Subject = id + gm.Action = listRelationKey + return svc.groups.Memberships(ctx, clientID, gm) +} + +func (svc service) UpdateGroup(ctx context.Context, token string, g groups.Group) (groups.Group, error) { + id, err := svc.identify(ctx, token) + if err != nil { + return groups.Group{}, err + } + if err := svc.authorizeByID(ctx, entityType, policies.Policy{Subject: id, Object: g.ID, Actions: []string{updateRelationKey}}); err != nil { + return groups.Group{}, err + } + g.UpdatedAt = time.Now() + g.UpdatedBy = id + + return svc.groups.Update(ctx, g) +} + +func (svc service) EnableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group := groups.Group{ + ID: id, + Status: mfclients.EnabledStatus, + UpdatedAt: time.Now(), + } + group, err := svc.changeGroupStatus(ctx, token, group) + if err != nil { + return groups.Group{}, err + } + return group, nil +} + +func (svc service) DisableGroup(ctx context.Context, token, id string) (groups.Group, error) { + group := groups.Group{ + ID: id, + Status: mfclients.DisabledStatus, + UpdatedAt: time.Now(), + } + group, err := svc.changeGroupStatus(ctx, token, group) + if err != nil { + return groups.Group{}, err + } + return group, nil +} + +func (svc service) authorizeByID(ctx context.Context, entityType string, p policies.Policy) error { + if err := p.Validate(); err != nil { + return err + } + if err := svc.policies.CheckAdmin(ctx, p.Subject); err == nil { + return nil + } + return svc.policies.Evaluate(ctx, entityType, p) +} + +func (svc service) authorizeByToken(ctx context.Context, entityType string, p policies.Policy) error { + if err := p.Validate(); err != nil { + return err + } + id, err := svc.identify(ctx, p.Subject) + if err != nil { + return err + } + if err = 
svc.policies.CheckAdmin(ctx, id); err == nil { + return nil + } + p.Subject = id + return svc.policies.Evaluate(ctx, entityType, p) +} + +func (svc service) changeGroupStatus(ctx context.Context, token string, group groups.Group) (groups.Group, error) { + id, err := svc.identify(ctx, token) + if err != nil { + return groups.Group{}, err + } + if err := svc.authorizeByID(ctx, entityType, policies.Policy{Subject: id, Object: group.ID, Actions: []string{deleteRelationKey}}); err != nil { + return groups.Group{}, err + } + dbGroup, err := svc.groups.RetrieveByID(ctx, group.ID) + if err != nil { + return groups.Group{}, err + } + if dbGroup.Status == group.Status { + return groups.Group{}, mfclients.ErrStatusAlreadyAssigned + } + + group.UpdatedBy = id + return svc.groups.ChangeStatus(ctx, group) +} + +func (svc service) identify(ctx context.Context, tkn string) (string, error) { + claims, err := svc.tokens.Parse(ctx, tkn) + if err != nil { + return "", errors.Wrap(errors.ErrAuthentication, err) + } + if claims.Type != AccessToken { + return "", errors.ErrAuthentication + } + + return claims.ClientID, nil +} diff --git a/users/groups/service_test.go b/users/groups/service_test.go new file mode 100644 index 0000000000..168619e750 --- /dev/null +++ b/users/groups/service_test.go @@ -0,0 +1,809 @@ +package groups_test + +import ( + context "context" + fmt "fmt" + "regexp" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/users/clients" + cmocks "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/groups" + "github.com/mainflux/mainflux/users/groups/mocks" + "github.com/mainflux/mainflux/users/hasher" + "github.com/mainflux/mainflux/users/jwt" + pmocks 
"github.com/mainflux/mainflux/users/policies/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() + phasher = hasher.New() + secret = "strongsecret" + validGMetadata = mfclients.Metadata{"role": "client"} + inValidToken = "invalidToken" + description = "shortdescription" + gName = "groupname" + group = mfgroups.Group{ + Name: gName, + Description: description, + Metadata: validGMetadata, + Status: mfclients.EnabledStatus, + } + withinDuration = 5 * time.Second + passRegex = regexp.MustCompile("^.{8,}$") + accessDuration = time.Minute * 1 + refreshDuration = time.Minute * 10 +) + +func TestCreateGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + cases := []struct { + desc string + group mfgroups.Group + err error + }{ + { + desc: "create new group", + group: group, + err: nil, + }, + { + desc: "create group with existing name", + group: group, + err: nil, + }, + { + desc: "create group with parent", + group: mfgroups.Group{ + Name: gName, + Parent: testsutil.GenerateUUID(t, idProvider), + Status: mfclients.EnabledStatus, + }, + err: nil, + }, + { + desc: "create group with invalid parent", + group: mfgroups.Group{ + Name: gName, + Parent: mocks.WrongID, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "create group with invalid owner", + group: mfgroups.Group{ + Name: gName, + Owner: mocks.WrongID, + }, + err: errors.ErrCreateEntity, + }, + { + desc: "create group with missing name", + group: mfgroups.Group{}, + err: errors.ErrMalformedEntity, + }, + } + + for _, tc := range cases { + repoCall := 
gRepo.On("Save", context.Background(), mock.Anything).Return(tc.group, tc.err) + createdAt := time.Now() + expected, err := svc.CreateGroup(context.Background(), testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + assert.NotEmpty(t, expected.ID, fmt.Sprintf("%s: expected %s not to be empty\n", tc.desc, expected.ID)) + assert.WithinDuration(t, expected.CreatedAt, createdAt, withinDuration, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected.CreatedAt, createdAt)) + tc.group.ID = expected.ID + tc.group.CreatedAt = expected.CreatedAt + tc.group.UpdatedAt = expected.UpdatedAt + tc.group.UpdatedBy = expected.UpdatedBy + tc.group.Owner = expected.Owner + assert.Equal(t, tc.group, expected, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.group, expected)) + ok := repoCall.Parent.AssertCalled(t, "Save", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + } + repoCall.Unset() + } +} + +func TestUpdateGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + group.ID = testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + token string + group mfgroups.Group + response mfgroups.Group + err error + }{ + { + desc: "update group name", + group: mfgroups.Group{ + ID: group.ID, + Name: "NewName", + }, + response: mfgroups.Group{ + ID: group.ID, + Name: "NewName", + }, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + 
err: nil, + }, + { + desc: "update group description", + group: mfgroups.Group{ + ID: group.ID, + Description: "NewDescription", + }, + response: mfgroups.Group{ + ID: group.ID, + Description: "NewDescription", + }, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: nil, + }, + { + desc: "update group metadata", + group: mfgroups.Group{ + ID: group.ID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + response: mfgroups.Group{ + ID: group.ID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: nil, + }, + { + desc: "update group name with invalid group id", + group: mfgroups.Group{ + ID: mocks.WrongID, + Name: "NewName", + }, + response: mfgroups.Group{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: errors.ErrNotFound, + }, + { + desc: "update group description with invalid group id", + group: mfgroups.Group{ + ID: mocks.WrongID, + Description: "NewDescription", + }, + response: mfgroups.Group{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: errors.ErrNotFound, + }, + { + desc: "update group metadata with invalid group id", + group: mfgroups.Group{ + ID: mocks.WrongID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + response: mfgroups.Group{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: errors.ErrNotFound, + }, + { + desc: "update group name with invalid token", + group: mfgroups.Group{ + ID: group.ID, + Name: "NewName", + }, + response: mfgroups.Group{}, + token: inValidToken, + err: errors.ErrAuthentication, + }, + { + desc: "update group description with invalid token", + group: mfgroups.Group{ + ID: group.ID, + Description: "NewDescription", + }, + response: 
mfgroups.Group{}, + token: inValidToken, + err: errors.ErrAuthentication, + }, + { + desc: "update group metadata with invalid token", + group: mfgroups.Group{ + ID: group.ID, + Metadata: mfclients.Metadata{ + "field": "value2", + }, + }, + response: mfgroups.Group{}, + token: inValidToken, + err: errors.ErrAuthentication, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("Update", context.Background(), mock.Anything).Return(tc.response, tc.err) + expectedGroup, err := svc.UpdateGroup(context.Background(), tc.token, tc.group) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, expectedGroup, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, expectedGroup)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Update", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } + +} + +func TestViewGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + group.ID = testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + token string + groupID string + response mfgroups.Group + err error + }{ + { + + desc: "view group", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + groupID: 
group.ID, + response: group, + err: nil, + }, + { + desc: "view group with invalid token", + token: "wrongtoken", + groupID: group.ID, + response: mfgroups.Group{}, + err: errors.ErrAuthentication, + }, + { + desc: "view group for wrong id", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + groupID: mocks.WrongID, + response: mfgroups.Group{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveByID", context.Background(), tc.groupID).Return(tc.response, tc.err) + expected, err := svc.ViewGroup(context.Background(), tc.token, tc.groupID) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, expected, tc.response, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, expected, tc.response)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.groupID) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestListGroups(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + nGroups := uint64(200) + parentID := "" + var aGroups = []mfgroups.Group{} + for i := uint64(0); i < nGroups; i++ { + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: 
fmt.Sprintf("Group_%d", i), + Description: description, + Metadata: mfclients.Metadata{ + "field": "value", + }, + Parent: parentID, + } + parentID = group.ID + aGroups = append(aGroups, group) + } + + cases := []struct { + desc string + token string + size uint64 + response mfgroups.GroupsPage + page mfgroups.GroupsPage + err error + }{ + { + desc: "list all groups", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + size: nGroups, + err: nil, + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Total: nGroups, + Limit: nGroups, + }, + }, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Total: nGroups, + Limit: nGroups, + }, + Groups: aGroups, + }, + }, + { + desc: "list groups with an offset", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + size: 150, + err: nil, + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 50, + Total: nGroups, + Limit: nGroups, + }, + }, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Total: 150, + Limit: nGroups, + }, + Groups: aGroups[50:nGroups], + }, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, tc.err) + page, err := svc.ListGroups(context.Background(), tc.token, tc.page) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "RetrieveAll", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("RetrieveAll was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } + +} + +func TestEnableGroup(t *testing.T) { + cRepo := 
new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + enabledGroup1 := mfgroups.Group{ID: testsutil.GenerateUUID(t, idProvider), Name: "group1", Status: mfclients.EnabledStatus} + disabledGroup := mfgroups.Group{ID: testsutil.GenerateUUID(t, idProvider), Name: "group2", Status: mfclients.DisabledStatus} + disabledGroup1 := disabledGroup + disabledGroup1.Status = mfclients.EnabledStatus + + casesEnabled := []struct { + desc string + id string + token string + group mfgroups.Group + response mfgroups.Group + err error + }{ + { + desc: "enable disabled group", + id: disabledGroup.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + group: disabledGroup, + response: disabledGroup1, + err: nil, + }, + { + desc: "enable enabled group", + id: enabledGroup1.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + group: enabledGroup1, + response: enabledGroup1, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "enable non-existing group", + id: mocks.WrongID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + group: mfgroups.Group{}, + response: mfgroups.Group{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range casesEnabled { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveByID", context.Background(), tc.id).Return(tc.group, tc.err) + repoCall2 := gRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.EnableGroup(context.Background(), tc.token, tc.id) + 
assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("ChangeStatus was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } + + casesDisabled := []struct { + desc string + status mfclients.Status + size uint64 + response mfgroups.GroupsPage + }{ + { + desc: "list activated groups", + status: mfclients.EnabledStatus, + size: 2, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1, disabledGroup1}, + }, + }, + { + desc: "list deactivated groups", + status: mfclients.DisabledStatus, + size: 1, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{disabledGroup}, + }, + }, + { + desc: "list activated and deactivated groups", + status: mfclients.AllStatus, + size: 3, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1, disabledGroup, disabledGroup1}, + }, + }, + } + + for _, tc := range casesDisabled { + pm := mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + }, + } + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + page, err := svc.ListGroups(context.Background(), testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := 
uint64(len(page.Groups)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestDisableGroup(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + enabledGroup1 := mfgroups.Group{ID: testsutil.GenerateUUID(t, idProvider), Name: "group1", Status: mfclients.EnabledStatus} + disabledGroup := mfgroups.Group{ID: testsutil.GenerateUUID(t, idProvider), Name: "group2", Status: mfclients.DisabledStatus} + disabledGroup1 := enabledGroup1 + disabledGroup1.Status = mfclients.DisabledStatus + + casesDisabled := []struct { + desc string + id string + token string + group mfgroups.Group + response mfgroups.Group + err error + }{ + { + desc: "disable enabled group", + id: enabledGroup1.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + group: enabledGroup1, + response: disabledGroup1, + err: nil, + }, + { + desc: "disable disabled group", + id: disabledGroup.ID, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + group: disabledGroup, + response: mfgroups.Group{}, + err: mfclients.ErrStatusAlreadyAssigned, + }, + { + desc: "disable non-existing group", + id: mocks.WrongID, + group: mfgroups.Group{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + response: mfgroups.Group{}, + err: errors.ErrNotFound, + }, + } + + for _, tc := range casesDisabled { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveByID", 
context.Background(), tc.id).Return(tc.group, tc.err) + repoCall2 := gRepo.On("ChangeStatus", context.Background(), mock.Anything).Return(tc.response, tc.err) + _, err := svc.DisableGroup(context.Background(), tc.token, tc.id) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "RetrieveByID", context.Background(), tc.id) + assert.True(t, ok, fmt.Sprintf("RetrieveByID was not called on %s", tc.desc)) + ok = repoCall2.Parent.AssertCalled(t, "ChangeStatus", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("ChangeStatus was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + } + + casesEnabled := []struct { + desc string + status mfclients.Status + size uint64 + response mfgroups.GroupsPage + }{ + { + desc: "list activated groups", + status: mfclients.EnabledStatus, + size: 1, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 1, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1}, + }, + }, + { + desc: "list deactivated groups", + status: mfclients.DisabledStatus, + size: 2, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 2, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{disabledGroup1, disabledGroup}, + }, + }, + { + desc: "list activated and deactivated groups", + status: mfclients.AllStatus, + size: 3, + response: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Total: 3, + Offset: 0, + Limit: 100, + }, + Groups: []mfgroups.Group{enabledGroup1, disabledGroup, disabledGroup1}, + }, + }, + } + + for _, tc := range casesEnabled { + pm := mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 0, + Limit: 100, + Status: tc.status, + }, + } + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("RetrieveAll", context.Background(), mock.Anything).Return(tc.response, nil) + 
page, err := svc.ListGroups(context.Background(), testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), pm) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + size := uint64(len(page.Groups)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) + repoCall.Unset() + repoCall1.Unset() + } +} + +func TestListMemberships(t *testing.T) { + cRepo := new(cmocks.ClientRepository) + gRepo := new(mocks.GroupRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := cmocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := groups.NewService(gRepo, pRepo, tokenizer, idProvider) + + var nGroups = uint64(100) + var aGroups = []mfgroups.Group{} + for i := uint64(1); i < nGroups; i++ { + group := mfgroups.Group{ + Name: fmt.Sprintf("membership_%d@example.com", i), + Metadata: mfclients.Metadata{"role": "group"}, + } + aGroups = append(aGroups, group) + } + validID := testsutil.GenerateUUID(t, idProvider) + validToken := testsutil.GenerateValidToken(t, validID, csvc, cRepo, phasher) + + cases := []struct { + desc string + token string + clientID string + page mfgroups.GroupsPage + response mfgroups.MembershipsPage + err error + }{ + { + desc: "list clients with authorized token", + token: validToken, + clientID: testsutil.GenerateUUID(t, idProvider), + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Action: "g_list", + Subject: validID, + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: nGroups, + Offset: 0, + Limit: 0, + }, + Memberships: aGroups, + }, + err: nil, + }, + { + desc: "list clients with offset and limit", + token: validToken, + clientID: testsutil.GenerateUUID(t, idProvider), + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Offset: 6, + Total: nGroups, + Limit: nGroups, + Status: 
mfclients.AllStatus, + Subject: validID, + Action: "g_list", + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: nGroups - 6, + }, + Memberships: aGroups[6:nGroups], + }, + }, + { + desc: "list clients with an invalid token", + token: inValidToken, + clientID: testsutil.GenerateUUID(t, idProvider), + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Action: "g_list", + Subject: validID, + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrAuthentication, + }, + { + desc: "list clients with an invalid id", + token: validToken, + clientID: mocks.WrongID, + page: mfgroups.GroupsPage{ + Page: mfgroups.Page{ + Action: "g_list", + Subject: validID, + }, + }, + response: mfgroups.MembershipsPage{ + Page: mfgroups.Page{ + Total: 0, + Offset: 0, + Limit: 0, + }, + }, + err: errors.ErrNotFound, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := gRepo.On("Memberships", context.Background(), tc.clientID, tc.page).Return(tc.response, tc.err) + page, err := svc.ListMemberships(context.Background(), tc.token, tc.clientID, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected %v got %v\n", tc.desc, tc.response, page)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Memberships", context.Background(), tc.clientID, tc.page) + assert.True(t, ok, fmt.Sprintf("Memberships was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} diff --git a/users/groups/status.go b/users/groups/status.go new file mode 100644 index 0000000000..0cd87d83a0 --- /dev/null +++ 
b/users/groups/status.go @@ -0,0 +1,55 @@ +package groups + +import "github.com/mainflux/mainflux/internal/apiutil" + +// Status represents Group status. +type Status uint8 + +// Possible Group status values +const ( + // EnabledStatus represents enabled Group. + EnabledStatus Status = iota + // DisabledStatus represents disabled Group. + DisabledStatus + + // AllStatus is used for querying purposes to list groups irrespective + // of their status - both active and inactive. It is never stored in the + // database as the actual Group status and should always be the largest + // value in this enumeration. + AllStatus +) + +// String representation of the possible status values. +const ( + Disabled = "disabled" + Enabled = "enabled" + All = "all" + Unknown = "unknown" +) + +// String converts group status to string literal. +func (s Status) String() string { + switch s { + case DisabledStatus: + return Disabled + case EnabledStatus: + return Enabled + case AllStatus: + return All + default: + return Unknown + } +} + +// ToStatus converts string value to a valid Group status. 
+func ToStatus(status string) (Status, error) { + switch status { + case Disabled: + return DisabledStatus, nil + case Enabled: + return EnabledStatus, nil + case All: + return AllStatus, nil + } + return Status(0), apiutil.ErrInvalidStatus +} diff --git a/users/groups/tracing/tracing.go b/users/groups/tracing/tracing.go new file mode 100644 index 0000000000..a2664cab3d --- /dev/null +++ b/users/groups/tracing/tracing.go @@ -0,0 +1,73 @@ +package tracing + +import ( + "context" + + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/users/groups" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ groups.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + gsvc groups.Service +} + +func TracingMiddleware(gsvc groups.Service, tracer trace.Tracer) groups.Service { + return &tracingMiddleware{tracer, gsvc} +} + +func (tm *tracingMiddleware) CreateGroup(ctx context.Context, token string, g mfgroups.Group) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_create_group", trace.WithAttributes(attribute.String("Name", g.Name))) + defer span.End() + + return tm.gsvc.CreateGroup(ctx, token, g) + +} + +func (tm *tracingMiddleware) ViewGroup(ctx context.Context, token string, id string) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_view_group", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.gsvc.ViewGroup(ctx, token, id) + +} + +func (tm *tracingMiddleware) ListGroups(ctx context.Context, token string, gm mfgroups.GroupsPage) (mfgroups.GroupsPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_groups") + defer span.End() + + return tm.gsvc.ListGroups(ctx, token, gm) + +} + +func (tm *tracingMiddleware) ListMemberships(ctx context.Context, token, clientID string, gm mfgroups.GroupsPage) (mfgroups.MembershipsPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_memberships") + defer 
span.End() + return tm.gsvc.ListMemberships(ctx, token, clientID, gm) +} + +func (tm *tracingMiddleware) UpdateGroup(ctx context.Context, token string, g mfgroups.Group) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_update_group", trace.WithAttributes(attribute.String("Name", g.Name))) + defer span.End() + + return tm.gsvc.UpdateGroup(ctx, token, g) + +} + +func (tm *tracingMiddleware) EnableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_enable_group", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.gsvc.EnableGroup(ctx, token, id) +} + +func (tm *tracingMiddleware) DisableGroup(ctx context.Context, token, id string) (mfgroups.Group, error) { + ctx, span := tm.tracer.Start(ctx, "svc_disable_group", trace.WithAttributes(attribute.String("ID", id))) + defer span.End() + + return tm.gsvc.DisableGroup(ctx, token, id) +} diff --git a/users/hasher.go b/users/hasher.go deleted file mode 100644 index cafb6b0d1a..0000000000 --- a/users/hasher.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package users - -// Hasher specifies an API for generating hashes of an arbitrary textual -// content. -type Hasher interface { - // Hash generates the hashed string from plain-text. - Hash(string) (string, error) - - // Compare compares plain-text version to the hashed one. An error should - // indicate failed comparison. - Compare(string, string) error -} diff --git a/users/hasher/hasher.go b/users/hasher/hasher.go new file mode 100644 index 0000000000..20e17fa62b --- /dev/null +++ b/users/hasher/hasher.go @@ -0,0 +1,40 @@ +// Package hasher provides a hasher implementation utilizing bcrypt. 
+package hasher + +import ( + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/clients" + "golang.org/x/crypto/bcrypt" +) + +const cost int = 10 + +var ( + errHashPassword = errors.New("Generate hash from password failed") + errComparePassword = errors.New("Compare hash and password failed") +) + +var _ clients.Hasher = (*bcryptHasher)(nil) + +type bcryptHasher struct{} + +// New instantiates a bcrypt-based hasher implementation. +func New() clients.Hasher { + return &bcryptHasher{} +} + +func (bh *bcryptHasher) Hash(pwd string) (string, error) { + hash, err := bcrypt.GenerateFromPassword([]byte(pwd), cost) + if err != nil { + return "", errors.Wrap(errHashPassword, err) + } + + return string(hash), nil +} + +func (bh *bcryptHasher) Compare(plain, hashed string) error { + if err := bcrypt.CompareHashAndPassword([]byte(hashed), []byte(plain)); err != nil { + return errors.Wrap(errComparePassword, err) + } + return nil +} diff --git a/users/jwt/jwt.go b/users/jwt/jwt.go new file mode 100644 index 0000000000..53fbfdc2db --- /dev/null +++ b/users/jwt/jwt.go @@ -0,0 +1,47 @@ +package jwt + +import ( + "context" +) + +// Possible token types are access and refresh tokens. +const ( + RefreshToken = "refresh" + AccessToken = "access" +) + +// Token is used for authentication purposes. +// It contains AccessToken, RefreshToken, Type and AccessExpiry. +type Token struct { + AccessToken string // AccessToken contains the security credentials for a login session and identifies the client. + RefreshToken string // RefreshToken is a credential artifact that OAuth can use to get a new access token without client interaction. + AccessType string // AccessType is the specific type of access token issued. It can be Bearer, Client or Basic. +} + +// Claims are the Client's internal JWT Claims. +type Claims struct { + ClientID string // ClientID is the client unique identifier. 
+ Email string // Email is the client identity + Type string // Type denotes the type of claim. Either AccessToken or RefreshToken. +} + +// TokenService specifies an API that must be fulfilled by the domain service +// implementation, and all of its decorators (e.g. logging & metrics). +type TokenService interface { + // IssueToken issues a new access and refresh token. + IssueToken(ctx context.Context, identity, secret string) (Token, error) + + // RefreshToken refreshes expired access tokens. + // After an access token expires, the refresh token is used to get + // a new pair of access and refresh tokens. + RefreshToken(ctx context.Context, accessToken string) (Token, error) +} + +// TokenRepository specifies an account persistence API. +type TokenRepository interface { + // Issue issues a new access and refresh token. + Issue(ctx context.Context, claim Claims) (Token, error) + + // Parse checks the validity of a token. + Parse(ctx context.Context, token string) (Claims, error) +} diff --git a/users/jwt/tokens.go b/users/jwt/tokens.go new file mode 100644 index 0000000000..58099279e7 --- /dev/null +++ b/users/jwt/tokens.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "context" + "time" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwt" + "github.com/mainflux/mainflux/pkg/errors" +) + +const issuerName = "clients.auth" + +var _ TokenRepository = (*tokenRepo)(nil) + +type tokenRepo struct { + secret []byte + accessDuration time.Duration + refreshDuration time.Duration +} + +// NewTokenRepo instantiates an implementation of Token repository. +func NewTokenRepo(secret []byte, aduration, rduration time.Duration) TokenRepository { + return &tokenRepo{ + secret: secret, + accessDuration: aduration, + refreshDuration: rduration, + } +} + +func (repo tokenRepo) Issue(ctx context.Context, claim Claims) (Token, error) { + aexpiry := time.Now().Add(repo.accessDuration) + accessToken, err := jwt.NewBuilder(). + Issuer(issuerName). 
+ IssuedAt(time.Now()). + Subject(claim.ClientID). + Claim("identity", claim.Email). + Claim("type", AccessToken). + Expiration(aexpiry). + Build() + if err != nil { + return Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + signedAccessToken, err := jwt.Sign(accessToken, jwt.WithKey(jwa.HS512, repo.secret)) + if err != nil { + return Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + refreshToken, err := jwt.NewBuilder(). + Issuer(issuerName). + IssuedAt(time.Now()). + Subject(claim.ClientID). + Claim("identity", claim.Email). + Claim("type", RefreshToken). + Expiration(time.Now().Add(repo.refreshDuration)). + Build() + if err != nil { + return Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + signedRefreshToken, err := jwt.Sign(refreshToken, jwt.WithKey(jwa.HS512, repo.secret)) + if err != nil { + return Token{}, errors.Wrap(errors.ErrAuthentication, err) + } + + return Token{ + AccessToken: string(signedAccessToken[:]), + RefreshToken: string(signedRefreshToken[:]), + AccessType: "Bearer", + }, nil +} + +func (repo tokenRepo) Parse(ctx context.Context, accessToken string) (Claims, error) { + token, err := jwt.Parse( + []byte(accessToken), + jwt.WithValidate(true), + jwt.WithKey(jwa.HS512, repo.secret), + ) + if err != nil { + return Claims{}, errors.Wrap(errors.ErrAuthentication, err) + } + tType, ok := token.Get("type") + if !ok { + return Claims{}, errors.Wrap(errors.ErrAuthentication, err) + } + identity, ok := token.Get("identity") + if !ok { + return Claims{}, errors.Wrap(errors.ErrAuthentication, err) + } + claim := Claims{ + ClientID: token.Subject(), + Email: identity.(string), + Type: tType.(string), + } + return claim, nil +} diff --git a/users/jwt/tracing.go b/users/jwt/tracing.go new file mode 100644 index 0000000000..c075f37b4d --- /dev/null +++ b/users/jwt/tracing.go @@ -0,0 +1,37 @@ +package jwt + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ TokenRepository = 
(*tokenRepoMiddlware)(nil) + +type tokenRepoMiddlware struct { + repo TokenRepository + tracer trace.Tracer +} + +// NewTokenRepoMiddleware instantiates an implementation of tracing Token repository. +func NewTokenRepoMiddleware(repo TokenRepository, tracer trace.Tracer) TokenRepository { + return &tokenRepoMiddlware{ + repo: repo, + tracer: tracer, + } +} + +func (trm tokenRepoMiddlware) Issue(ctx context.Context, claim Claims) (Token, error) { + ctx, span := trm.tracer.Start(ctx, "issue_token", trace.WithAttributes(attribute.String("clientid", claim.ClientID))) + defer span.End() + + return trm.repo.Issue(ctx, claim) +} + +func (trm tokenRepoMiddlware) Parse(ctx context.Context, accessToken string) (Claims, error) { + ctx, span := trm.tracer.Start(ctx, "parse_token", trace.WithAttributes(attribute.String("accesstoken", accessToken))) + defer span.End() + + return trm.repo.Parse(ctx, accessToken) +} diff --git a/users/mocks/authn.go b/users/mocks/authn.go deleted file mode 100644 index 2da336ac98..0000000000 --- a/users/mocks/authn.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - "google.golang.org/grpc" -) - -var _ mainflux.AuthServiceClient = (*authServiceMock)(nil) - -type SubjectSet struct { - Object string - Relation string -} - -type authServiceMock struct { - users map[string]string - authz map[string][]SubjectSet -} - -func (svc authServiceMock) ListPolicies(ctx context.Context, in *mainflux.ListPoliciesReq, opts ...grpc.CallOption) (*mainflux.ListPoliciesRes, error) { - panic("not implemented") -} - -// NewAuthService creates mock of users service. 
-func NewAuthService(users map[string]string, authzDB map[string][]SubjectSet) mainflux.AuthServiceClient { - return &authServiceMock{users, authzDB} -} - -func (svc authServiceMock) Identify(ctx context.Context, in *mainflux.Token, opts ...grpc.CallOption) (*mainflux.UserIdentity, error) { - if id, ok := svc.users[in.Value]; ok { - return &mainflux.UserIdentity{Id: id, Email: id}, nil - } - return nil, errors.ErrAuthentication -} - -func (svc authServiceMock) Issue(ctx context.Context, in *mainflux.IssueReq, opts ...grpc.CallOption) (*mainflux.Token, error) { - if id, ok := svc.users[in.GetEmail()]; ok { - switch in.Type { - default: - return &mainflux.Token{Value: id}, nil - } - } - return nil, errors.ErrAuthentication -} - -func (svc authServiceMock) Authorize(ctx context.Context, req *mainflux.AuthorizeReq, _ ...grpc.CallOption) (r *mainflux.AuthorizeRes, err error) { - if sub, ok := svc.authz[req.GetSub()]; ok { - for _, v := range sub { - if v.Relation == req.GetAct() && v.Object == req.GetObj() { - return &mainflux.AuthorizeRes{Authorized: true}, nil - } - } - } - return &mainflux.AuthorizeRes{Authorized: false}, nil -} - -func (svc authServiceMock) AddPolicy(ctx context.Context, in *mainflux.AddPolicyReq, opts ...grpc.CallOption) (*mainflux.AddPolicyRes, error) { - svc.authz[in.GetSub()] = append(svc.authz[in.GetSub()], SubjectSet{Object: in.GetObj(), Relation: in.GetAct()}) - return &mainflux.AddPolicyRes{Authorized: true}, nil -} - -func (svc authServiceMock) DeletePolicy(ctx context.Context, in *mainflux.DeletePolicyReq, opts ...grpc.CallOption) (*mainflux.DeletePolicyRes, error) { - // Not implemented - return &mainflux.DeletePolicyRes{Deleted: true}, nil -} - -func (svc authServiceMock) Members(ctx context.Context, req *mainflux.MembersReq, _ ...grpc.CallOption) (r *mainflux.MembersRes, err error) { - panic("not implemented") -} - -func (svc authServiceMock) Assign(ctx context.Context, req *mainflux.Assignment, _ ...grpc.CallOption) (r *empty.Empty, 
err error) { - panic("not implemented") -} diff --git a/users/mocks/email.go b/users/mocks/email.go deleted file mode 100644 index bc8657fb5c..0000000000 --- a/users/mocks/email.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "github.com/mainflux/mainflux/users" -) - -type emailerMock struct { -} - -// NewEmailer provides emailer instance for the test -func NewEmailer() users.Emailer { - return &emailerMock{} -} - -func (e *emailerMock) SendPasswordReset([]string, string, string) error { - return nil -} diff --git a/users/mocks/hasher.go b/users/mocks/hasher.go deleted file mode 100644 index 9edfa5e9bb..0000000000 --- a/users/mocks/hasher.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/users" -) - -var _ users.Hasher = (*hasherMock)(nil) - -type hasherMock struct{} - -// NewHasher creates "no-op" hasher for test purposes. This implementation will -// return secrets without changing them. 
-func NewHasher() users.Hasher { - return &hasherMock{} -} - -func (hm *hasherMock) Hash(pwd string) (string, error) { - if pwd == "" { - return "", errors.ErrMalformedEntity - } - return pwd, nil -} - -func (hm *hasherMock) Compare(plain, hashed string) error { - if plain != hashed { - return errors.ErrAuthentication - } - - return nil -} diff --git a/users/mocks/users.go b/users/mocks/users.go deleted file mode 100644 index 3e34b27c70..0000000000 --- a/users/mocks/users.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package mocks - -import ( - "context" - "sort" - "sync" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/users" -) - -var _ users.UserRepository = (*userRepositoryMock)(nil) - -type userRepositoryMock struct { - mu sync.Mutex - users map[string]users.User - usersByID map[string]users.User - usersByGroupID map[string]users.User -} - -// NewUserRepository creates in-memory user repository -func NewUserRepository() users.UserRepository { - return &userRepositoryMock{ - users: make(map[string]users.User), - usersByID: make(map[string]users.User), - usersByGroupID: make(map[string]users.User), - } -} - -func (urm *userRepositoryMock) Save(ctx context.Context, user users.User) (string, error) { - urm.mu.Lock() - defer urm.mu.Unlock() - - if _, ok := urm.users[user.Email]; ok { - return "", errors.ErrConflict - } - - urm.users[user.Email] = user - urm.usersByID[user.ID] = user - return user.ID, nil -} - -func (urm *userRepositoryMock) Update(ctx context.Context, user users.User) error { - urm.mu.Lock() - defer urm.mu.Unlock() - - if _, ok := urm.users[user.Email]; !ok { - return errors.ErrNotFound - } - - urm.users[user.Email] = user - return nil -} - -func (urm *userRepositoryMock) UpdateUser(ctx context.Context, user users.User) error { - urm.mu.Lock() - defer urm.mu.Unlock() - - if _, ok := urm.users[user.Email]; !ok { - return errors.ErrNotFound - } - - 
urm.users[user.Email] = user - return nil -} - -func (urm *userRepositoryMock) RetrieveByEmail(ctx context.Context, email string) (users.User, error) { - urm.mu.Lock() - defer urm.mu.Unlock() - - val, ok := urm.users[email] - if !ok { - return users.User{}, errors.ErrNotFound - } - - return val, nil -} - -func (urm *userRepositoryMock) RetrieveByID(ctx context.Context, id string) (users.User, error) { - urm.mu.Lock() - defer urm.mu.Unlock() - - val, ok := urm.usersByID[id] - if !ok { - return users.User{}, errors.ErrNotFound - } - - return val, nil -} - -func (urm *userRepositoryMock) RetrieveAll(ctx context.Context, ids []string, pm users.PageMetadata) (users.UserPage, error) { - urm.mu.Lock() - defer urm.mu.Unlock() - - up := users.UserPage{} - i := uint64(0) - - if pm.Email != "" { - val, ok := urm.users[pm.Email] - if !ok { - return users.UserPage{}, errors.ErrNotFound - } - up.Offset = pm.Offset - up.Limit = pm.Limit - up.Total = uint64(i) - up.Users = []users.User{val} - return up, nil - } - - if pm.Status == users.EnabledStatusKey || pm.Status == users.DisabledStatusKey { - for _, u := range sortUsers(urm.users) { - if i >= pm.Offset && i < (pm.Limit+pm.Offset) { - if pm.Status == u.Status { - up.Users = append(up.Users, u) - } - } - i++ - } - up.Offset = pm.Offset - up.Limit = pm.Limit - up.Total = uint64(i) - return up, nil - } - for _, u := range sortUsers(urm.users) { - if i >= pm.Offset && i < (pm.Limit+pm.Offset) { - up.Users = append(up.Users, u) - } - i++ - } - - up.Offset = pm.Offset - up.Limit = pm.Limit - up.Total = uint64(i) - - return up, nil -} - -func (urm *userRepositoryMock) UpdatePassword(_ context.Context, token, password string) error { - urm.mu.Lock() - defer urm.mu.Unlock() - - if _, ok := urm.users[token]; !ok { - return errors.ErrNotFound - } - return nil -} - -func (urm *userRepositoryMock) ChangeStatus(ctx context.Context, id, status string) error { - urm.mu.Lock() - defer urm.mu.Unlock() - - user, ok := urm.usersByID[id] - if !ok { 
- return errors.ErrNotFound - } - user.Status = status - urm.usersByID[id] = user - urm.users[user.Email] = user - return nil -} -func sortUsers(us map[string]users.User) []users.User { - users := []users.User{} - ids := make([]string, 0, len(us)) - for k := range us { - ids = append(ids, k) - } - - sort.Strings(ids) - for _, id := range ids { - users = append(users, us[id]) - } - - return users -} diff --git a/users/policies/api/grpc/client.go b/users/policies/api/grpc/client.go new file mode 100644 index 0000000000..db4e10246c --- /dev/null +++ b/users/policies/api/grpc/client.go @@ -0,0 +1,240 @@ +package grpc + +import ( + "context" + "time" + + "github.com/go-kit/kit/endpoint" + kitgrpc "github.com/go-kit/kit/transport/grpc" + "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + "google.golang.org/grpc" +) + +const svcName = "mainflux.users.policies.AuthService" + +var _ policies.AuthServiceClient = (*grpcClient)(nil) + +type grpcClient struct { + authorize endpoint.Endpoint + issue endpoint.Endpoint + identify endpoint.Endpoint + addPolicy endpoint.Endpoint + deletePolicy endpoint.Endpoint + listPolicies endpoint.Endpoint + timeout time.Duration +} + +// NewClient returns new gRPC client instance. 
+func NewClient(conn *grpc.ClientConn, timeout time.Duration) policies.AuthServiceClient { + return &grpcClient{ + authorize: otelkit.EndpointMiddleware(otelkit.WithOperation("authorize"))(kitgrpc.NewClient( + conn, + svcName, + "Authorize", + encodeAuthorizeRequest, + decodeAuthorizeResponse, + policies.AuthorizeRes{}, + ).Endpoint()), + issue: otelkit.EndpointMiddleware(otelkit.WithOperation("issue"))(kitgrpc.NewClient( + conn, + svcName, + "Issue", + encodeIssueRequest, + decodeIssueResponse, + policies.UserIdentity{}, + ).Endpoint()), + identify: otelkit.EndpointMiddleware(otelkit.WithOperation("identify"))(kitgrpc.NewClient( + conn, + svcName, + "Identify", + encodeIdentifyRequest, + decodeIdentifyResponse, + policies.UserIdentity{}, + ).Endpoint()), + addPolicy: otelkit.EndpointMiddleware(otelkit.WithOperation("add_policy"))(kitgrpc.NewClient( + conn, + svcName, + "AddPolicy", + encodeAddPolicyRequest, + decodeAddPolicyResponse, + policies.AddPolicyRes{}, + ).Endpoint()), + deletePolicy: otelkit.EndpointMiddleware(otelkit.WithOperation("delete_policy"))(kitgrpc.NewClient( + conn, + svcName, + "DeletePolicy", + encodeDeletePolicyRequest, + decodeDeletePolicyResponse, + policies.DeletePolicyRes{}, + ).Endpoint()), + listPolicies: otelkit.EndpointMiddleware(otelkit.WithOperation("list_policies"))(kitgrpc.NewClient( + conn, + svcName, + "ListPolicies", + encodeListPoliciesRequest, + decodeListPoliciesResponse, + policies.ListPoliciesRes{}, + ).Endpoint()), + + timeout: timeout, + } +} + +func (client grpcClient) Authorize(ctx context.Context, req *policies.AuthorizeReq, _ ...grpc.CallOption) (r *policies.AuthorizeRes, err error) { + ctx, close := context.WithTimeout(ctx, client.timeout) + defer close() + areq := authReq{Act: req.GetAct(), Obj: req.GetObj(), Sub: req.GetSub(), EntityType: req.GetEntityType()} + res, err := client.authorize(ctx, areq) + if err != nil { + return &policies.AuthorizeRes{}, err + } + + ar := res.(authorizeRes) + return 
&policies.AuthorizeRes{Authorized: ar.authorized}, err +} + +func decodeAuthorizeResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.AuthorizeRes) + return authorizeRes{authorized: res.Authorized}, nil +} + +func encodeAuthorizeRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(authReq) + return &policies.AuthorizeReq{ + Sub: req.Sub, + Obj: req.Obj, + Act: req.Act, + EntityType: req.EntityType, + }, nil +} + +func (client grpcClient) Issue(ctx context.Context, req *policies.IssueReq, _ ...grpc.CallOption) (*policies.Token, error) { + ctx, close := context.WithTimeout(ctx, client.timeout) + defer close() + ireq := issueReq{email: req.GetEmail(), password: req.GetPassword()} + res, err := client.issue(ctx, ireq) + if err != nil { + return nil, err + } + + ir := res.(identityRes) + return &policies.Token{Value: ir.id}, nil +} + +func encodeIssueRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(issueReq) + return &policies.IssueReq{Email: req.email, Password: req.password}, nil +} + +func decodeIssueResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.UserIdentity) + return identityRes{id: res.GetId()}, nil +} + +func (client grpcClient) Identify(ctx context.Context, token *policies.Token, _ ...grpc.CallOption) (*policies.UserIdentity, error) { + ctx, close := context.WithTimeout(ctx, client.timeout) + defer close() + + res, err := client.identify(ctx, identityReq{token: token.GetValue()}) + if err != nil { + return nil, err + } + + ir := res.(identityRes) + return &policies.UserIdentity{Id: ir.id}, nil +} + +func encodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(identityReq) + return &policies.Token{Value: req.token}, nil +} + +func decodeIdentifyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := 
grpcRes.(*policies.UserIdentity) + return identityRes{id: res.GetId()}, nil +} + +func (client grpcClient) AddPolicy(ctx context.Context, in *policies.AddPolicyReq, opts ...grpc.CallOption) (*policies.AddPolicyRes, error) { + ctx, close := context.WithTimeout(ctx, client.timeout) + defer close() + areq := addPolicyReq{Token: in.GetToken(), Act: in.GetAct(), Obj: in.GetObj(), Sub: in.GetSub()} + res, err := client.addPolicy(ctx, areq) + if err != nil { + return &policies.AddPolicyRes{}, err + } + + apr := res.(addPolicyRes) + return &policies.AddPolicyRes{Authorized: apr.authorized}, err +} + +func decodeAddPolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.AddPolicyRes) + return addPolicyRes{authorized: res.Authorized}, nil +} + +func encodeAddPolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(addPolicyReq) + return &policies.AddPolicyReq{ + Token: req.Token, + Sub: req.Sub, + Obj: req.Obj, + Act: req.Act, + }, nil +} + +func (client grpcClient) DeletePolicy(ctx context.Context, in *policies.DeletePolicyReq, opts ...grpc.CallOption) (*policies.DeletePolicyRes, error) { + ctx, close := context.WithTimeout(ctx, client.timeout) + defer close() + preq := policyReq{Token: in.GetToken(), Act: in.GetAct(), Obj: in.GetObj(), Sub: in.GetSub()} + res, err := client.deletePolicy(ctx, preq) + if err != nil { + return &policies.DeletePolicyRes{}, err + } + + dpr := res.(deletePolicyRes) + return &policies.DeletePolicyRes{Deleted: dpr.deleted}, err +} + +func decodeDeletePolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.DeletePolicyRes) + return deletePolicyRes{deleted: res.GetDeleted()}, nil +} + +func encodeDeletePolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(policyReq) + return &policies.DeletePolicyReq{ + Token: req.Token, + Sub: req.Sub, + Obj: req.Obj, + Act: req.Act, + }, 
nil +} + +func (client grpcClient) ListPolicies(ctx context.Context, in *policies.ListPoliciesReq, opts ...grpc.CallOption) (*policies.ListPoliciesRes, error) { + ctx, close := context.WithTimeout(ctx, client.timeout) + defer close() + lreq := listPoliciesReq{Token: in.GetToken(), Obj: in.GetObj(), Act: in.GetAct(), Sub: in.GetSub()} + res, err := client.listPolicies(ctx, lreq) + if err != nil { + return &policies.ListPoliciesRes{}, err + } + + lpr := res.(listPoliciesRes) + return &policies.ListPoliciesRes{Objects: lpr.objects}, err +} + +func decodeListPoliciesResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(*policies.ListPoliciesRes) + return listPoliciesRes{objects: res.GetObjects()}, nil +} + +func encodeListPoliciesRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(listPoliciesReq) + return &policies.ListPoliciesReq{ + Token: req.Token, + Sub: req.Sub, + Obj: req.Obj, + Act: req.Act, + }, nil +} diff --git a/users/policies/api/grpc/endpoint.go b/users/policies/api/grpc/endpoint.go new file mode 100644 index 0000000000..f8a5bd2ed9 --- /dev/null +++ b/users/policies/api/grpc/endpoint.go @@ -0,0 +1,107 @@ +package grpc + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/policies" +) + +func authorizeEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(authReq) + + if err := req.validate(); err != nil { + return authorizeRes{}, err + } + policy := policies.Policy{Subject: req.Sub, Object: req.Obj, Actions: []string{req.Act}} + err := svc.Authorize(ctx, req.EntityType, policy) + if err != nil { + return authorizeRes{}, err + } + return authorizeRes{authorized: true}, err + } +} + +func issueEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) 
(interface{}, error) { + req := request.(issueReq) + if err := req.validate(); err != nil { + return issueRes{}, err + } + + tkn, err := svc.IssueToken(ctx, req.email, req.password) + if err != nil { + return issueRes{}, err + } + + return issueRes{value: tkn.AccessToken}, nil + } +} + +func identifyEndpoint(svc clients.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(identityReq) + if err := req.validate(); err != nil { + return identityRes{}, err + } + + id, err := svc.Identify(ctx, req.token) + if err != nil { + return identityRes{}, err + } + + ret := identityRes{ + id: id, + } + return ret, nil + } +} + +func addPolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(addPolicyReq) + if err := req.validate(); err != nil { + return addPolicyRes{}, err + } + policy := policies.Policy{Subject: req.Sub, Object: req.Obj, Actions: req.Act} + err := svc.AddPolicy(ctx, req.Token, policy) + if err != nil { + return addPolicyRes{}, err + } + return addPolicyRes{authorized: true}, err + } +} + +func deletePolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(policyReq) + if err := req.validate(); err != nil { + return deletePolicyRes{}, err + } + + policy := policies.Policy{Subject: req.Sub, Object: req.Obj, Actions: []string{req.Act}} + err := svc.DeletePolicy(ctx, req.Token, policy) + if err != nil { + return deletePolicyRes{}, err + } + return deletePolicyRes{deleted: true}, nil + } +} + +func listPoliciesEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listPoliciesReq) + pp := policies.Page{Subject: req.Sub, Object: req.Obj, Action: req.Act, Limit: 10} + page, err := svc.ListPolicy(ctx, req.Token, pp) 
+ if err != nil { + return listPoliciesRes{}, err + } + var objects []string + for _, p := range page.Policies { + objects = append(objects, p.Object) + } + return listPoliciesRes{objects: objects}, nil + } +} diff --git a/users/policies/api/grpc/requests.go b/users/policies/api/grpc/requests.go new file mode 100644 index 0000000000..68b8dc1add --- /dev/null +++ b/users/policies/api/grpc/requests.go @@ -0,0 +1,116 @@ +package grpc + +import ( + "github.com/mainflux/mainflux/internal/apiutil" +) + +// authReq represents authorization request. It contains: +// 1. subject - an action invoker (client) +// 2. object - an entity over which action will be executed (client, group, computation, dataset) +// 3. action - type of action that will be executed (read/write) +type authReq struct { + Sub string + Obj string + Act string + EntityType string +} + +func (req authReq) validate() error { + if req.Sub == "" { + return apiutil.ErrMissingPolicySub + } + if req.Obj == "" { + return apiutil.ErrMissingPolicyObj + } + if req.Act == "" { + return apiutil.ErrMalformedPolicyAct + } + if req.EntityType == "" { + return apiutil.ErrMissingPolicyEntityType + } + + return nil +} + +type identityReq struct { + token string +} + +func (req identityReq) validate() error { + if req.token == "" { + return apiutil.ErrBearerToken + } + + return nil +} + +type issueReq struct { + email string + password string +} + +func (req issueReq) validate() error { + if req.email == "" { + return apiutil.ErrMissingEmail + } + if req.password == "" { + return apiutil.ErrMissingPass + } + return nil +} + +type addPolicyReq struct { + Token string + Sub string + Obj string + Act []string +} + +func (req addPolicyReq) validate() error { + if req.Token == "" { + return apiutil.ErrBearerToken + } + if req.Sub == "" { + return apiutil.ErrMissingPolicySub + } + + if req.Obj == "" { + return apiutil.ErrMissingPolicyObj + } + + if len(req.Act) == 0 { + return apiutil.ErrMalformedPolicyAct + } + + return nil +} + 
+type policyReq struct { + Token string + Sub string + Obj string + Act string +} + +func (req policyReq) validate() error { + if req.Sub == "" { + return apiutil.ErrMissingPolicySub + } + + if req.Obj == "" { + return apiutil.ErrMissingPolicyObj + } + + if req.Act == "" { + return apiutil.ErrMalformedPolicyAct + } + + return nil +} + +type listPoliciesReq struct { + Token string + Sub string + Obj string + Act string +} diff --git a/users/policies/api/grpc/responses.go b/users/policies/api/grpc/responses.go new file mode 100644 index 0000000000..60f6f1a73e --- /dev/null +++ b/users/policies/api/grpc/responses.go @@ -0,0 +1,25 @@ +package grpc + +type authorizeRes struct { + authorized bool +} + +type identityRes struct { + id string +} + +type issueRes struct { + value string +} + +type addPolicyRes struct { + authorized bool +} + +type deletePolicyRes struct { + deleted bool +} + +type listPoliciesRes struct { + objects []string +} diff --git a/users/policies/api/grpc/transport.go b/users/policies/api/grpc/transport.go new file mode 100644 index 0000000000..82aba84ec7 --- /dev/null +++ b/users/policies/api/grpc/transport.go @@ -0,0 +1,195 @@ +package grpc + +import ( + "context" + + kitgrpc "github.com/go-kit/kit/transport/grpc" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ policies.AuthServiceServer = (*grpcServer)(nil) + +type grpcServer struct { + authorize kitgrpc.Handler + issue kitgrpc.Handler + identify kitgrpc.Handler + addPolicy kitgrpc.Handler + deletePolicy kitgrpc.Handler + listPolicies kitgrpc.Handler + policies.UnimplementedAuthServiceServer +} + +// NewServer returns new AuthServiceServer instance. 
+func NewServer(csvc clients.Service, psvc policies.Service) policies.AuthServiceServer { + return &grpcServer{ + authorize: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("authorize"))(authorizeEndpoint(psvc)), + decodeAuthorizeRequest, + encodeAuthorizeResponse, + ), + issue: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("issue"))(issueEndpoint(csvc)), + decodeIssueRequest, + encodeIssueResponse, + ), + identify: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("identify"))(identifyEndpoint(csvc)), + decodeIdentifyRequest, + encodeIdentifyResponse, + ), + addPolicy: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("add_policy"))(addPolicyEndpoint(psvc)), + decodeAddPolicyRequest, + encodeAddPolicyResponse, + ), + deletePolicy: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("delete_policy"))(deletePolicyEndpoint(psvc)), + decodeDeletePolicyRequest, + encodeDeletePolicyResponse, + ), + listPolicies: kitgrpc.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_policies"))(listPoliciesEndpoint(psvc)), + decodeListPoliciesRequest, + encodeListPoliciesResponse, + ), + } +} + +func (s *grpcServer) Authorize(ctx context.Context, req *policies.AuthorizeReq) (*policies.AuthorizeRes, error) { + _, res, err := s.authorize.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + return res.(*policies.AuthorizeRes), nil +} + +func (s *grpcServer) Issue(ctx context.Context, req *policies.IssueReq) (*policies.Token, error) { + _, res, err := s.issue.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + return res.(*policies.Token), nil +} + +func (s *grpcServer) Identify(ctx context.Context, token *policies.Token) (*policies.UserIdentity, error) { + _, res, err := s.identify.ServeGRPC(ctx, token) + if err != nil { + return nil, encodeError(err) + } + return res.(*policies.UserIdentity), nil +} + +func (s 
*grpcServer) AddPolicy(ctx context.Context, req *policies.AddPolicyReq) (*policies.AddPolicyRes, error) { + _, res, err := s.addPolicy.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + return res.(*policies.AddPolicyRes), nil +} + +func (s *grpcServer) DeletePolicy(ctx context.Context, req *policies.DeletePolicyReq) (*policies.DeletePolicyRes, error) { + _, res, err := s.deletePolicy.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + return res.(*policies.DeletePolicyRes), nil +} + +func (s *grpcServer) ListPolicies(ctx context.Context, req *policies.ListPoliciesReq) (*policies.ListPoliciesRes, error) { + _, res, err := s.listPolicies.ServeGRPC(ctx, req) + if err != nil { + return nil, encodeError(err) + } + return res.(*policies.ListPoliciesRes), nil +} + +func decodeAuthorizeRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.AuthorizeReq) + return authReq{Act: req.GetAct(), Obj: req.GetObj(), Sub: req.GetSub(), EntityType: req.GetEntityType()}, nil +} + +func encodeAuthorizeResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(authorizeRes) + return &policies.AuthorizeRes{Authorized: res.authorized}, nil +} + +func decodeIssueRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.IssueReq) + return issueReq{email: req.GetEmail(), password: req.GetPassword()}, nil +} + +func encodeIssueResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(issueRes) + return &policies.Token{Value: res.value}, nil +} + +func decodeIdentifyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.Token) + return identityReq{token: req.GetValue()}, nil +} + +func encodeIdentifyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(identityRes) + return &policies.UserIdentity{Id: res.id}, nil +} + 
+func decodeAddPolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.AddPolicyReq) + return addPolicyReq{Token: req.GetToken(), Sub: req.GetSub(), Obj: req.GetObj(), Act: req.GetAct()}, nil +} + +func encodeAddPolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(addPolicyRes) + return &policies.AddPolicyRes{Authorized: res.authorized}, nil +} + +func decodeDeletePolicyRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.DeletePolicyReq) + return policyReq{Token: req.GetToken(), Sub: req.GetSub(), Obj: req.GetObj(), Act: req.GetAct()}, nil +} + +func encodeDeletePolicyResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(deletePolicyRes) + return &policies.DeletePolicyRes{Deleted: res.deleted}, nil +} + +func decodeListPoliciesRequest(_ context.Context, grpcReq interface{}) (interface{}, error) { + req := grpcReq.(*policies.ListPoliciesReq) + return listPoliciesReq{Token: req.GetToken(), Sub: req.GetSub(), Obj: req.GetObj(), Act: req.GetAct()}, nil +} + +func encodeListPoliciesResponse(_ context.Context, grpcRes interface{}) (interface{}, error) { + res := grpcRes.(listPoliciesRes) + return &policies.ListPoliciesRes{Objects: res.objects}, nil +} + +func encodeError(err error) error { + switch { + case errors.Contains(err, nil): + return nil + case errors.Contains(err, errors.ErrMalformedEntity), + err == apiutil.ErrInvalidAuthKey, + err == apiutil.ErrMissingID, + err == apiutil.ErrBearerToken, + err == apiutil.ErrMissingPolicySub, + err == apiutil.ErrMissingPolicyObj, + err == apiutil.ErrMalformedPolicyAct, + err == apiutil.ErrMalformedPolicy, + err == apiutil.ErrMissingPolicyOwner, + err == apiutil.ErrHigherPolicyRank: + return status.Error(codes.InvalidArgument, err.Error()) + case errors.Contains(err, errors.ErrAuthentication), + err == apiutil.ErrBearerToken: + return 
status.Error(codes.Unauthenticated, err.Error()) + case errors.Contains(err, errors.ErrAuthorization): + return status.Error(codes.PermissionDenied, err.Error()) + default: + return status.Error(codes.Internal, "internal server error") + } +} diff --git a/users/policies/api/http/endpoints.go b/users/policies/api/http/endpoints.go new file mode 100644 index 0000000000..b6963ba9be --- /dev/null +++ b/users/policies/api/http/endpoints.go @@ -0,0 +1,141 @@ +package api + +import ( + "context" + + "github.com/go-kit/kit/endpoint" + "github.com/mainflux/mainflux/users/policies" +) + +func authorizeEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(authorizeReq) + if err := req.validate(); err != nil { + return authorizeRes{authorized: false}, err + } + policy := policies.Policy{ + Subject: req.Subject, + Object: req.Object, + Actions: req.Actions, + } + err := svc.Authorize(ctx, req.EntityType, policy) + if err != nil { + return authorizeRes{authorized: false}, err + } + + return authorizeRes{authorized: true}, nil + } +} + +func createPolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(createPolicyReq) + if err := req.validate(); err != nil { + return addPolicyRes{}, err + } + + policy := policies.Policy{ + Subject: req.Subject, + Object: req.Object, + Actions: req.Actions, + } + err := svc.AddPolicy(ctx, req.token, policy) + if err != nil { + return addPolicyRes{}, err + } + + return addPolicyRes{created: true}, nil + } +} + +func updatePolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(updatePolicyReq) + if err := req.validate(); err != nil { + return updatePolicyRes{}, err + } + + policy := policies.Policy{ + Subject: req.Subject, + Object: req.Object, + Actions: 
req.Actions, + } + + err := svc.UpdatePolicy(ctx, req.token, policy) + if err != nil { + return updatePolicyRes{}, err + } + + res := updatePolicyRes{updated: false} + return res, nil + } +} + +func listPolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(listPolicyReq) + if err := req.validate(); err != nil { + return listPolicyRes{}, err + } + pm := policies.Page{ + Total: req.Total, + Offset: req.Offset, + Limit: req.Limit, + OwnerID: req.OwnerID, + Subject: req.Subject, + Object: req.Object, + Action: req.Actions, + } + page, err := svc.ListPolicy(ctx, req.token, pm) + if err != nil { + return listPolicyRes{}, err + } + return buildGroupsResponse(page), nil + } +} + +func deletePolicyEndpoint(svc policies.Service) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (interface{}, error) { + req := request.(deletePolicyReq) + if err := req.validate(); err != nil { + return deletePolicyRes{}, err + } + policy := policies.Policy{ + Subject: req.Subject, + Object: req.Object, + } + if err := svc.DeletePolicy(ctx, req.token, policy); err != nil { + return deletePolicyRes{}, err + } + + return deletePolicyRes{}, nil + } +} + +func toViewPolicyRes(group policies.Policy) viewPolicyRes { + return viewPolicyRes{ + OwnerID: group.OwnerID, + Subject: group.Subject, + Object: group.Object, + Actions: group.Actions, + CreatedAt: group.CreatedAt, + UpdatedAt: group.UpdatedAt, + } +} + +func buildGroupsResponse(page policies.PolicyPage) listPolicyRes { + res := listPolicyRes{ + pageRes: pageRes{ + Limit: page.Limit, + Offset: page.Offset, + Total: page.Total, + }, + Policies: []viewPolicyRes{}, + } + + for _, group := range page.Policies { + res.Policies = append(res.Policies, toViewPolicyRes(group)) + } + + return res +} diff --git a/users/policies/api/http/logging.go b/users/policies/api/http/logging.go new file mode 100644 index 0000000000..14f82d6e4b 
--- /dev/null +++ b/users/policies/api/http/logging.go @@ -0,0 +1,81 @@ +package api + +import ( + "context" + "fmt" + "time" + + mflog "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/users/policies" +) + +var _ policies.Service = (*loggingMiddleware)(nil) + +type loggingMiddleware struct { + logger mflog.Logger + svc policies.Service +} + +func LoggingMiddleware(svc policies.Service, logger mflog.Logger) policies.Service { + return &loggingMiddleware{logger, svc} +} + +func (lm *loggingMiddleware) Authorize(ctx context.Context, domain string, p policies.Policy) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method authorize for client %s took %s to complete", p.Subject, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.Authorize(ctx, domain, p) +} + +func (lm *loggingMiddleware) AddPolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method add_policy for client %s using token %s took %s to complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.AddPolicy(ctx, token, p) +} + +func (lm *loggingMiddleware) UpdatePolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method update_policy for client %s using token %s took %s to complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.UpdatePolicy(ctx, token, p) +} + +func (lm *loggingMiddleware) 
ListPolicy(ctx context.Context, token string, cp policies.Page) (cg policies.PolicyPage, err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method list_policy for client %s using token %s took %s to complete", cp.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.ListPolicy(ctx, token, cp) +} + +func (lm *loggingMiddleware) DeletePolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + message := fmt.Sprintf("Method delete_policy for client %s using token %s took %s to complete", p.Subject, token, time.Since(begin)) + if err != nil { + lm.logger.Warn(fmt.Sprintf("%s with error: %s.", message, err)) + return + } + lm.logger.Info(fmt.Sprintf("%s without errors.", message)) + }(time.Now()) + return lm.svc.DeletePolicy(ctx, token, p) +} diff --git a/users/policies/api/http/metrics.go b/users/policies/api/http/metrics.go new file mode 100644 index 0000000000..6f39d6333a --- /dev/null +++ b/users/policies/api/http/metrics.go @@ -0,0 +1,66 @@ +package api + +import ( + "context" + "time" + + "github.com/go-kit/kit/metrics" + "github.com/mainflux/mainflux/users/policies" +) + +var _ policies.Service = (*metricsMiddleware)(nil) + +type metricsMiddleware struct { + counter metrics.Counter + latency metrics.Histogram + svc policies.Service +} + +// MetricsMiddleware returns a new metrics middleware wrapper. 
+func MetricsMiddleware(svc policies.Service, counter metrics.Counter, latency metrics.Histogram) policies.Service { + return &metricsMiddleware{ + counter: counter, + latency: latency, + svc: svc, + } +} + +func (ms *metricsMiddleware) Authorize(ctx context.Context, entityType string, p policies.Policy) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "authorize").Add(1) + ms.latency.With("method", "authorize").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.Authorize(ctx, entityType, p) +} + +func (ms *metricsMiddleware) AddPolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "add_policy").Add(1) + ms.latency.With("method", "add_policy").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.AddPolicy(ctx, token, p) +} + +func (ms *metricsMiddleware) UpdatePolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "update_policy").Add(1) + ms.latency.With("method", "update_policy").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.UpdatePolicy(ctx, token, p) +} + +func (ms *metricsMiddleware) ListPolicy(ctx context.Context, token string, cp policies.Page) (cg policies.PolicyPage, err error) { + defer func(begin time.Time) { + ms.counter.With("method", "list_policies").Add(1) + ms.latency.With("method", "list_policies").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.ListPolicy(ctx, token, cp) +} + +func (ms *metricsMiddleware) DeletePolicy(ctx context.Context, token string, p policies.Policy) (err error) { + defer func(begin time.Time) { + ms.counter.With("method", "delete_policy").Add(1) + ms.latency.With("method", "delete_policy").Observe(time.Since(begin).Seconds()) + }(time.Now()) + return ms.svc.DeletePolicy(ctx, token, p) +} diff --git a/users/policies/api/http/requests.go 
b/users/policies/api/http/requests.go new file mode 100644 index 0000000000..43911701b9 --- /dev/null +++ b/users/policies/api/http/requests.go @@ -0,0 +1,110 @@ +package api + +import ( + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/users/policies" +) + +type authorizeReq struct { + Subject string `json:"subject,omitempty"` + Object string `json:"object,omitempty"` + Actions []string `json:"actions,omitempty"` + EntityType string `json:"entity_type,omitempty"` +} + +func (req authorizeReq) validate() error { + for _, a := range req.Actions { + if ok := policies.ValidateAction(a); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + if req.Subject == "" { + return apiutil.ErrMissingPolicySub + } + if req.Object == "" { + return apiutil.ErrMissingPolicyObj + } + return nil +} + +type createPolicyReq struct { + token string + Owner string `json:"owner,omitempty"` + Subject string `json:"subject,omitempty"` + Object string `json:"object,omitempty"` + Actions []string `json:"actions,omitempty"` +} + +func (req createPolicyReq) validate() error { + for _, a := range req.Actions { + if ok := policies.ValidateAction(a); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + if req.Subject == "" { + return apiutil.ErrMissingPolicySub + } + if req.Object == "" { + return apiutil.ErrMissingPolicyObj + } + return nil +} + +type updatePolicyReq struct { + token string + Subject string `json:"subject,omitempty"` + Object string `json:"object,omitempty"` + Actions []string `json:"actions,omitempty"` +} + +func (req updatePolicyReq) validate() error { + for _, a := range req.Actions { + if ok := policies.ValidateAction(a); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + if req.Subject == "" { + return apiutil.ErrMissingPolicySub + } + if req.Object == "" { + return apiutil.ErrMissingPolicyObj + } + return nil +} + +type listPolicyReq struct { + token string + Total uint64 + Offset uint64 + Limit uint64 + OwnerID string + Subject string 
+ Object string + Actions string +} + +func (req listPolicyReq) validate() error { + if req.Actions != "" { + if ok := policies.ValidateAction(req.Actions); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + return nil +} + +type deletePolicyReq struct { + token string + Subject string `json:"subject,omitempty"` + Object string `json:"object,omitempty"` +} + +func (req deletePolicyReq) validate() error { + if req.Subject == "" { + return apiutil.ErrMissingPolicySub + } + if req.Object == "" { + return apiutil.ErrMissingPolicyObj + } + + return nil +} diff --git a/users/policies/api/http/responses.go b/users/policies/api/http/responses.go new file mode 100644 index 0000000000..8174ca688c --- /dev/null +++ b/users/policies/api/http/responses.go @@ -0,0 +1,127 @@ +package api + +import ( + "net/http" + "time" + + "github.com/mainflux/mainflux" +) + +var ( + _ mainflux.Response = (*authorizeRes)(nil) + _ mainflux.Response = (*addPolicyRes)(nil) + _ mainflux.Response = (*viewPolicyRes)(nil) + _ mainflux.Response = (*listPolicyRes)(nil) + _ mainflux.Response = (*updatePolicyRes)(nil) + _ mainflux.Response = (*deletePolicyRes)(nil) +) + +type pageRes struct { + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Total uint64 `json:"total"` +} + +type authorizeRes struct { + authorized bool +} + +func (res authorizeRes) Code() int { + return http.StatusOK +} + +func (res authorizeRes) Headers() map[string]string { + return map[string]string{} +} + +func (res authorizeRes) Empty() bool { + return false +} + +type addPolicyRes struct { + created bool +} + +func (res addPolicyRes) Code() int { + if res.created { + return http.StatusCreated + } + + return http.StatusOK +} + +func (res addPolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res addPolicyRes) Empty() bool { + return true +} + +type viewPolicyRes struct { + OwnerID string `json:"owner_id"` + Subject string `json:"subject"` + Object string `json:"object"` + Actions 
[]string `json:"actions"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +func (res viewPolicyRes) Code() int { + return http.StatusOK +} + +func (res viewPolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res viewPolicyRes) Empty() bool { + return false +} + +type updatePolicyRes struct { + updated bool +} + +func (res updatePolicyRes) Code() int { + return http.StatusNoContent +} + +func (res updatePolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res updatePolicyRes) Empty() bool { + return true +} + +type listPolicyRes struct { + pageRes + Policies []viewPolicyRes `json:"policies"` +} + +func (res listPolicyRes) Code() int { + return http.StatusOK +} + +func (res listPolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res listPolicyRes) Empty() bool { + return false +} + +type deletePolicyRes struct{} + +func (res deletePolicyRes) Code() int { + return http.StatusNoContent +} + +func (res deletePolicyRes) Headers() map[string]string { + return map[string]string{} +} + +func (res deletePolicyRes) Empty() bool { + return true +} diff --git a/users/policies/api/http/transport.go b/users/policies/api/http/transport.go new file mode 100644 index 0000000000..9a30b0b3b7 --- /dev/null +++ b/users/policies/api/http/transport.go @@ -0,0 +1,164 @@ +package api + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + kithttp "github.com/go-kit/kit/transport/http" + "github.com/go-zoo/bone" + "github.com/mainflux/mainflux/internal/api" + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" +) + +// MakeHandler returns a HTTP handler for API endpoints. 
+func MakePolicyHandler(svc policies.Service, mux *bone.Mux, logger logger.Logger) http.Handler { + opts := []kithttp.ServerOption{ + kithttp.ServerErrorEncoder(apiutil.LoggingErrorEncoder(logger, api.EncodeError)), + } + mux.Post("/authorize", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("authorize"))(authorizeEndpoint(svc)), + decodeAuthorize, + api.EncodeResponse, + opts..., + )) + + mux.Post("/policies", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("add_policy"))(createPolicyEndpoint(svc)), + decodePolicyCreate, + api.EncodeResponse, + opts..., + )) + + mux.Put("/policies", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("update_policy"))(updatePolicyEndpoint(svc)), + decodePolicyUpdate, + api.EncodeResponse, + opts..., + )) + + mux.Get("/policies", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("list_policies"))(listPolicyEndpoint(svc)), + decodeListPoliciesRequest, + api.EncodeResponse, + opts..., + )) + + mux.Delete("/policies/:subject/:object", kithttp.NewServer( + otelkit.EndpointMiddleware(otelkit.WithOperation("delete_policy"))(deletePolicyEndpoint(svc)), + deletePolicyRequest, + api.EncodeResponse, + opts..., + )) + + return mux +} + +func decodeAuthorize(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + var authReq authorizeReq + if err := json.NewDecoder(r.Body).Decode(&authReq); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + return authReq, nil +} + +func decodePolicyCreate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + + var m policies.Policy + if err := json.NewDecoder(r.Body).Decode(&m); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, 
err) + } + + req := createPolicyReq{ + token: apiutil.ExtractBearerToken(r), + Subject: m.Subject, + Object: m.Object, + Actions: m.Actions, + } + return req, nil +} + +func decodePolicyUpdate(_ context.Context, r *http.Request) (interface{}, error) { + if !strings.Contains(r.Header.Get("Content-Type"), api.ContentType) { + return nil, errors.ErrUnsupportedContentType + } + var m policies.Policy + if err := json.NewDecoder(r.Body).Decode(&m); err != nil { + return nil, errors.Wrap(errors.ErrMalformedEntity, err) + } + + req := updatePolicyReq{ + token: apiutil.ExtractBearerToken(r), + Subject: m.Subject, + Object: m.Object, + Actions: m.Actions, + } + + return req, nil +} + +func decodeListPoliciesRequest(_ context.Context, r *http.Request) (interface{}, error) { + total, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + offset, err := apiutil.ReadNumQuery[uint64](r, api.OffsetKey, api.DefOffset) + if err != nil { + return nil, err + } + limit, err := apiutil.ReadNumQuery[uint64](r, api.LimitKey, api.DefLimit) + if err != nil { + return nil, err + } + ownerID, err := apiutil.ReadStringQuery(r, api.OwnerKey, "") + if err != nil { + return nil, err + } + subject, err := apiutil.ReadStringQuery(r, api.SubjectKey, "") + if err != nil { + return nil, err + } + object, err := apiutil.ReadStringQuery(r, api.ObjectKey, "") + if err != nil { + return nil, err + } + action, err := apiutil.ReadStringQuery(r, api.ActionKey, "") + if err != nil { + return nil, err + } + + req := listPolicyReq{ + token: apiutil.ExtractBearerToken(r), + Total: total, + Offset: offset, + Limit: limit, + OwnerID: ownerID, + Subject: subject, + Object: object, + Actions: action, + } + return req, nil +} + +func deletePolicyRequest(_ context.Context, r *http.Request) (interface{}, error) { + req := deletePolicyReq{ + token: apiutil.ExtractBearerToken(r), + Subject: bone.GetValue(r, "subject"), + Object: bone.GetValue(r, "object"), + } + + 
return req, nil +} diff --git a/users/policies/auth.pb.go b/users/policies/auth.pb.go new file mode 100644 index 0000000000..9aab13e9ed --- /dev/null +++ b/users/policies/auth.pb.go @@ -0,0 +1,945 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: users/policies/auth.proto + +package policies + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthorizeReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sub string `protobuf:"bytes,1,opt,name=sub,proto3" json:"sub,omitempty"` + Obj string `protobuf:"bytes,2,opt,name=obj,proto3" json:"obj,omitempty"` + Act string `protobuf:"bytes,3,opt,name=act,proto3" json:"act,omitempty"` + EntityType string `protobuf:"bytes,4,opt,name=entityType,proto3" json:"entityType,omitempty"` +} + +func (x *AuthorizeReq) Reset() { + *x = AuthorizeReq{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthorizeReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthorizeReq) ProtoMessage() {} + +func (x *AuthorizeReq) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
AuthorizeReq.ProtoReflect.Descriptor instead. +func (*AuthorizeReq) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{0} +} + +func (x *AuthorizeReq) GetSub() string { + if x != nil { + return x.Sub + } + return "" +} + +func (x *AuthorizeReq) GetObj() string { + if x != nil { + return x.Obj + } + return "" +} + +func (x *AuthorizeReq) GetAct() string { + if x != nil { + return x.Act + } + return "" +} + +func (x *AuthorizeReq) GetEntityType() string { + if x != nil { + return x.EntityType + } + return "" +} + +type AuthorizeRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Authorized bool `protobuf:"varint,1,opt,name=authorized,proto3" json:"authorized,omitempty"` +} + +func (x *AuthorizeRes) Reset() { + *x = AuthorizeRes{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthorizeRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthorizeRes) ProtoMessage() {} + +func (x *AuthorizeRes) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthorizeRes.ProtoReflect.Descriptor instead. 
+func (*AuthorizeRes) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{1} +} + +func (x *AuthorizeRes) GetAuthorized() bool { + if x != nil { + return x.Authorized + } + return false +} + +type IssueReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Type uint32 `protobuf:"varint,3,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *IssueReq) Reset() { + *x = IssueReq{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IssueReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IssueReq) ProtoMessage() {} + +func (x *IssueReq) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IssueReq.ProtoReflect.Descriptor instead. 
+func (*IssueReq) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{2} +} + +func (x *IssueReq) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *IssueReq) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *IssueReq) GetType() uint32 { + if x != nil { + return x.Type + } + return 0 +} + +type Token struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Token) Reset() { + *x = Token{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Token) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Token) ProtoMessage() {} + +func (x *Token) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Token.ProtoReflect.Descriptor instead. 
+func (*Token) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{3} +} + +func (x *Token) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type UserIdentity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *UserIdentity) Reset() { + *x = UserIdentity{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UserIdentity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UserIdentity) ProtoMessage() {} + +func (x *UserIdentity) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UserIdentity.ProtoReflect.Descriptor instead. 
+func (*UserIdentity) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{4} +} + +func (x *UserIdentity) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +type AddPolicyReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + Sub string `protobuf:"bytes,2,opt,name=sub,proto3" json:"sub,omitempty"` + Obj string `protobuf:"bytes,3,opt,name=obj,proto3" json:"obj,omitempty"` + Act []string `protobuf:"bytes,4,rep,name=act,proto3" json:"act,omitempty"` +} + +func (x *AddPolicyReq) Reset() { + *x = AddPolicyReq{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddPolicyReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddPolicyReq) ProtoMessage() {} + +func (x *AddPolicyReq) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddPolicyReq.ProtoReflect.Descriptor instead. 
+func (*AddPolicyReq) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{5} +} + +func (x *AddPolicyReq) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *AddPolicyReq) GetSub() string { + if x != nil { + return x.Sub + } + return "" +} + +func (x *AddPolicyReq) GetObj() string { + if x != nil { + return x.Obj + } + return "" +} + +func (x *AddPolicyReq) GetAct() []string { + if x != nil { + return x.Act + } + return nil +} + +type AddPolicyRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Authorized bool `protobuf:"varint,1,opt,name=authorized,proto3" json:"authorized,omitempty"` +} + +func (x *AddPolicyRes) Reset() { + *x = AddPolicyRes{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddPolicyRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddPolicyRes) ProtoMessage() {} + +func (x *AddPolicyRes) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddPolicyRes.ProtoReflect.Descriptor instead. 
+func (*AddPolicyRes) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{6} +} + +func (x *AddPolicyRes) GetAuthorized() bool { + if x != nil { + return x.Authorized + } + return false +} + +type DeletePolicyReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + Sub string `protobuf:"bytes,2,opt,name=sub,proto3" json:"sub,omitempty"` + Obj string `protobuf:"bytes,3,opt,name=obj,proto3" json:"obj,omitempty"` + Act string `protobuf:"bytes,4,opt,name=act,proto3" json:"act,omitempty"` +} + +func (x *DeletePolicyReq) Reset() { + *x = DeletePolicyReq{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeletePolicyReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeletePolicyReq) ProtoMessage() {} + +func (x *DeletePolicyReq) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeletePolicyReq.ProtoReflect.Descriptor instead. 
+func (*DeletePolicyReq) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{7} +} + +func (x *DeletePolicyReq) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *DeletePolicyReq) GetSub() string { + if x != nil { + return x.Sub + } + return "" +} + +func (x *DeletePolicyReq) GetObj() string { + if x != nil { + return x.Obj + } + return "" +} + +func (x *DeletePolicyReq) GetAct() string { + if x != nil { + return x.Act + } + return "" +} + +type DeletePolicyRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Deleted bool `protobuf:"varint,1,opt,name=deleted,proto3" json:"deleted,omitempty"` +} + +func (x *DeletePolicyRes) Reset() { + *x = DeletePolicyRes{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeletePolicyRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeletePolicyRes) ProtoMessage() {} + +func (x *DeletePolicyRes) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeletePolicyRes.ProtoReflect.Descriptor instead. 
+func (*DeletePolicyRes) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{8} +} + +func (x *DeletePolicyRes) GetDeleted() bool { + if x != nil { + return x.Deleted + } + return false +} + +type ListPoliciesReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + Sub string `protobuf:"bytes,2,opt,name=sub,proto3" json:"sub,omitempty"` + Obj string `protobuf:"bytes,3,opt,name=obj,proto3" json:"obj,omitempty"` + Act string `protobuf:"bytes,4,opt,name=act,proto3" json:"act,omitempty"` +} + +func (x *ListPoliciesReq) Reset() { + *x = ListPoliciesReq{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPoliciesReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPoliciesReq) ProtoMessage() {} + +func (x *ListPoliciesReq) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPoliciesReq.ProtoReflect.Descriptor instead. 
+func (*ListPoliciesReq) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{9} +} + +func (x *ListPoliciesReq) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *ListPoliciesReq) GetSub() string { + if x != nil { + return x.Sub + } + return "" +} + +func (x *ListPoliciesReq) GetObj() string { + if x != nil { + return x.Obj + } + return "" +} + +func (x *ListPoliciesReq) GetAct() string { + if x != nil { + return x.Act + } + return "" +} + +type ListPoliciesRes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Objects []string `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` +} + +func (x *ListPoliciesRes) Reset() { + *x = ListPoliciesRes{} + if protoimpl.UnsafeEnabled { + mi := &file_users_policies_auth_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListPoliciesRes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPoliciesRes) ProtoMessage() {} + +func (x *ListPoliciesRes) ProtoReflect() protoreflect.Message { + mi := &file_users_policies_auth_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPoliciesRes.ProtoReflect.Descriptor instead. 
+func (*ListPoliciesRes) Descriptor() ([]byte, []int) { + return file_users_policies_auth_proto_rawDescGZIP(), []int{10} +} + +func (x *ListPoliciesRes) GetObjects() []string { + if x != nil { + return x.Objects + } + return nil +} + +var File_users_policies_auth_proto protoreflect.FileDescriptor + +var file_users_policies_auth_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x6d, 0x61, 0x69, + 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x65, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x22, 0x2e, 0x0a, 0x0c, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x22, 0x50, 0x0a, 0x08, 0x49, 0x73, + 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 
0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x1d, 0x0a, 0x05, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1e, 0x0a, 0x0c, 0x55, + 0x73, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x5a, 0x0a, 0x0c, 0x41, + 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x73, 0x75, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6f, 0x62, 0x6a, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x03, 0x61, 0x63, 0x74, 0x22, 0x2e, 0x0a, 0x0c, 0x41, 0x64, 0x64, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x22, 0x5d, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, + 0x75, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6f, 0x62, 0x6a, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x61, 0x63, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x50, 0x6f, 
0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x22, 0x5d, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x10, 0x0a, 0x03, + 0x73, 0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x75, 0x62, 0x12, 0x10, + 0x0a, 0x03, 0x6f, 0x62, 0x6a, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x62, 0x6a, + 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, + 0x63, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x32, + 0xb6, 0x04, 0x0a, 0x0b, 0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x5b, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x25, 0x2e, 0x6d, + 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x71, 0x1a, 0x25, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, + 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x05, + 0x49, 0x73, 0x73, 0x75, 0x65, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, + 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x49, 0x73, 0x73, 0x75, 0x65, 0x52, 0x65, 0x71, 
0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, + 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x08, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, + 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x25, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, + 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x00, 0x12, + 0x5b, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x2e, 0x6d, + 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x1a, 0x25, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, + 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x41, 0x64, + 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0c, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x2e, 0x6d, + 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x1a, 0x28, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, + 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, + 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 
0x63, 0x69, + 0x65, 0x73, 0x12, 0x28, 0x2e, 0x6d, 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, + 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x28, 0x2e, 0x6d, + 0x61, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_users_policies_auth_proto_rawDescOnce sync.Once + file_users_policies_auth_proto_rawDescData = file_users_policies_auth_proto_rawDesc +) + +func file_users_policies_auth_proto_rawDescGZIP() []byte { + file_users_policies_auth_proto_rawDescOnce.Do(func() { + file_users_policies_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_users_policies_auth_proto_rawDescData) + }) + return file_users_policies_auth_proto_rawDescData +} + +var file_users_policies_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_users_policies_auth_proto_goTypes = []interface{}{ + (*AuthorizeReq)(nil), // 0: mainflux.users.policies.AuthorizeReq + (*AuthorizeRes)(nil), // 1: mainflux.users.policies.AuthorizeRes + (*IssueReq)(nil), // 2: mainflux.users.policies.IssueReq + (*Token)(nil), // 3: mainflux.users.policies.Token + (*UserIdentity)(nil), // 4: mainflux.users.policies.UserIdentity + (*AddPolicyReq)(nil), // 5: mainflux.users.policies.AddPolicyReq + (*AddPolicyRes)(nil), // 6: mainflux.users.policies.AddPolicyRes + (*DeletePolicyReq)(nil), // 7: mainflux.users.policies.DeletePolicyReq + (*DeletePolicyRes)(nil), // 8: mainflux.users.policies.DeletePolicyRes + (*ListPoliciesReq)(nil), // 9: mainflux.users.policies.ListPoliciesReq + (*ListPoliciesRes)(nil), // 10: 
mainflux.users.policies.ListPoliciesRes +} +var file_users_policies_auth_proto_depIdxs = []int32{ + 0, // 0: mainflux.users.policies.AuthService.Authorize:input_type -> mainflux.users.policies.AuthorizeReq + 2, // 1: mainflux.users.policies.AuthService.Issue:input_type -> mainflux.users.policies.IssueReq + 3, // 2: mainflux.users.policies.AuthService.Identify:input_type -> mainflux.users.policies.Token + 5, // 3: mainflux.users.policies.AuthService.AddPolicy:input_type -> mainflux.users.policies.AddPolicyReq + 7, // 4: mainflux.users.policies.AuthService.DeletePolicy:input_type -> mainflux.users.policies.DeletePolicyReq + 9, // 5: mainflux.users.policies.AuthService.ListPolicies:input_type -> mainflux.users.policies.ListPoliciesReq + 1, // 6: mainflux.users.policies.AuthService.Authorize:output_type -> mainflux.users.policies.AuthorizeRes + 3, // 7: mainflux.users.policies.AuthService.Issue:output_type -> mainflux.users.policies.Token + 4, // 8: mainflux.users.policies.AuthService.Identify:output_type -> mainflux.users.policies.UserIdentity + 6, // 9: mainflux.users.policies.AuthService.AddPolicy:output_type -> mainflux.users.policies.AddPolicyRes + 8, // 10: mainflux.users.policies.AuthService.DeletePolicy:output_type -> mainflux.users.policies.DeletePolicyRes + 10, // 11: mainflux.users.policies.AuthService.ListPolicies:output_type -> mainflux.users.policies.ListPoliciesRes + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_users_policies_auth_proto_init() } +func file_users_policies_auth_proto_init() { + if File_users_policies_auth_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_users_policies_auth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthorizeReq); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthorizeRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IssueReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Token); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UserIdentity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddPolicyReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddPolicyRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeletePolicyReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_users_policies_auth_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeletePolicyRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPoliciesReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_users_policies_auth_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPoliciesRes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_users_policies_auth_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_users_policies_auth_proto_goTypes, + DependencyIndexes: file_users_policies_auth_proto_depIdxs, + MessageInfos: file_users_policies_auth_proto_msgTypes, + }.Build() + File_users_policies_auth_proto = out.File + file_users_policies_auth_proto_rawDesc = nil + file_users_policies_auth_proto_goTypes = nil + file_users_policies_auth_proto_depIdxs = nil +} diff --git a/users/policies/auth.proto b/users/policies/auth.proto new file mode 100644 index 0000000000..64dd8cc47d --- /dev/null +++ b/users/policies/auth.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package mainflux.users.policies; + +option go_package = "./policies"; + +service AuthService { + rpc Authorize(AuthorizeReq) returns (AuthorizeRes) {} + rpc Issue(IssueReq) returns (Token) {} + rpc Identify(Token) returns (UserIdentity) {} + rpc AddPolicy(AddPolicyReq) returns (AddPolicyRes) {} + rpc DeletePolicy(DeletePolicyReq) returns (DeletePolicyRes) {} + rpc 
ListPolicies(ListPoliciesReq) returns (ListPoliciesRes) {} +} + +message AuthorizeReq { + string sub = 1; + string obj = 2; + string act = 3; + string entityType = 4; +} + +message AuthorizeRes { + bool authorized = 1; +} + +message IssueReq { + string email = 1; + string password = 2; + uint32 type = 3; +} + +message Token { + string value = 1; +} + +message UserIdentity { + string id = 1; +} + +message AddPolicyReq { + string token = 1; + string sub = 2; + string obj = 3; + repeated string act = 4; +} + +message AddPolicyRes { + bool authorized = 1; +} + +message DeletePolicyReq { + string token = 1; + string sub = 2; + string obj = 3; + string act = 4; +} + +message DeletePolicyRes { + bool deleted = 1; +} + +message ListPoliciesReq { + string token = 1; + string sub = 2; + string obj = 3; + string act = 4; +} + +message ListPoliciesRes { + repeated string objects = 1; +} diff --git a/users/policies/auth_grpc.pb.go b/users/policies/auth_grpc.pb.go new file mode 100644 index 0000000000..af14230b67 --- /dev/null +++ b/users/policies/auth_grpc.pb.go @@ -0,0 +1,285 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.21.12 +// source: users/policies/auth.proto + +package policies + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// AuthServiceClient is the client API for AuthService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type AuthServiceClient interface { + Authorize(ctx context.Context, in *AuthorizeReq, opts ...grpc.CallOption) (*AuthorizeRes, error) + Issue(ctx context.Context, in *IssueReq, opts ...grpc.CallOption) (*Token, error) + Identify(ctx context.Context, in *Token, opts ...grpc.CallOption) (*UserIdentity, error) + AddPolicy(ctx context.Context, in *AddPolicyReq, opts ...grpc.CallOption) (*AddPolicyRes, error) + DeletePolicy(ctx context.Context, in *DeletePolicyReq, opts ...grpc.CallOption) (*DeletePolicyRes, error) + ListPolicies(ctx context.Context, in *ListPoliciesReq, opts ...grpc.CallOption) (*ListPoliciesRes, error) +} + +type authServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAuthServiceClient(cc grpc.ClientConnInterface) AuthServiceClient { + return &authServiceClient{cc} +} + +func (c *authServiceClient) Authorize(ctx context.Context, in *AuthorizeReq, opts ...grpc.CallOption) (*AuthorizeRes, error) { + out := new(AuthorizeRes) + err := c.cc.Invoke(ctx, "/mainflux.users.policies.AuthService/Authorize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authServiceClient) Issue(ctx context.Context, in *IssueReq, opts ...grpc.CallOption) (*Token, error) { + out := new(Token) + err := c.cc.Invoke(ctx, "/mainflux.users.policies.AuthService/Issue", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authServiceClient) Identify(ctx context.Context, in *Token, opts ...grpc.CallOption) (*UserIdentity, error) { + out := new(UserIdentity) + err := c.cc.Invoke(ctx, "/mainflux.users.policies.AuthService/Identify", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authServiceClient) AddPolicy(ctx context.Context, in *AddPolicyReq, opts ...grpc.CallOption) (*AddPolicyRes, error) { + out := new(AddPolicyRes) + err := c.cc.Invoke(ctx, "/mainflux.users.policies.AuthService/AddPolicy", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *authServiceClient) DeletePolicy(ctx context.Context, in *DeletePolicyReq, opts ...grpc.CallOption) (*DeletePolicyRes, error) { + out := new(DeletePolicyRes) + err := c.cc.Invoke(ctx, "/mainflux.users.policies.AuthService/DeletePolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authServiceClient) ListPolicies(ctx context.Context, in *ListPoliciesReq, opts ...grpc.CallOption) (*ListPoliciesRes, error) { + out := new(ListPoliciesRes) + err := c.cc.Invoke(ctx, "/mainflux.users.policies.AuthService/ListPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AuthServiceServer is the server API for AuthService service. +// All implementations must embed UnimplementedAuthServiceServer +// for forward compatibility +type AuthServiceServer interface { + Authorize(context.Context, *AuthorizeReq) (*AuthorizeRes, error) + Issue(context.Context, *IssueReq) (*Token, error) + Identify(context.Context, *Token) (*UserIdentity, error) + AddPolicy(context.Context, *AddPolicyReq) (*AddPolicyRes, error) + DeletePolicy(context.Context, *DeletePolicyReq) (*DeletePolicyRes, error) + ListPolicies(context.Context, *ListPoliciesReq) (*ListPoliciesRes, error) + mustEmbedUnimplementedAuthServiceServer() +} + +// UnimplementedAuthServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedAuthServiceServer struct { +} + +func (UnimplementedAuthServiceServer) Authorize(context.Context, *AuthorizeReq) (*AuthorizeRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method Authorize not implemented") +} +func (UnimplementedAuthServiceServer) Issue(context.Context, *IssueReq) (*Token, error) { + return nil, status.Errorf(codes.Unimplemented, "method Issue not implemented") +} +func (UnimplementedAuthServiceServer) Identify(context.Context, *Token) (*UserIdentity, error) { + return nil, status.Errorf(codes.Unimplemented, "method Identify not implemented") +} +func (UnimplementedAuthServiceServer) AddPolicy(context.Context, *AddPolicyReq) (*AddPolicyRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddPolicy not implemented") +} +func (UnimplementedAuthServiceServer) DeletePolicy(context.Context, *DeletePolicyReq) (*DeletePolicyRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeletePolicy not implemented") +} +func (UnimplementedAuthServiceServer) ListPolicies(context.Context, *ListPoliciesReq) (*ListPoliciesRes, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPolicies not implemented") +} +func (UnimplementedAuthServiceServer) mustEmbedUnimplementedAuthServiceServer() {} + +// UnsafeAuthServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AuthServiceServer will +// result in compilation errors. 
+type UnsafeAuthServiceServer interface { + mustEmbedUnimplementedAuthServiceServer() +} + +func RegisterAuthServiceServer(s grpc.ServiceRegistrar, srv AuthServiceServer) { + s.RegisterService(&AuthService_ServiceDesc, srv) +} + +func _AuthService_Authorize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthorizeReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).Authorize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.users.policies.AuthService/Authorize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).Authorize(ctx, req.(*AuthorizeReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _AuthService_Issue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IssueReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).Issue(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.users.policies.AuthService/Issue", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).Issue(ctx, req.(*IssueReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _AuthService_Identify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Token) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).Identify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.users.policies.AuthService/Identify", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(AuthServiceServer).Identify(ctx, req.(*Token)) + } + return interceptor(ctx, in, info, handler) +} + +func _AuthService_AddPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddPolicyReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).AddPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.users.policies.AuthService/AddPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).AddPolicy(ctx, req.(*AddPolicyReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _AuthService_DeletePolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeletePolicyReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).DeletePolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.users.policies.AuthService/DeletePolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServiceServer).DeletePolicy(ctx, req.(*DeletePolicyReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _AuthService_ListPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPoliciesReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServiceServer).ListPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mainflux.users.policies.AuthService/ListPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(AuthServiceServer).ListPolicies(ctx, req.(*ListPoliciesReq)) + } + return interceptor(ctx, in, info, handler) +} + +// AuthService_ServiceDesc is the grpc.ServiceDesc for AuthService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AuthService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "mainflux.users.policies.AuthService", + HandlerType: (*AuthServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Authorize", + Handler: _AuthService_Authorize_Handler, + }, + { + MethodName: "Issue", + Handler: _AuthService_Issue_Handler, + }, + { + MethodName: "Identify", + Handler: _AuthService_Identify_Handler, + }, + { + MethodName: "AddPolicy", + Handler: _AuthService_AddPolicy_Handler, + }, + { + MethodName: "DeletePolicy", + Handler: _AuthService_DeletePolicy_Handler, + }, + { + MethodName: "ListPolicies", + Handler: _AuthService_ListPolicies_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "users/policies/auth.proto", +} diff --git a/users/policies/mocks/policies.go b/users/policies/mocks/policies.go new file mode 100644 index 0000000000..52d59ab8d0 --- /dev/null +++ b/users/policies/mocks/policies.go @@ -0,0 +1,48 @@ +package mocks + +import ( + "context" + + "github.com/mainflux/mainflux/users/policies" + "github.com/stretchr/testify/mock" +) + +type PolicyRepository struct { + mock.Mock +} + +func (m *PolicyRepository) Delete(ctx context.Context, p policies.Policy) error { + ret := m.Called(ctx, p) + + return ret.Error(0) +} + +func (m *PolicyRepository) Retrieve(ctx context.Context, pm policies.Page) (policies.PolicyPage, error) { + ret := m.Called(ctx, pm) + + return ret.Get(0).(policies.PolicyPage), ret.Error(1) +} + +func (m *PolicyRepository) Save(ctx context.Context, p policies.Policy) error { + ret := m.Called(ctx, p) + + return ret.Error(0) +} + +func (m *PolicyRepository) Update(ctx context.Context, p policies.Policy) error { + 
ret := m.Called(ctx, p) + + return ret.Error(0) +} + +func (m *PolicyRepository) Evaluate(ctx context.Context, entityType string, p policies.Policy) error { + ret := m.Called(ctx, entityType, p) + + return ret.Error(0) +} + +func (m *PolicyRepository) CheckAdmin(ctx context.Context, id string) error { + ret := m.Called(ctx, id) + + return ret.Error(0) +} diff --git a/users/policies/page.go b/users/policies/page.go new file mode 100644 index 0000000000..641e2d8ed7 --- /dev/null +++ b/users/policies/page.go @@ -0,0 +1,28 @@ +package policies + +import "github.com/mainflux/mainflux/internal/apiutil" + +// Metadata represents arbitrary JSON. +type Metadata map[string]interface{} + +// Page contains page metadata that helps navigation. +type Page struct { + Total uint64 `json:"total"` + Offset uint64 `json:"offset"` + Limit uint64 `json:"limit"` + OwnerID string + Subject string + Object string + Action string + Tag string +} + +// Validate check page actions. +func (p Page) Validate() error { + if p.Action != "" { + if ok := ValidateAction(p.Action); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + return nil +} diff --git a/users/policies/policies.go b/users/policies/policies.go new file mode 100644 index 0000000000..fb3da5774a --- /dev/null +++ b/users/policies/policies.go @@ -0,0 +1,108 @@ +package policies + +import ( + "context" + "time" + + "github.com/mainflux/mainflux/internal/apiutil" +) + +// PolicyTypes contains a list of the available policy types currently supported +var PolicyTypes = []string{"c_delete", "c_update", "c_add", "c_list", "g_delete", "g_update", "g_add", "g_list", "m_write", "m_read"} + +// Policy represents an argument struct for making a policy related function calls. 
+type Policy struct { + OwnerID string `json:"owner_id"` + Subject string `json:"subject"` + Object string `json:"object"` + Actions []string `json:"actions"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` +} + +// PolicyPage contains a page of policies. +type PolicyPage struct { + Page + Policies []Policy +} + +// PolicyRepository specifies an account persistence API. +type PolicyRepository interface { + // Save creates a policy for the given Subject, so that, after + // Save, `Subject` has a `relation` on `group_id`. Returns a non-nil + // error in case of failures. + Save(ctx context.Context, p Policy) error + + // CheckAdmin checks if the user is an admin user + CheckAdmin(ctx context.Context, id string) error + + // Evaluate is used to evaluate if you have the correct permissions. + // We evaluate if we are in the same group first then evaluate if the + // object has that action over the subject + Evaluate(ctx context.Context, entityType string, p Policy) error + + // Update updates the policy type. + Update(ctx context.Context, p Policy) error + + // Retrieve retrieves policy for a given input. + Retrieve(ctx context.Context, pm Page) (PolicyPage, error) + + // Delete deletes the policy + Delete(ctx context.Context, p Policy) error +} + +// PolicyService represents a authorization service. It exposes +// functionalities through `auth` to perform authorization. +type PolicyService interface { + // Authorize checks authorization of the given `subject`. Basically, + // Authorize verifies that Is `subject` allowed to `relation` on + // `object`. Authorize returns a non-nil error if the subject has + // no relation on the object (which simply means the operation is + // denied). 
+ Authorize(ctx context.Context, entityType string, p Policy) error + + // AddPolicy creates a policy for the given subject, so that, after + // AddPolicy, `subject` has a `relation` on `object`. Returns a non-nil + // error in case of failures. + AddPolicy(ctx context.Context, token string, p Policy) error + + // UpdatePolicy updates policies based on the given policy structure. + UpdatePolicy(ctx context.Context, token string, p Policy) error + + // ListPolicy lists policies based on the given policy structure. + ListPolicy(ctx context.Context, token string, pm Page) (PolicyPage, error) + + // DeletePolicy removes a policy. + DeletePolicy(ctx context.Context, token string, p Policy) error +} + +// Validate returns an error if policy representation is invalid. +func (p Policy) Validate() error { + if p.Subject == "" { + return apiutil.ErrMissingPolicySub + } + if p.Object == "" { + return apiutil.ErrMissingPolicyObj + } + if len(p.Actions) == 0 { + return apiutil.ErrMalformedPolicyAct + } + for _, p := range p.Actions { + if ok := ValidateAction(p); !ok { + return apiutil.ErrMalformedPolicyAct + } + } + return nil +} + +// ValidateAction check if the action is in policies +func ValidateAction(act string) bool { + for _, v := range PolicyTypes { + if v == act { + return true + } + } + return false + +} diff --git a/users/policies/postgres/doc.go b/users/policies/postgres/doc.go new file mode 100644 index 0000000000..bf560bea28 --- /dev/null +++ b/users/policies/postgres/doc.go @@ -0,0 +1 @@ +package postgres diff --git a/users/policies/postgres/policies.go b/users/policies/postgres/policies.go new file mode 100644 index 0000000000..12857dc929 --- /dev/null +++ b/users/policies/postgres/policies.go @@ -0,0 +1,286 @@ +package postgres + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/jackc/pgtype" + "github.com/mainflux/mainflux/internal/postgres" + "github.com/mainflux/mainflux/pkg/clients" + 
"github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/users/policies" +) + +var _ policies.PolicyRepository = (*policyRepository)(nil) + +var ( + // ErrInvalidEntityType indicates that the entity type is invalid. + ErrInvalidEntityType = errors.New("invalid entity type") +) + +type policyRepository struct { + db postgres.Database +} + +// NewPolicyRepo instantiates a PostgreSQL implementclients.Serviceation of policy repository. +func NewPolicyRepo(db postgres.Database) policies.PolicyRepository { + return &policyRepository{ + db: db, + } +} + +func (pr policyRepository) Save(ctx context.Context, policy policies.Policy) error { + q := `INSERT INTO policies (owner_id, subject, object, actions, created_at) + VALUES (:owner_id, :subject, :object, :actions, :created_at)` + + dbp, err := toDBPolicy(policy) + if err != nil { + return errors.Wrap(errors.ErrCreateEntity, err) + } + + row, err := pr.db.NamedQueryContext(ctx, q, dbp) + if err != nil { + return postgres.HandleError(err, errors.ErrCreateEntity) + } + + defer row.Close() + + return nil +} + +func (pr policyRepository) CheckAdmin(ctx context.Context, id string) error { + q := fmt.Sprintf(`SELECT id FROM clients WHERE id = '%s' AND role = '%d';`, id, clients.AdminRole) + + var clientID string + if err := pr.db.QueryRowxContext(ctx, q).Scan(&clientID); err != nil { + return errors.Wrap(errors.ErrAuthorization, err) + } + if clientID == "" { + return errors.ErrAuthorization + } + + return nil +} + +func (pr policyRepository) Evaluate(ctx context.Context, entityType string, policy policies.Policy) error { + q := "" + switch entityType { + case "client": + // Evaluates if two clients are connected to the same group and the subject has the specified action + // or subject is the owner of the object + q = fmt.Sprintf(`SELECT COALESCE(p.subject, c.id) as subject FROM policies p + JOIN policies p2 ON p.object = p2.object LEFT JOIN clients c ON c.owner_id = :subject AND c.id = :object + WHERE 
(p.subject = :subject AND p2.subject = :object AND '%s' = ANY(p.actions)) OR (c.id IS NOT NULL) LIMIT 1;`, + policy.Actions[0]) + case "group": + // Evaluates if client is connected to the specified group and has the required action + q = fmt.Sprintf(`SELECT DISTINCT policies.subject FROM policies + LEFT JOIN groups ON groups.owner_id = policies.subject AND groups.id = policies.object + WHERE policies.subject = :subject AND policies.object = :object AND '%s' = ANY(policies.actions) + LIMIT 1`, policy.Actions[0]) + default: + return ErrInvalidEntityType + } + + dbu, err := toDBPolicy(policy) + if err != nil { + return errors.Wrap(errors.ErrAuthorization, err) + } + row, err := pr.db.NamedQueryContext(ctx, q, dbu) + if err != nil { + return postgres.HandleError(err, errors.ErrAuthorization) + } + + defer row.Close() + + if ok := row.Next(); !ok { + return errors.Wrap(errors.ErrAuthorization, row.Err()) + } + var rPolicy dbPolicy + if err := row.StructScan(&rPolicy); err != nil { + return err + } + return nil +} + +func (pr policyRepository) Update(ctx context.Context, policy policies.Policy) error { + if err := policy.Validate(); err != nil { + return errors.Wrap(errors.ErrCreateEntity, err) + } + q := `UPDATE policies SET actions = :actions, updated_at = :updated_at, updated_by = :updated_by + WHERE subject = :subject AND object = :object` + + dbu, err := toDBPolicy(policy) + if err != nil { + return errors.Wrap(errors.ErrUpdateEntity, err) + } + + if _, err := pr.db.NamedExecContext(ctx, q, dbu); err != nil { + return errors.Wrap(errors.ErrUpdateEntity, err) + } + + return nil +} + +func (pr policyRepository) Retrieve(ctx context.Context, pm policies.Page) (policies.PolicyPage, error) { + var query []string + var emq string + + if pm.OwnerID != "" { + query = append(query, "owner_id = :owner_id") + } + if pm.Subject != "" { + query = append(query, "subject = :subject") + } + if pm.Object != "" { + query = append(query, "object = :object") + } + if pm.Action != "" { 
+ query = append(query, ":action = ANY (actions)") + } + + if len(query) > 0 { + emq = fmt.Sprintf(" WHERE %s", strings.Join(query, " AND ")) + } + + q := fmt.Sprintf(`SELECT owner_id, subject, object, actions, created_at, updated_at, updated_by + FROM policies %s ORDER BY updated_at LIMIT :limit OFFSET :offset;`, emq) + + dbPage, err := toDBPoliciesPage(pm) + if err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + rows, err := pr.db.NamedQueryContext(ctx, q, dbPage) + if err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + defer rows.Close() + + var items []policies.Policy + for rows.Next() { + dbp := dbPolicy{} + if err := rows.StructScan(&dbp); err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + policy, err := toPolicy(dbp) + if err != nil { + return policies.PolicyPage{}, err + } + + items = append(items, policy) + } + + cq := fmt.Sprintf(`SELECT COUNT(*) FROM policies %s;`, emq) + + total, err := postgres.Total(ctx, pr.db, cq, dbPage) + if err != nil { + return policies.PolicyPage{}, errors.Wrap(errors.ErrViewEntity, err) + } + + page := policies.PolicyPage{ + Policies: items, + Page: policies.Page{ + Total: total, + Offset: pm.Offset, + Limit: pm.Limit, + }, + } + + return page, nil +} + +func (pr policyRepository) Delete(ctx context.Context, p policies.Policy) error { + dbp := dbPolicy{ + Subject: p.Subject, + Object: p.Object, + } + q := `DELETE FROM policies WHERE subject = :subject AND object = :object` + if _, err := pr.db.NamedExecContext(ctx, q, dbp); err != nil { + return errors.Wrap(errors.ErrRemoveEntity, err) + } + return nil +} + +type dbPolicy struct { + OwnerID string `db:"owner_id"` + Subject string `db:"subject"` + Object string `db:"object"` + Actions pgtype.TextArray `db:"actions"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt sql.NullTime `db:"updated_at,omitempty"` + UpdatedBy *string `db:"updated_by,omitempty"` +} + 
+func toDBPolicy(p policies.Policy) (dbPolicy, error) { + var ps pgtype.TextArray + if err := ps.Set(p.Actions); err != nil { + return dbPolicy{}, err + } + var updatedAt sql.NullTime + if !p.UpdatedAt.IsZero() { + updatedAt = sql.NullTime{Time: p.UpdatedAt, Valid: true} + } + var updatedBy *string + if p.UpdatedBy != "" { + updatedBy = &p.UpdatedBy + } + return dbPolicy{ + OwnerID: p.OwnerID, + Subject: p.Subject, + Object: p.Object, + Actions: ps, + CreatedAt: p.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + }, nil +} + +func toPolicy(dbp dbPolicy) (policies.Policy, error) { + var ps []string + for _, e := range dbp.Actions.Elements { + ps = append(ps, e.String) + } + var updatedAt time.Time + if dbp.UpdatedAt.Valid { + updatedAt = dbp.UpdatedAt.Time + } + var updatedBy string + if dbp.UpdatedBy != nil { + updatedBy = *dbp.UpdatedBy + } + return policies.Policy{ + OwnerID: dbp.OwnerID, + Subject: dbp.Subject, + Object: dbp.Object, + Actions: ps, + CreatedAt: dbp.CreatedAt, + UpdatedAt: updatedAt, + UpdatedBy: updatedBy, + }, nil +} + +func toDBPoliciesPage(pm policies.Page) (dbPoliciesPage, error) { + return dbPoliciesPage{ + Total: pm.Total, + Offset: pm.Offset, + Limit: pm.Limit, + OwnerID: pm.OwnerID, + Subject: pm.Subject, + Object: pm.Object, + Action: pm.Action, + }, nil +} + +type dbPoliciesPage struct { + Total uint64 `db:"total"` + Limit uint64 `db:"limit"` + Offset uint64 `db:"offset"` + OwnerID string `db:"owner_id"` + Subject string `db:"subject"` + Object string `db:"object"` + Action string `db:"action"` +} diff --git a/users/policies/postgres/policies_test.go b/users/policies/postgres/policies_test.go new file mode 100644 index 0000000000..60cdd900a0 --- /dev/null +++ b/users/policies/postgres/policies_test.go @@ -0,0 +1,677 @@ +package postgres_test + +import ( + "context" + "fmt" + "testing" + + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/postgres" + 
"github.com/mainflux/mainflux/internal/testsutil" + mfclients "github.com/mainflux/mainflux/pkg/clients" + "github.com/mainflux/mainflux/pkg/errors" + mfgroups "github.com/mainflux/mainflux/pkg/groups" + "github.com/mainflux/mainflux/pkg/uuid" + cpostgres "github.com/mainflux/mainflux/users/clients/postgres" + gpostgres "github.com/mainflux/mainflux/users/groups/postgres" + "github.com/mainflux/mainflux/users/policies" + ppostgres "github.com/mainflux/mainflux/users/policies/postgres" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() +) + +func TestPoliciesSave(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewPolicyRepo(database) + crepo := cpostgres.NewClientRepo(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + client := mfclients.Client{ + ID: uid, + Name: "policy-save@example.com", + Credentials: mfclients.Credentials{ + Identity: "policy-save@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + + clients, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client = clients[0] + + uid = testsutil.GenerateUUID(t, idProvider) + + cases := []struct { + desc string + policy policies.Policy + err error + }{ + { + desc: "add new policy successfully", + policy: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: uid, + Actions: []string{"c_delete"}, + }, + err: nil, + }, + { + desc: "add policy with duplicate subject, object and action", + policy: policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: uid, + Actions: []string{"c_delete"}, + }, + err: errors.ErrConflict, + }, + } + + for _, tc := range cases { + err := repo.Save(context.Background(), tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } +} + +func TestPoliciesEvaluate(t *testing.T) { + 
t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewPolicyRepo(database) + crepo := cpostgres.NewClientRepo(database) + grepo := gpostgres.NewGroupRepo(database) + + client1 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "connectedclients-clientA@example.com", + Credentials: mfclients.Credentials{ + Identity: "connectedclients-clientA@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + client2 := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "connectedclients-clientB@example.com", + Credentials: mfclients.Credentials{ + Identity: "connectedclients-clientB@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + group := mfgroups.Group{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "connecting-group@example.com", + } + + clients1, err := crepo.Save(context.Background(), client1) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client1 = clients1[0] + clients2, err := crepo.Save(context.Background(), client2) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client2 = clients2[0] + group, err = grepo.Save(context.Background(), group) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policy1 := policies.Policy{ + OwnerID: client1.ID, + Subject: client1.ID, + Object: group.ID, + Actions: []string{"c_update", "g_update"}, + } + policy2 := policies.Policy{ + OwnerID: client2.ID, + Subject: client2.ID, + Object: group.ID, + Actions: []string{"c_update", "g_update"}, + } + err = repo.Save(context.Background(), policy1) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + err = repo.Save(context.Background(), policy2) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + Subject string + Object string + Action string + Domain string + err error + }{ + "evaluate valid client update": {client1.ID, client2.ID, "c_update", "client", nil}, + 
"evaluate valid group update": {client1.ID, group.ID, "g_update", "group", nil}, + "evaluate valid client list": {client1.ID, client2.ID, "c_list", "client", errors.ErrAuthorization}, + "evaluate valid group list": {client1.ID, group.ID, "g_list", "group", errors.ErrAuthorization}, + "evaluate invalid client delete": {client1.ID, client2.ID, "c_delete", "client", errors.ErrAuthorization}, + "evaluate invalid group delete": {client1.ID, group.ID, "g_delete", "group", errors.ErrAuthorization}, + "evaluate invalid client update": {"unknown", "unknown", "c_update", "client", errors.ErrAuthorization}, + "evaluate invalid group update": {"unknown", "unknown", "c_update", "group", errors.ErrAuthorization}, + } + + for desc, tc := range cases { + p := policies.Policy{ + Subject: tc.Subject, + Object: tc.Object, + Actions: []string{tc.Action}, + } + err := repo.Evaluate(context.Background(), tc.Domain, p) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } +} + +func TestPoliciesRetrieve(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewPolicyRepo(database) + crepo := cpostgres.NewClientRepo(database) + + uid := testsutil.GenerateUUID(t, idProvider) + + client := mfclients.Client{ + ID: uid, + Name: "single-policy-retrieval@example.com", + Credentials: mfclients.Credentials{ + Identity: "single-policy-retrieval@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + + clients, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + client = clients[0] + + uid, err = idProvider.ID() + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + policy := policies.Policy{ + OwnerID: client.ID, + Subject: client.ID, + Object: uid, + Actions: []string{"c_delete"}, + } + + err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := 
map[string]struct { + Subject string + Object string + err error + }{ + "retrieve existing policy": {uid, uid, nil}, + "retrieve non-existing policy": {"unknown", "unknown", nil}, + } + + for desc, tc := range cases { + pm := policies.Page{ + Subject: tc.Subject, + Object: tc.Object, + } + _, err := repo.Retrieve(context.Background(), pm) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } +} + +func TestPoliciesUpdate(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewPolicyRepo(database) + crepo := cpostgres.NewClientRepo(database) + + cid := testsutil.GenerateUUID(t, idProvider) + pid := testsutil.GenerateUUID(t, idProvider) + + client := mfclients.Client{ + ID: cid, + Name: "policy-update@example.com", + Credentials: mfclients.Credentials{ + Identity: "policy-update@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + + _, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error during saving client: %s", err)) + + policy := policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_delete"}, + } + err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error during saving policy: %s", err)) + + cases := []struct { + desc string + policy policies.Policy + resp policies.Policy + err error + }{ + { + desc: "update policy successfully", + policy: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_update"}, + }, + resp: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_update"}, + }, + err: nil, + }, + { + desc: "update policy with missing owner id", + policy: policies.Policy{ + OwnerID: "", + Subject: cid, + Object: pid, + Actions: []string{"c_delete"}, + }, + resp: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_delete"}, + }, 
+ err: nil, + }, + { + desc: "update policy with missing subject", + policy: policies.Policy{ + OwnerID: cid, + Subject: "", + Object: pid, + Actions: []string{"c_add"}, + }, + resp: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_delete"}, + }, + err: apiutil.ErrMissingPolicySub, + }, + { + desc: "update policy with missing object", + policy: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: "", + Actions: []string{"c_add"}, + }, + resp: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_delete"}, + }, + + err: apiutil.ErrMissingPolicyObj, + }, + { + desc: "update policy with missing action", + policy: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{""}, + }, + resp: policies.Policy{ + OwnerID: cid, + Subject: cid, + Object: pid, + Actions: []string{"c_delete"}, + }, + err: apiutil.ErrMalformedPolicyAct, + }, + } + + for _, tc := range cases { + err := repo.Update(context.Background(), tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + policPage, err := repo.Retrieve(context.Background(), policies.Page{ + Offset: uint64(0), + Limit: uint64(10), + Subject: tc.policy.Subject, + }) + if err == nil { + assert.Equal(t, tc.resp, policPage.Policies[0], fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + } + } +} + +func TestPoliciesRetrievalAll(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + postgres.NewDatabase(db, tracer) + repo := ppostgres.NewPolicyRepo(database) + crepo := cpostgres.NewClientRepo(database) + + var nPolicies = uint64(10) + + clientA := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policyA-retrievalall@example.com", + Credentials: mfclients.Credentials{ + Identity: "policyA-retrievalall@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + clientB := mfclients.Client{ + ID: 
testsutil.GenerateUUID(t, idProvider), + Name: "policyB-retrievalall@example.com", + Credentials: mfclients.Credentials{ + Identity: "policyB-retrievalall@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + + clientsA, err := crepo.Save(context.Background(), clientA) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + clientA = clientsA[0] + clientsB, err := crepo.Save(context.Background(), clientB) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + clientB = clientsB[0] + + for i := uint64(0); i < nPolicies; i++ { + obj := fmt.Sprintf("TestRetrieveAll%d@example.com", i) + if i%2 == 0 { + policy := policies.Policy{ + OwnerID: clientA.ID, + Subject: clientA.ID, + Object: obj, + Actions: []string{"c_delete"}, + } + err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + } + policy := policies.Policy{ + Subject: clientB.ID, + Object: obj, + Actions: []string{"c_add", "c_update"}, + } + err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + } + + cases := map[string]struct { + size uint64 + pm policies.Page + }{ + "retrieve all policies with limit and offset": { + pm: policies.Page{ + Offset: 5, + Limit: nPolicies, + }, + size: 10, + }, + "retrieve all policies by owner id": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + }, + size: 5, + }, + "retrieve policies by wrong owner id": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + }, + size: 0, + }, + "retrieve all policies by Subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Subject: clientA.ID, + }, + size: 5, + }, + "retrieve policies by wrong Subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Subject: "wrongSubject", + }, + size: 0, + }, + + "retrieve all policies by Object": 
{ + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Object: "TestRetrieveAll1@example.com", + }, + size: 1, + }, + "retrieve policies by wrong Object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Object: "TestRetrieveAll45@example.com", + }, + size: 0, + }, + "retrieve all policies by Action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Action: "c_delete", + }, + size: 5, + }, + "retrieve policies by wrong Action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + Action: "wrongAction", + }, + size: 0, + }, + "retrieve all policies by owner id and subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Subject: clientA.ID, + }, + size: 5, + }, + "retrieve policies by wrong owner id and correct subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Subject: clientA.ID, + }, + size: 0, + }, + "retrieve policies by correct owner id and wrong subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Subject: "wrongSubject", + }, + size: 0, + }, + "retrieve policies by wrong owner id and wrong subject": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + }, + size: 0, + }, + "retrieve all policies by owner id and object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Object: "TestRetrieveAll2@example.com", + }, + size: 1, + }, + "retrieve policies by wrong owner id and correct object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Object: "TestRetrieveAll1@example.com", + }, + size: 0, + }, + "retrieve policies by correct owner id and wrong object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + 
OwnerID: clientA.ID, + Object: "TestRetrieveAll45@example.com", + }, + size: 0, + }, + "retrieve policies by wrong owner id and wrong object": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Object: "TestRetrieveAll45@example.com", + }, + size: 0, + }, + "retrieve all policies by owner id and action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Action: "c_delete", + }, + size: 5, + }, + "retrieve policies by wrong owner id and correct action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Action: "c_delete", + }, + size: 0, + }, + "retrieve policies by correct owner id and wrong action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientA.ID, + Action: "wrongAction", + }, + size: 0, + }, + "retrieve policies by wrong owner id and wrong action": { + pm: policies.Page{ + Offset: 0, + Limit: nPolicies, + Total: nPolicies, + OwnerID: clientB.ID, + Action: "wrongAction", + }, + size: 0, + }, + } + for desc, tc := range cases { + page, err := repo.Retrieve(context.Background(), tc.pm) + size := uint64(len(page.Policies)) + assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) + assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", desc, err)) + } +} + +func TestPoliciesDelete(t *testing.T) { + t.Cleanup(func() { testsutil.CleanUpDB(t, db) }) + repo := ppostgres.NewPolicyRepo(database) + crepo := cpostgres.NewClientRepo(database) + + client := mfclients.Client{ + ID: testsutil.GenerateUUID(t, idProvider), + Name: "policy-delete@example.com", + Credentials: mfclients.Credentials{ + Identity: "policy-delete@example.com", + Secret: "pass", + }, + Status: mfclients.EnabledStatus, + } + + subject, err := crepo.Save(context.Background(), client) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + 
objectID := testsutil.GenerateUUID(t, idProvider) + + policy := policies.Policy{ + OwnerID: subject[0].ID, + Subject: subject[0].ID, + Object: objectID, + Actions: []string{"c_delete"}, + } + + err = repo.Save(context.Background(), policy) + require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) + + cases := map[string]struct { + Subject string + Object string + err error + }{ + "delete non-existing policy": {"unknown", "unknown", nil}, + "delete non-existing policy with correct subject": {subject[0].ID, "unknown", nil}, + "delete non-existing policy with correct object": {"unknown", objectID, nil}, + "delete existing policy": {subject[0].ID, objectID, nil}, + } + + for desc, tc := range cases { + policy := policies.Policy{ + Subject: tc.Subject, + Object: tc.Object, + } + err := repo.Delete(context.Background(), policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", desc, tc.err, err)) + } + pm := policies.Page{ + OwnerID: subject[0].ID, + Subject: subject[0].ID, + Object: objectID, + Action: "c_delete", + } + policyPage, err := repo.Retrieve(context.Background(), pm) + assert.Equal(t, uint64(0), policyPage.Total, fmt.Sprintf("retrieve policies unexpected total %d\n", policyPage.Total)) + require.Nil(t, err, fmt.Sprintf("retrieve policies unexpected error: %s", err)) +} diff --git a/users/policies/postgres/setup_test.go b/users/policies/postgres/setup_test.go new file mode 100644 index 0000000000..e377f955a8 --- /dev/null +++ b/users/policies/postgres/setup_test.go @@ -0,0 +1,93 @@ +// Package postgres_test contains tests for PostgreSQL repository +// implementations. 
+package postgres_test + +import ( + "database/sql" + "fmt" + "log" + "os" + "testing" + "time" + + "github.com/jmoiron/sqlx" + pgClient "github.com/mainflux/mainflux/internal/clients/postgres" + "github.com/mainflux/mainflux/internal/postgres" + upostgres "github.com/mainflux/mainflux/users/postgres" + dockertest "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "go.opentelemetry.io/otel" +) + +var ( + db *sqlx.DB + database postgres.Database + tracer = otel.Tracer("repo_tests") +) + +func TestMain(m *testing.M) { + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + container, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.1-alpine", + Env: []string{ + "POSTGRES_USER=test", + "POSTGRES_PASSWORD=test", + "POSTGRES_DB=test", + "listen_addresses = '*'", + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + if err != nil { + log.Fatalf("Could not start container: %s", err) + } + + port := container.GetPort("5432/tcp") + + // exponential backoff-retry, because the application in the container might not be ready to accept connections yet + pool.MaxWait = 120 * time.Second + if err := pool.Retry(func() error { + url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) + db, err := sql.Open("pgx", url) + if err != nil { + return err + } + return db.Ping() + }); err != nil { + log.Fatalf("Could not connect to docker: %s", err) + } + + dbConfig := pgClient.Config{ + Host: "localhost", + Port: port, + User: "test", + Pass: "test", + Name: "test", + SSLMode: "disable", + SSLCert: "", + SSLKey: "", + SSLRootCert: "", + } + + if db, err = pgClient.SetupDB(dbConfig, *upostgres.Migration()); err != nil { + log.Fatalf("Could not setup test DB connection: %s", err) + } + + database = postgres.NewDatabase(db, tracer) + + code := 
m.Run()
+
+	// Defers will not be run when using os.Exit
+	db.Close()
+	if err := pool.Purge(container); err != nil {
+		log.Fatalf("Could not purge container: %s", err)
+	}
+
+	os.Exit(code)
+}
diff --git a/users/policies/service.go b/users/policies/service.go
new file mode 100644
index 0000000000..797db5828f
--- /dev/null
+++ b/users/policies/service.go
@@ -0,0 +1,164 @@
+package policies
+
+import (
+	"context"
+	"time"
+
+	"github.com/mainflux/mainflux"
+	"github.com/mainflux/mainflux/internal/apiutil"
+	"github.com/mainflux/mainflux/pkg/errors"
+	"github.com/mainflux/mainflux/users/jwt"
+)
+
+// AccessToken is the token type accepted by this service; identify
+// rejects any other token type (e.g. refresh tokens).
+const (
+	AccessToken = "access"
+)
+
+// Service is the policies service API; it currently exposes only
+// PolicyService.
+type Service interface {
+	PolicyService
+}
+
+// service implements Service on top of a policy repository and a JWT
+// token repository.
+type service struct {
+	policies   PolicyRepository
+	idProvider mainflux.IDProvider
+	tokens     jwt.TokenRepository
+}
+
+// NewService returns a new policies service implementation.
+func NewService(p PolicyRepository, t jwt.TokenRepository, idp mainflux.IDProvider) Service {
+	return service{
+		policies:   p,
+		tokens:     t,
+		idProvider: idp,
+	}
+}
+
+// Authorize validates the policy and then allows the request if the
+// subject is an admin; otherwise it defers to repository evaluation.
+func (svc service) Authorize(ctx context.Context, entityType string, p Policy) error {
+	if err := p.Validate(); err != nil {
+		return err
+	}
+	// Admins short-circuit evaluation and are always allowed.
+	if err := svc.policies.CheckAdmin(ctx, p.Subject); err == nil {
+		return nil
+	}
+
+	return svc.policies.Evaluate(ctx, entityType, p)
+}
+
+// UpdatePolicy validates the policy, checks that the caller already holds
+// the requested actions on the object, and stamps the audit fields before
+// persisting the update.
+func (svc service) UpdatePolicy(ctx context.Context, token string, p Policy) error {
+	id, err := svc.identify(ctx, token)
+	if err != nil {
+		return err
+	}
+	if err := p.Validate(); err != nil {
+		return err
+	}
+	if err := svc.checkActionRank(ctx, id, p); err != nil {
+		return err
+	}
+	p.UpdatedAt = time.Now()
+	p.UpdatedBy = id
+
+	return svc.policies.Update(ctx, p)
+}
+
+// AddPolicy creates the policy, or replaces its actions if a policy for
+// the same subject and object already exists.
+func (svc service) AddPolicy(ctx context.Context, token string, p Policy) error {
+	id, err := svc.identify(ctx, token)
+	if err != nil {
+		return err
+	}
+	if err := p.Validate(); err != nil { 
+		return err
+	}
+
+	pm := Page{Subject: p.Subject, Object: p.Object, Offset: 0, Limit: 1}
+	page, err := svc.policies.Retrieve(ctx, pm)
+	if err != nil {
+		return err
+	}
+
+	// If the policy already exists, replace the actions.
+	// NOTE(review): this update path skips the checkActionRank guard that
+	// the create path below enforces — confirm that replacing an existing
+	// policy without the rank check is intended.
+	if len(page.Policies) == 1 {
+		p.UpdatedAt = time.Now()
+		p.UpdatedBy = id
+		return svc.policies.Update(ctx, p)
+	}
+
+	if err := svc.checkActionRank(ctx, id, p); err != nil {
+		return err
+	}
+	p.OwnerID = id
+	p.CreatedAt = time.Now()
+
+	return svc.policies.Save(ctx, p)
+}
+
+// DeletePolicy removes the policy after verifying that the caller holds
+// the actions named in it on the target object.
+func (svc service) DeletePolicy(ctx context.Context, token string, p Policy) error {
+	id, err := svc.identify(ctx, token)
+	if err != nil {
+		return err
+	}
+	if err := svc.checkActionRank(ctx, id, p); err != nil {
+		return err
+	}
+
+	return svc.policies.Delete(ctx, p)
+}
+
+// ListPolicy returns all policies for admins; non-admins get a filtered
+// view restricted to their own id.
+func (svc service) ListPolicy(ctx context.Context, token string, pm Page) (PolicyPage, error) {
+	id, err := svc.identify(ctx, token)
+	if err != nil {
+		return PolicyPage{}, err
+	}
+	if err := pm.Validate(); err != nil {
+		return PolicyPage{}, err
+	}
+	// If the user is admin, return all policies
+	if err := svc.policies.CheckAdmin(ctx, id); err == nil {
+		return svc.policies.Retrieve(ctx, pm)
+	}
+
+	// If the user is not admin, return only the policies that they are in.
+	// NOTE(review): Retrieve ANDs its filters, so setting both Subject and
+	// Object to id returns only policies where the caller is subject AND
+	// object at once — confirm this is intended rather than an OR filter.
+	pm.Subject = id
+	pm.Object = id
+
+	return svc.policies.Retrieve(ctx, pm)
+}
+
+// checkActionRank verifies that every action in p is already present in
+// the policy clientID holds on p.Object; it returns ErrHigherPolicyRank
+// when p requests an action the client does not hold. When the client has
+// no policy on the object at all, the check passes.
+func (svc service) checkActionRank(ctx context.Context, clientID string, p Policy) error {
+	page, err := svc.policies.Retrieve(ctx, Page{Subject: clientID, Object: p.Object})
+	if err != nil {
+		return err
+	}
+	if len(page.Policies) != 0 {
+		for _, a := range p.Actions {
+			var found = false
+			for _, v := range page.Policies[0].Actions {
+				if v == a {
+					found = true
+					break
+				}
+			}
+			if !found {
+				return apiutil.ErrHigherPolicyRank
+			}
+		}
+	}
+
+	return nil
+
+}
+
+// identify parses the token and returns the client id it carries; only
+// access tokens are accepted.
+func (svc service) identify(ctx context.Context, tkn string) (string, error) {
+	claims, err := 
svc.tokens.Parse(ctx, tkn) + if err != nil { + return "", errors.Wrap(errors.ErrAuthentication, err) + } + if claims.Type != AccessToken { + return "", errors.ErrAuthentication + } + + return claims.ClientID, nil +} diff --git a/users/policies/service_test.go b/users/policies/service_test.go new file mode 100644 index 0000000000..00157edb90 --- /dev/null +++ b/users/policies/service_test.go @@ -0,0 +1,405 @@ +package policies_test + +import ( + context "context" + fmt "fmt" + "regexp" + "testing" + "time" + + "github.com/mainflux/mainflux/internal/apiutil" + "github.com/mainflux/mainflux/internal/testsutil" + "github.com/mainflux/mainflux/pkg/errors" + "github.com/mainflux/mainflux/pkg/uuid" + "github.com/mainflux/mainflux/users/clients" + "github.com/mainflux/mainflux/users/clients/mocks" + "github.com/mainflux/mainflux/users/hasher" + "github.com/mainflux/mainflux/users/jwt" + "github.com/mainflux/mainflux/users/policies" + pmocks "github.com/mainflux/mainflux/users/policies/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +var ( + idProvider = uuid.New() + phasher = hasher.New() + secret = "strongsecret" + inValidToken = "invalidToken" + memberActions = []string{"g_list"} + authoritiesObj = "authorities" + passRegex = regexp.MustCompile("^.{8,}$") + accessDuration = time.Minute * 1 + refreshDuration = time.Minute * 10 +) + +func TestAddPolicy(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + + policy := policies.Policy{Object: testsutil.GenerateUUID(t, idProvider), Subject: testsutil.GenerateUUID(t, idProvider), Actions: []string{"m_read"}} + + cases := []struct { + desc string + policy 
policies.Policy + page policies.PolicyPage + token string + err error + }{ + { + desc: "add new policy", + policy: policy, + page: policies.PolicyPage{}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: nil, + }, + { + desc: "add existing policy", + policy: policies.Policy{ + Object: policy.Object, + Subject: policy.Subject, + Actions: []string{"m_write"}, + }, + page: policies.PolicyPage{Policies: []policies.Policy{policy}}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: errors.ErrConflict, + }, + { + desc: "add a new policy with owner", + page: policies.PolicyPage{}, + policy: policies.Policy{ + OwnerID: testsutil.GenerateUUID(t, idProvider), + Subject: testsutil.GenerateUUID(t, idProvider), + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"m_read"}, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + }, + { + desc: "add a new policy with more actions", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Subject: testsutil.GenerateUUID(t, idProvider), + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"c_delete", "c_update", "c_add", "c_list"}, + }, + err: nil, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + }, + { + desc: "add a new policy with wrong action", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Subject: testsutil.GenerateUUID(t, idProvider), + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"wrong"}, + }, + err: apiutil.ErrMalformedPolicyAct, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + }, + { + desc: "add a new policy with empty object", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Subject: testsutil.GenerateUUID(t, idProvider), + Actions: 
[]string{"c_delete"}, + }, + err: apiutil.ErrMissingPolicyObj, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + }, + { + desc: "add a new policy with empty subject", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"c_delete"}, + }, + err: apiutil.ErrMissingPolicySub, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + }, + { + desc: "add a new policy with empty action", + page: policies.PolicyPage{}, + policy: policies.Policy{ + Subject: testsutil.GenerateUUID(t, idProvider), + Object: testsutil.GenerateUUID(t, idProvider), + }, + err: apiutil.ErrMalformedPolicyAct, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := pRepo.On("Retrieve", context.Background(), mock.Anything).Return(tc.page, nil) + repoCall2 := pRepo.On("Update", context.Background(), mock.Anything).Return(tc.err) + repoCall3 := pRepo.On("Save", context.Background(), mock.Anything).Return(tc.err) + err := svc.AddPolicy(context.Background(), tc.token, tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if err == nil { + tc.policy.Subject = tc.token + err = svc.Authorize(context.Background(), "client", tc.policy) + require.Nil(t, err, fmt.Sprintf("checking shared %v policy expected to be succeed: %#v", tc.policy, err)) + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + ok = repoCall1.Parent.AssertCalled(t, "Retrieve", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Retrieve was not called on %s", tc.desc)) + ok = 
repoCall3.Parent.AssertCalled(t, "Save", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Save was not called on %s", tc.desc)) + if tc.desc == "add existing policy" { + ok = repoCall2.Parent.AssertCalled(t, "Update", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + } + repoCall.Unset() + repoCall1.Unset() + repoCall2.Unset() + repoCall3.Unset() + } + +} + +func TestAuthorize(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + + cases := []struct { + desc string + policy policies.Policy + domain string + err error + }{ + { + desc: "check valid policy in client domain", + policy: policies.Policy{ + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"c_update"}, + Subject: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher)}, + domain: "client", + err: nil, + }, + { + desc: "check valid policy in group domain", + policy: policies.Policy{ + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"g_update"}, + Subject: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher)}, + domain: "group", + err: errors.ErrConflict, + }, + { + desc: "check invalid policy in client domain", + policy: policies.Policy{ + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"c_update"}, + Subject: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher)}, + domain: "client", + err: nil, + }, + { + desc: "check invalid policy in group domain", + policy: policies.Policy{ + Object: testsutil.GenerateUUID(t, idProvider), + Actions: []string{"g_update"}, 
+ Subject: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher)}, + domain: "group", + err: nil, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(tc.err) + repoCall1 := pRepo.On("Evaluate", context.Background(), tc.domain, mock.Anything).Return(tc.err) + err := svc.Authorize(context.Background(), tc.domain, tc.policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "CheckAdmin", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("CheckAdmin was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } + +} + +func TestDeletePolicy(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + + pr := policies.Policy{Object: authoritiesObj, Actions: memberActions, Subject: testsutil.GenerateUUID(t, idProvider)} + + repoCall := pRepo.On("Delete", context.Background(), pr).Return(nil) + repoCall1 := pRepo.On("Retrieve", context.Background(), mock.Anything).Return(policies.PolicyPage{Policies: []policies.Policy{pr}}, nil) + err := svc.DeletePolicy(context.Background(), testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), pr) + require.Nil(t, err, fmt.Sprintf("deleting %v policy expected to succeed: %s", pr, err)) + ok := repoCall.Parent.AssertCalled(t, "Delete", context.Background(), pr) + assert.True(t, ok, "Delete was not called on deleting policy") + ok = repoCall1.Parent.AssertCalled(t, "Retrieve", context.Background(), mock.Anything) + assert.True(t, ok, "Retrieve was not 
called on deleting policy") + repoCall.Unset() + repoCall1.Unset() +} + +func TestListPolicies(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + + id := testsutil.GenerateUUID(t, idProvider) + + readPolicy := "m_read" + writePolicy := "m_write" + + var nPolicy = uint64(10) + var aPolicies = []policies.Policy{} + for i := uint64(0); i < nPolicy; i++ { + pr := policies.Policy{ + OwnerID: id, + Actions: []string{readPolicy}, + Subject: fmt.Sprintf("thing_%d", i), + Object: fmt.Sprintf("client_%d", i), + } + if i%3 == 0 { + pr.Actions = []string{writePolicy} + } + aPolicies = append(aPolicies, pr) + } + + cases := []struct { + desc string + token string + page policies.Page + response policies.PolicyPage + err error + }{ + { + desc: "list policies with authorized token", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: nil, + response: policies.PolicyPage{ + Page: policies.Page{ + Offset: 0, + Total: nPolicy, + }, + Policies: aPolicies, + }, + }, + { + desc: "list policies with invalid token", + token: inValidToken, + err: errors.ErrAuthentication, + response: policies.PolicyPage{ + Page: policies.Page{ + Offset: 0, + }, + }, + }, + { + desc: "list policies with offset and limit", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + page: policies.Page{ + Offset: 6, + Limit: nPolicy, + }, + response: policies.PolicyPage{ + Page: policies.Page{ + Offset: 6, + Total: nPolicy, + }, + Policies: aPolicies[6:10], + }, + }, + { + desc: "list policies with wrong action", + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, 
phasher), + page: policies.Page{ + Action: "wrong", + }, + response: policies.PolicyPage{}, + err: apiutil.ErrMalformedPolicyAct, + }, + } + + for _, tc := range cases { + repoCall := pRepo.On("CheckAdmin", context.Background(), mock.Anything).Return(nil) + repoCall1 := pRepo.On("Retrieve", context.Background(), tc.page).Return(tc.response, tc.err) + page, err := svc.ListPolicy(context.Background(), tc.token, tc.page) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + assert.Equal(t, tc.response, page, fmt.Sprintf("%s: expected size %v got %v\n", tc.desc, tc.response, page)) + if tc.err == nil { + ok := repoCall.Parent.AssertCalled(t, "Retrieve", context.Background(), tc.page) + assert.True(t, ok, fmt.Sprintf("Retrieve was not called on %s", tc.desc)) + } + repoCall1.Unset() + repoCall.Unset() + } + +} + +func TestUpdatePolicies(t *testing.T) { + cRepo := new(mocks.ClientRepository) + pRepo := new(pmocks.PolicyRepository) + tokenizer := jwt.NewTokenRepo([]byte(secret), accessDuration, refreshDuration) + e := mocks.NewEmailer() + csvc := clients.NewService(cRepo, pRepo, tokenizer, e, phasher, idProvider, passRegex) + svc := policies.NewService(pRepo, tokenizer, idProvider) + + policy := policies.Policy{Object: "obj1", Actions: []string{"m_read"}, Subject: "sub1"} + + cases := []struct { + desc string + action []string + token string + err error + }{ + { + desc: "update policy actions with valid token", + action: []string{"m_write"}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: nil, + }, + { + desc: "update policy action with invalid token", + action: []string{"m_write"}, + token: "non-existent", + err: errors.ErrAuthentication, + }, + { + desc: "update policy action with wrong policy action", + action: []string{"wrong"}, + token: testsutil.GenerateValidToken(t, testsutil.GenerateUUID(t, idProvider), csvc, cRepo, phasher), + err: 
apiutil.ErrMalformedPolicyAct, + }, + } + + for _, tc := range cases { + policy.Actions = tc.action + repoCall := pRepo.On("Retrieve", context.Background(), mock.Anything).Return(policies.PolicyPage{Policies: []policies.Policy{policy}}, nil) + repoCall1 := pRepo.On("Update", context.Background(), mock.Anything).Return(tc.err) + err := svc.UpdatePolicy(context.Background(), tc.token, policy) + assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) + if tc.err == nil { + ok := repoCall1.Parent.AssertCalled(t, "Update", context.Background(), mock.Anything) + assert.True(t, ok, fmt.Sprintf("Update was not called on %s", tc.desc)) + } + repoCall.Unset() + repoCall1.Unset() + } +} diff --git a/users/policies/tracing/tracing.go b/users/policies/tracing/tracing.go new file mode 100644 index 0000000000..82b6530420 --- /dev/null +++ b/users/policies/tracing/tracing.go @@ -0,0 +1,58 @@ +package tracing + +import ( + "context" + + "github.com/mainflux/mainflux/users/policies" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +var _ policies.Service = (*tracingMiddleware)(nil) + +type tracingMiddleware struct { + tracer trace.Tracer + psvc policies.Service +} + +func TracingMiddleware(psvc policies.Service, tracer trace.Tracer) policies.Service { + return &tracingMiddleware{tracer, psvc} +} + +func (tm *tracingMiddleware) Authorize(ctx context.Context, domain string, p policies.Policy) error { + ctx, span := tm.tracer.Start(ctx, "svc_authorize", trace.WithAttributes(attribute.StringSlice("Action", p.Actions))) + defer span.End() + + return tm.psvc.Authorize(ctx, domain, p) +} +func (tm *tracingMiddleware) UpdatePolicy(ctx context.Context, token string, p policies.Policy) error { + ctx, span := tm.tracer.Start(ctx, "svc_update_policy", trace.WithAttributes(attribute.StringSlice("Actions", p.Actions))) + defer span.End() + + return tm.psvc.UpdatePolicy(ctx, token, p) + +} + +func (tm 
*tracingMiddleware) AddPolicy(ctx context.Context, token string, p policies.Policy) error { + ctx, span := tm.tracer.Start(ctx, "svc_add_policy", trace.WithAttributes(attribute.StringSlice("Actions", p.Actions))) + defer span.End() + + return tm.psvc.AddPolicy(ctx, token, p) + +} + +func (tm *tracingMiddleware) DeletePolicy(ctx context.Context, token string, p policies.Policy) error { + ctx, span := tm.tracer.Start(ctx, "svc_delete_policy", trace.WithAttributes(attribute.String("Subject", p.Subject), attribute.String("Object", p.Object))) + defer span.End() + + return tm.psvc.DeletePolicy(ctx, token, p) + +} + +func (tm *tracingMiddleware) ListPolicy(ctx context.Context, token string, pm policies.Page) (policies.PolicyPage, error) { + ctx, span := tm.tracer.Start(ctx, "svc_list_policy") + defer span.End() + + return tm.psvc.ListPolicy(ctx, token, pm) + +} diff --git a/users/postgres/database.go b/users/postgres/database.go deleted file mode 100644 index 946cb8d9ba..0000000000 --- a/users/postgres/database.go +++ /dev/null @@ -1,60 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - - "github.com/jmoiron/sqlx" - "github.com/opentracing/opentracing-go" -) - -var _ Database = (*database)(nil) - -type database struct { - db *sqlx.DB -} - -// Database provides a database interface -type Database interface { - NamedExecContext(context.Context, string, interface{}) (sql.Result, error) - QueryRowxContext(context.Context, string, ...interface{}) *sqlx.Row - NamedQueryContext(context.Context, string, interface{}) (*sqlx.Rows, error) - GetContext(context.Context, interface{}, string, ...interface{}) error -} - -// NewDatabase creates a ThingDatabase instance -func NewDatabase(db *sqlx.DB) Database { - return &database{ - db: db, - } -} - -func (dm database) NamedExecContext(ctx context.Context, query string, args interface{}) (sql.Result, error) { - addSpanTags(ctx, query) - return dm.db.NamedExecContext(ctx, query, args) -} - -func (dm database) 
QueryRowxContext(ctx context.Context, query string, args ...interface{}) *sqlx.Row { - addSpanTags(ctx, query) - return dm.db.QueryRowxContext(ctx, query, args...) -} - -func (dm database) NamedQueryContext(ctx context.Context, query string, args interface{}) (*sqlx.Rows, error) { - addSpanTags(ctx, query) - return dm.db.NamedQueryContext(ctx, query, args) -} - -func (dm database) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { - addSpanTags(ctx, query) - return dm.db.GetContext(ctx, dest, query, args...) -} - -func addSpanTags(ctx context.Context, query string) { - span := opentracing.SpanFromContext(ctx) - if span != nil { - span.SetTag("sql.statement", query) - span.SetTag("span.kind", "client") - span.SetTag("peer.service", "postgres") - span.SetTag("db.type", "sql") - } -} diff --git a/users/postgres/doc.go b/users/postgres/doc.go deleted file mode 100644 index 522b9a7acf..0000000000 --- a/users/postgres/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package postgres contains repository implementations using PostgreSQL as -// the underlying database. 
-package postgres diff --git a/users/postgres/init.go b/users/postgres/init.go index 5bb917e421..effcb91f0b 100644 --- a/users/postgres/init.go +++ b/users/postgres/init.go @@ -1,57 +1,64 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - package postgres -import migrate "github.com/rubenv/sql-migrate" +import ( + _ "github.com/jackc/pgx/v5/stdlib" // required for SQL access + migrate "github.com/rubenv/sql-migrate" +) // Migration of Users service func Migration() *migrate.MemoryMigrationSource { return &migrate.MemoryMigrationSource{ Migrations: []*migrate.Migration{ { - Id: "users_1", + Id: "clients_01", + // VARCHAR(36) for columns with IDs, as UUIDs have a maximum of 36 characters + // STATUS 0 to imply enabled and 1 to imply disabled + // Role 0 to imply user role and 1 to imply admin role Up: []string{ - `CREATE TABLE IF NOT EXISTS users ( - email VARCHAR(254) PRIMARY KEY, - password CHAR(60) NOT NULL + `CREATE TABLE IF NOT EXISTS clients ( + id VARCHAR(36) PRIMARY KEY, + name VARCHAR(254), + owner_id VARCHAR(36), + identity VARCHAR(254) NOT NULL UNIQUE, + secret TEXT NOT NULL, + tags TEXT[], + metadata JSONB, + created_at TIMESTAMP, + updated_at TIMESTAMP, + updated_by VARCHAR(254), + status SMALLINT NOT NULL DEFAULT 0 CHECK (status >= 0), + role SMALLINT DEFAULT 0 CHECK (status >= 0) + )`, + `CREATE TABLE IF NOT EXISTS groups ( + id VARCHAR(36) PRIMARY KEY, + parent_id VARCHAR(36), + owner_id VARCHAR(36) NOT NULL, + name VARCHAR(254) NOT NULL, + description VARCHAR(1024), + metadata JSONB, + created_at TIMESTAMP, + updated_at TIMESTAMP, + updated_by VARCHAR(254), + status SMALLINT NOT NULL DEFAULT 0 CHECK (status >= 0), + UNIQUE (owner_id, name), + FOREIGN KEY (parent_id) REFERENCES groups (id) ON DELETE CASCADE + )`, + `CREATE TABLE IF NOT EXISTS policies ( + owner_id VARCHAR(36) NOT NULL, + subject VARCHAR(36) NOT NULL, + object VARCHAR(36) NOT NULL, + actions TEXT[] NOT NULL, + created_at TIMESTAMP, + updated_at TIMESTAMP, 
updated_by VARCHAR(254), + FOREIGN KEY (subject) REFERENCES clients (id) ON DELETE CASCADE ON UPDATE CASCADE, + PRIMARY KEY (subject, object) )`, }, - Down: []string{"DROP TABLE users"}, - }, - { - Id: "users_2", - Up: []string{ - `ALTER TABLE IF EXISTS users ADD COLUMN IF NOT EXISTS metadata JSONB`, - }, - }, - { - Id: "users_3", - Up: []string{ - `CREATE EXTENSION IF NOT EXISTS "pgcrypto"; - ALTER TABLE IF EXISTS users ADD COLUMN IF NOT EXISTS - id UUID NOT NULL DEFAULT gen_random_uuid()`, - }, - }, - { - Id: "users_4", - Up: []string{ - `ALTER TABLE IF EXISTS users DROP CONSTRAINT users_pkey`, - `ALTER TABLE IF EXISTS users ADD CONSTRAINT users_email_key UNIQUE (email)`, - `ALTER TABLE IF EXISTS users ADD PRIMARY KEY (id)`, - }, - }, - { - Id: "users_5", - Up: []string{ - `DO $$ - BEGIN - IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'user_status') THEN - CREATE TYPE user_status AS ENUM ('enabled', 'disabled'); - END IF; - END$$;`, - `ALTER TABLE IF EXISTS users ADD COLUMN IF NOT EXISTS - status USER_STATUS NOT NULL DEFAULT 'enabled'`, + Down: []string{ + `DROP TABLE IF EXISTS clients`, + `DROP TABLE IF EXISTS groups`, + `DROP TABLE IF EXISTS policies`, }, }, }, diff --git a/users/postgres/setup_test.go b/users/postgres/setup_test.go deleted file mode 100644 index 1b366a8e74..0000000000 --- a/users/postgres/setup_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package postgres_test contains tests for PostgreSQL repository -// implementations. 
-package postgres_test - -import ( - "database/sql" - "fmt" - "log" - "os" - "testing" - - "github.com/jmoiron/sqlx" - - pgClient "github.com/mainflux/mainflux/internal/clients/postgres" - "github.com/mainflux/mainflux/users/postgres" - dockertest "github.com/ory/dockertest/v3" -) - -var db *sqlx.DB - -func TestMain(m *testing.M) { - pool, err := dockertest.NewPool("") - if err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - cfg := []string{ - "POSTGRES_USER=test", - "POSTGRES_PASSWORD=test", - "POSTGRES_DB=test", - } - container, err := pool.Run("postgres", "13.3-alpine", cfg) - if err != nil { - log.Fatalf("Could not start container: %s", err) - } - - port := container.GetPort("5432/tcp") - - if err := pool.Retry(func() error { - url := fmt.Sprintf("host=localhost port=%s user=test dbname=test password=test sslmode=disable", port) - db, err := sql.Open("pgx", url) - if err != nil { - return err - } - return db.Ping() - }); err != nil { - log.Fatalf("Could not connect to docker: %s", err) - } - - dbConfig := pgClient.Config{ - Host: "localhost", - Port: port, - User: "test", - Pass: "test", - Name: "test", - SSLMode: "disable", - SSLCert: "", - SSLKey: "", - SSLRootCert: "", - } - - if db, err = pgClient.SetupDB(dbConfig, *postgres.Migration()); err != nil { - log.Fatalf("Could not setup test DB connection: %s", err) - } - - code := m.Run() - - // Defers will not be run when using os.Exit - db.Close() - if err := pool.Purge(container); err != nil { - log.Fatalf("Could not purge container: %s", err) - } - - os.Exit(code) -} diff --git a/users/postgres/users.go b/users/postgres/users.go deleted file mode 100644 index 094405444c..0000000000 --- a/users/postgres/users.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - "strings" - - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" - 
"github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/users" -) - -var _ users.UserRepository = (*userRepository)(nil) - -type userRepository struct { - db Database -} - -// NewUserRepo instantiates a PostgreSQL implementation of user -// repository. -func NewUserRepo(db Database) users.UserRepository { - return &userRepository{ - db: db, - } -} - -func (ur userRepository) Save(ctx context.Context, user users.User) (string, error) { - q := `INSERT INTO users (email, password, id, metadata, status) VALUES (:email, :password, :id, :metadata, :status) RETURNING id` - if user.ID == "" || user.Email == "" { - return "", errors.ErrMalformedEntity - } - - dbu, err := toDBUser(user) - if err != nil { - return "", errors.Wrap(errors.ErrCreateEntity, err) - } - - row, err := ur.db.NamedQueryContext(ctx, q, dbu) - - if err != nil { - pgErr, ok := err.(*pgconn.PgError) - if ok { - switch pgErr.Code { - case pgerrcode.InvalidTextRepresentation: - return "", errors.Wrap(errors.ErrMalformedEntity, err) - case pgerrcode.UniqueViolation: - return "", errors.Wrap(errors.ErrConflict, err) - } - } - return "", errors.Wrap(errors.ErrCreateEntity, err) - } - - defer row.Close() - row.Next() - var id string - if err := row.Scan(&id); err != nil { - return "", err - } - return id, nil -} - -func (ur userRepository) Update(ctx context.Context, user users.User) error { - q := `UPDATE users SET(email, password, metadata, status) VALUES (:email, :password, :metadata, :status) WHERE email = :email;` - - dbu, err := toDBUser(user) - if err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - if _, err := ur.db.NamedExecContext(ctx, q, dbu); err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - return nil -} - -func (ur userRepository) UpdateUser(ctx context.Context, user users.User) error { - q := `UPDATE users SET metadata = :metadata WHERE email = :email AND status = 'enabled'` - - dbu, err := 
toDBUser(user) - if err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - if _, err := ur.db.NamedExecContext(ctx, q, dbu); err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - return nil -} - -func (ur userRepository) RetrieveByEmail(ctx context.Context, email string) (users.User, error) { - q := `SELECT id, password, metadata FROM users WHERE email = $1 AND status = 'enabled'` - - dbu := dbUser{ - Email: email, - } - - if err := ur.db.QueryRowxContext(ctx, q, email).StructScan(&dbu); err != nil { - if err == sql.ErrNoRows { - return users.User{}, errors.Wrap(errors.ErrNotFound, err) - - } - return users.User{}, errors.Wrap(errors.ErrViewEntity, err) - } - - return toUser(dbu) -} - -func (ur userRepository) RetrieveByID(ctx context.Context, id string) (users.User, error) { - q := `SELECT email, password, metadata FROM users WHERE id = $1` - - dbu := dbUser{ - ID: id, - } - - if err := ur.db.QueryRowxContext(ctx, q, id).StructScan(&dbu); err != nil { - if err == sql.ErrNoRows { - return users.User{}, errors.Wrap(errors.ErrNotFound, err) - - } - return users.User{}, errors.Wrap(errors.ErrViewEntity, err) - } - - return toUser(dbu) -} - -func (ur userRepository) RetrieveAll(ctx context.Context, userIDs []string, pm users.PageMetadata) (users.UserPage, error) { - eq, ep, err := createEmailQuery("", pm.Email) - if err != nil { - return users.UserPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - mq, mp, err := createMetadataQuery("", pm.Metadata) - if err != nil { - return users.UserPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - aq := fmt.Sprintf("status = '%s'", pm.Status) - if pm.Status == users.AllStatusKey { - aq = "" - } - - var query []string - var emq string - if eq != "" { - query = append(query, eq) - } - if mq != "" { - query = append(query, mq) - } - if aq != "" { - query = append(query, aq) - } - - if len(userIDs) > 0 { - query = append(query, fmt.Sprintf("id IN ('%s')", strings.Join(userIDs, "','"))) - } - if 
len(query) > 0 { - emq = fmt.Sprintf(" WHERE %s", strings.Join(query, " AND ")) - } - - q := fmt.Sprintf(`SELECT id, email, metadata FROM users %s ORDER BY email LIMIT :limit OFFSET :offset;`, emq) - params := map[string]interface{}{ - "limit": pm.Limit, - "offset": pm.Offset, - "email": ep, - "metadata": mp, - } - - rows, err := ur.db.NamedQueryContext(ctx, q, params) - if err != nil { - return users.UserPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - defer rows.Close() - - var items []users.User - for rows.Next() { - dbusr := dbUser{} - if err := rows.StructScan(&dbusr); err != nil { - return users.UserPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - user, err := toUser(dbusr) - if err != nil { - return users.UserPage{}, err - } - - items = append(items, user) - } - - cq := fmt.Sprintf(`SELECT COUNT(*) FROM users %s;`, emq) - - total, err := total(ctx, ur.db, cq, params) - if err != nil { - return users.UserPage{}, errors.Wrap(errors.ErrViewEntity, err) - } - - page := users.UserPage{ - Users: items, - PageMetadata: users.PageMetadata{ - Total: total, - Offset: pm.Offset, - Limit: pm.Limit, - }, - } - - return page, nil -} - -func (ur userRepository) UpdatePassword(ctx context.Context, email, password string) error { - q := `UPDATE users SET password = :password WHERE status = 'enabled' AND email = :email` - - db := dbUser{ - Email: email, - Password: password, - } - - if _, err := ur.db.NamedExecContext(ctx, q, db); err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - return nil -} - -func (ur userRepository) ChangeStatus(ctx context.Context, id, status string) error { - q := fmt.Sprintf(`UPDATE users SET status = '%s' WHERE id = :id`, status) - - dbu := dbUser{ - ID: id, - } - - if _, err := ur.db.NamedExecContext(ctx, q, dbu); err != nil { - return errors.Wrap(errors.ErrUpdateEntity, err) - } - - return nil -} - -type dbUser struct { - ID string `db:"id"` - Email string `db:"email"` - Password string `db:"password"` - Metadata 
[]byte `db:"metadata"` - Groups []auth.Group `db:"groups"` - Status string `db:"status"` -} - -func toDBUser(u users.User) (dbUser, error) { - data := []byte("{}") - if len(u.Metadata) > 0 { - b, err := json.Marshal(u.Metadata) - if err != nil { - return dbUser{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - data = b - } - - return dbUser{ - ID: u.ID, - Email: u.Email, - Password: u.Password, - Metadata: data, - Status: u.Status, - }, nil -} - -func total(ctx context.Context, db Database, query string, params interface{}) (uint64, error) { - rows, err := db.NamedQueryContext(ctx, query, params) - if err != nil { - return 0, err - } - defer rows.Close() - total := uint64(0) - if rows.Next() { - if err := rows.Scan(&total); err != nil { - return 0, err - } - } - return total, nil -} - -func toUser(dbu dbUser) (users.User, error) { - var metadata map[string]interface{} - if dbu.Metadata != nil { - if err := json.Unmarshal([]byte(dbu.Metadata), &metadata); err != nil { - return users.User{}, errors.Wrap(errors.ErrMalformedEntity, err) - } - } - - return users.User{ - ID: dbu.ID, - Email: dbu.Email, - Password: dbu.Password, - Metadata: metadata, - Status: dbu.Status, - }, nil -} - -func createEmailQuery(entity string, email string) (string, string, error) { - if email == "" { - return "", "", nil - } - - // Create LIKE operator to search Users with email containing a given string - param := fmt.Sprintf(`%%%s%%`, email) - query := fmt.Sprintf("%semail LIKE :email", entity) - - return query, param, nil -} - -func createMetadataQuery(entity string, um users.Metadata) (string, []byte, error) { - if len(um) == 0 { - return "", nil, nil - } - - param, err := json.Marshal(um) - if err != nil { - return "", nil, err - } - query := fmt.Sprintf("%smetadata @> :metadata", entity) - - return query, param, nil -} diff --git a/users/postgres/users_test.go b/users/postgres/users_test.go deleted file mode 100644 index d3d5a9435d..0000000000 --- a/users/postgres/users_test.go +++ 
/dev/null @@ -1,279 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package postgres_test - -import ( - "context" - "fmt" - "testing" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/users" - "github.com/mainflux/mainflux/users/postgres" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var idProvider = uuid.New() - -func TestUserSave(t *testing.T) { - email := "user-save@example.com" - - uid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - user users.User - response string - err error - }{ - { - desc: "new user", - user: users.User{ - ID: uid, - Email: email, - Password: "pass", - Status: users.EnabledStatusKey, - }, - response: uid, - err: nil, - }, - { - desc: "duplicate user", - user: users.User{ - ID: uid, - Email: email, - Password: "pass", - Status: users.EnabledStatusKey, - }, - response: "", - err: errors.ErrConflict, - }, - { - desc: "invalid user status", - user: users.User{ - ID: uid, - Email: email, - Password: "pass", - Status: "invalid", - }, - response: "", - err: errors.ErrMalformedEntity, - }, - } - - dbMiddleware := postgres.NewDatabase(db) - repo := postgres.NewUserRepo(dbMiddleware) - - for _, tc := range cases { - resp, err := repo.Save(context.Background(), tc.user) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, resp, fmt.Sprintf("%s: expected %s got %s", tc.desc, tc.response, resp)) - } -} - -func TestSingleUserRetrieval(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - repo := postgres.NewUserRepo(dbMiddleware) - - email := "user-retrieval@example.com" - - uid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - user := users.User{ - ID: uid, - Email: email, - Password: "pass", - Status: 
users.EnabledStatusKey, - Metadata: make(users.Metadata), - } - - _, err = repo.Save(context.Background(), user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - email string - response users.User - err error - }{ - { - desc: "existing user", - email: email, - response: user, - err: nil, - }, - { - desc: "non-existing user", - email: "unknown@example.com", - response: users.User{}, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - resp, err := repo.RetrieveByEmail(context.Background(), tc.email) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, tc.response.ID, resp.ID, fmt.Sprintf("%s: got incorrect user from RetrieveByEmail", tc.desc)) - assert.Equal(t, tc.response.Email, resp.Email, fmt.Sprintf("%s: got incorrect user from RetrieveByEmail", tc.desc)) - } -} - -func TestRetrieveAll(t *testing.T) { - dbMiddleware := postgres.NewDatabase(db) - userRepo := postgres.NewUserRepo(dbMiddleware) - metaNum := uint64(2) - var nUsers = uint64(10) - - meta := users.Metadata{ - "admin": "true", - } - - wrongMeta := users.Metadata{ - "wrong": "true", - } - - var ids []string - for i := uint64(0); i < nUsers; i++ { - uid, err := idProvider.ID() - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - email := fmt.Sprintf("TestRetrieveAll%d@example.com", i) - user := users.User{ - ID: uid, - Email: email, - Password: "pass", - Status: users.EnabledStatusKey, - } - if i < metaNum { - user.Metadata = meta - } - ids = append(ids, uid) - _, err = userRepo.Save(context.Background(), user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - } - - cases := []struct { - desc string - email string - offset uint64 - limit uint64 - size uint64 - total uint64 - ids []string - metadata users.Metadata - }{ - { - desc: "retrieve all users filtered by email", - email: "All", - offset: 0, - limit: nUsers, - size: nUsers, - 
total: nUsers, - ids: ids, - }, - { - desc: "retrieve all users by email with limit and offset", - email: "All", - offset: 2, - limit: 5, - size: 5, - total: nUsers, - ids: ids, - }, - { - desc: "retrieve all users by metadata", - email: "All", - offset: 0, - limit: nUsers, - size: metaNum, - total: nUsers, - metadata: meta, - ids: ids, - }, - { - desc: "retrieve users by metadata and ids", - email: "All", - offset: 0, - limit: nUsers, - size: 1, - total: nUsers, - metadata: meta, - ids: []string{ids[0]}, - }, - { - desc: "retrieve users by wrong metadata", - email: "All", - offset: 0, - limit: nUsers, - size: 0, - total: nUsers, - metadata: wrongMeta, - ids: ids, - }, - { - desc: "retrieve users by wrong metadata and ids", - email: "All", - offset: 0, - limit: nUsers, - size: 0, - total: nUsers, - metadata: wrongMeta, - ids: []string{ids[0]}, - }, - { - desc: "retrieve all users by list of ids with limit and offset", - email: "All", - offset: 2, - limit: 5, - size: 5, - total: nUsers, - ids: ids, - }, - { - desc: "retrieve all users by list of ids with limit and offset and metadata", - email: "All", - offset: 1, - limit: 5, - size: 1, - total: nUsers, - ids: ids[0:5], - metadata: meta, - }, - { - desc: "retrieve all users from empty ids", - email: "All", - offset: 0, - limit: nUsers, - size: nUsers, - total: nUsers, - ids: []string{}, - }, - { - desc: "retrieve all users from empty ids with offset", - email: "All", - offset: 1, - limit: 5, - size: 5, - total: nUsers, - ids: []string{}, - }, - } - for _, tc := range cases { - pm := users.PageMetadata{ - Offset: tc.offset, - Limit: tc.limit, - Email: tc.email, - Metadata: tc.metadata, - Status: users.EnabledStatusKey, - } - - page, err := userRepo.RetrieveAll(context.Background(), tc.ids, pm) - size := uint64(len(page.Users)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) - assert.Nil(t, err, fmt.Sprintf("%s: expected no error got %d\n", tc.desc, err)) - } -} 
diff --git a/users/service.go b/users/service.go deleted file mode 100644 index 29a51eff0d..0000000000 --- a/users/service.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package users - -import ( - "context" - "regexp" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/auth" - "github.com/mainflux/mainflux/internal/apiutil" - "github.com/mainflux/mainflux/pkg/errors" -) - -const ( - memberRelationKey = "member" - authoritiesObjKey = "authorities" - usersObjKey = "users" - EnabledStatusKey = "enabled" - DisabledStatusKey = "disabled" - AllStatusKey = "all" -) - -var ( - // ErrMissingResetToken indicates malformed or missing reset token - // for reseting password. - ErrMissingResetToken = errors.New("missing reset token") - - // ErrRecoveryToken indicates error in generating password recovery token. - ErrRecoveryToken = errors.New("failed to generate password recovery token") - - // ErrGetToken indicates error in getting signed token. - ErrGetToken = errors.New("failed to fetch signed token") - - // ErrPasswordFormat indicates weak password. - ErrPasswordFormat = errors.New("password does not meet the requirements") - - // ErrAlreadyEnabledUser indicates the user is already enabled. - ErrAlreadyEnabledUser = errors.New("the user is already enabled") - - // ErrAlreadyDisabledUser indicates the user is already disabled. - ErrAlreadyDisabledUser = errors.New("the user is already disabled") -) - -// Service specifies an API that must be fullfiled by the domain service -// implementation, and all of its decorators (e.g. logging & metrics). -type Service interface { - // Register creates new user account. In case of the failed registration, a - // non-nil error value is returned. The user registration is only allowed - // for admin. - Register(ctx context.Context, token string, user User) (string, error) - - // Login authenticates the user given its credentials. 
Successful - // authentication generates new access token. Failed invocations are - // identified by the non-nil error values in the response. - Login(ctx context.Context, user User) (string, error) - - // ViewUser retrieves user info for a given user ID and an authorized token. - ViewUser(ctx context.Context, token, id string) (User, error) - - // ViewProfile retrieves user info for a given token. - ViewProfile(ctx context.Context, token string) (User, error) - - // ListUsers retrieves users list for a valid admin token. - ListUsers(ctx context.Context, token string, pm PageMetadata) (UserPage, error) - - // UpdateUser updates the user metadata. - UpdateUser(ctx context.Context, token string, user User) error - - // GenerateResetToken email where mail will be sent. - // host is used for generating reset link. - GenerateResetToken(ctx context.Context, email, host string) error - - // ChangePassword change users password for authenticated user. - ChangePassword(ctx context.Context, authToken, password, oldPassword string) error - - // ResetPassword change users password in reset flow. - // token can be authentication token or password reset token. - ResetPassword(ctx context.Context, resetToken, password string) error - - // SendPasswordReset sends reset password link to email. - SendPasswordReset(ctx context.Context, host, email, token string) error - - // ListMembers retrieves everything that is assigned to a group identified by groupID. - ListMembers(ctx context.Context, token, groupID string, pm PageMetadata) (UserPage, error) - - // EnableUser logically enableds the user identified with the provided ID - EnableUser(ctx context.Context, token, id string) error - - // DisableUser logically disables the user identified with the provided ID - DisableUser(ctx context.Context, token, id string) error -} - -// PageMetadata contains page metadata that helps navigation. 
-type PageMetadata struct { - Total uint64 - Offset uint64 - Limit uint64 - Email string - Status string - Metadata Metadata -} - -// GroupPage contains a page of groups. -type GroupPage struct { - PageMetadata - Groups []auth.Group -} - -// UserPage contains a page of users. -type UserPage struct { - PageMetadata - Users []User -} - -var _ Service = (*usersService)(nil) - -type usersService struct { - users UserRepository - hasher Hasher - email Emailer - auth mainflux.AuthServiceClient - idProvider mainflux.IDProvider - passRegex *regexp.Regexp -} - -// New instantiates the users service implementation -func New(users UserRepository, hasher Hasher, auth mainflux.AuthServiceClient, e Emailer, idp mainflux.IDProvider, passRegex *regexp.Regexp) Service { - return &usersService{ - users: users, - hasher: hasher, - auth: auth, - email: e, - idProvider: idp, - passRegex: passRegex, - } -} - -func (svc usersService) Register(ctx context.Context, token string, user User) (string, error) { - if err := svc.checkAuthz(ctx, token); err != nil { - return "", err - } - - if err := user.Validate(); err != nil { - return "", err - } - if !svc.passRegex.MatchString(user.Password) { - return "", ErrPasswordFormat - } - - uid, err := svc.idProvider.ID() - if err != nil { - return "", err - } - user.ID = uid - - if err := svc.claimOwnership(ctx, user.ID, usersObjKey, memberRelationKey); err != nil { - return "", err - } - - hash, err := svc.hasher.Hash(user.Password) - if err != nil { - return "", errors.Wrap(errors.ErrMalformedEntity, err) - } - user.Password = hash - if user.Status == "" { - user.Status = EnabledStatusKey - } - - if user.Status != AllStatusKey && - user.Status != EnabledStatusKey && - user.Status != DisabledStatusKey { - return "", apiutil.ErrInvalidStatus - } - - uid, err = svc.users.Save(ctx, user) - if err != nil { - return "", err - } - return uid, nil -} - -func (svc usersService) checkAuthz(ctx context.Context, token string) error { - if err := 
svc.authorize(ctx, "*", "user", "create"); err == nil { - return nil - } - if token == "" { - return errors.ErrAuthentication - } - - ir, err := svc.identify(ctx, token) - if err != nil { - return err - } - - return svc.authorize(ctx, ir.id, authoritiesObjKey, memberRelationKey) -} - -func (svc usersService) Login(ctx context.Context, user User) (string, error) { - dbUser, err := svc.users.RetrieveByEmail(ctx, user.Email) - if err != nil { - return "", errors.Wrap(errors.ErrAuthentication, err) - } - if err := svc.hasher.Compare(user.Password, dbUser.Password); err != nil { - return "", errors.Wrap(errors.ErrAuthentication, err) - } - return svc.issue(ctx, dbUser.ID, dbUser.Email, auth.LoginKey) -} - -func (svc usersService) ViewUser(ctx context.Context, token, id string) (User, error) { - if _, err := svc.identify(ctx, token); err != nil { - return User{}, err - } - - dbUser, err := svc.users.RetrieveByID(ctx, id) - if err != nil { - return User{}, errors.Wrap(errors.ErrNotFound, err) - } - - return User{ - ID: id, - Email: dbUser.Email, - Password: "", - Metadata: dbUser.Metadata, - Status: dbUser.Status, - }, nil -} - -func (svc usersService) ViewProfile(ctx context.Context, token string) (User, error) { - ir, err := svc.identify(ctx, token) - if err != nil { - return User{}, err - } - - dbUser, err := svc.users.RetrieveByEmail(ctx, ir.email) - if err != nil { - return User{}, errors.Wrap(errors.ErrAuthentication, err) - } - - return User{ - ID: dbUser.ID, - Email: ir.email, - Metadata: dbUser.Metadata, - }, nil -} - -func (svc usersService) ListUsers(ctx context.Context, token string, pm PageMetadata) (UserPage, error) { - id, err := svc.identify(ctx, token) - if err != nil { - return UserPage{}, err - } - - if err := svc.authorize(ctx, id.id, "authorities", "member"); err != nil { - return UserPage{}, err - } - return svc.users.RetrieveAll(ctx, nil, pm) -} - -func (svc usersService) UpdateUser(ctx context.Context, token string, u User) error { - ir, err := 
svc.identify(ctx, token) - if err != nil { - return err - } - user := User{ - Email: ir.email, - Metadata: u.Metadata, - } - return svc.users.UpdateUser(ctx, user) -} - -func (svc usersService) GenerateResetToken(ctx context.Context, email, host string) error { - user, err := svc.users.RetrieveByEmail(ctx, email) - if err != nil || user.Email == "" { - return errors.ErrNotFound - } - t, err := svc.issue(ctx, user.ID, user.Email, auth.RecoveryKey) - if err != nil { - return errors.Wrap(ErrRecoveryToken, err) - } - return svc.SendPasswordReset(ctx, host, email, t) -} - -func (svc usersService) ResetPassword(ctx context.Context, resetToken, password string) error { - ir, err := svc.identify(ctx, resetToken) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - u, err := svc.users.RetrieveByEmail(ctx, ir.email) - if err != nil { - return err - } - if u.Email == "" { - return errors.ErrNotFound - } - if !svc.passRegex.MatchString(password) { - return ErrPasswordFormat - } - password, err = svc.hasher.Hash(password) - if err != nil { - return err - } - return svc.users.UpdatePassword(ctx, ir.email, password) -} - -func (svc usersService) ChangePassword(ctx context.Context, authToken, password, oldPassword string) error { - ir, err := svc.identify(ctx, authToken) - if err != nil { - return errors.Wrap(errors.ErrAuthentication, err) - } - if !svc.passRegex.MatchString(password) { - return ErrPasswordFormat - } - u := User{ - Email: ir.email, - Password: oldPassword, - } - if _, err := svc.Login(ctx, u); err != nil { - return errors.ErrAuthentication - } - u, err = svc.users.RetrieveByEmail(ctx, ir.email) - if err != nil || u.Email == "" { - return errors.ErrNotFound - } - - password, err = svc.hasher.Hash(password) - if err != nil { - return err - } - return svc.users.UpdatePassword(ctx, ir.email, password) -} - -func (svc usersService) SendPasswordReset(_ context.Context, host, email, token string) error { - to := []string{email} - return 
svc.email.SendPasswordReset(to, host, token) -} - -func (svc usersService) ListMembers(ctx context.Context, token, groupID string, pm PageMetadata) (UserPage, error) { - if _, err := svc.identify(ctx, token); err != nil { - return UserPage{}, err - } - - userIDs, err := svc.members(ctx, token, groupID, pm.Offset, pm.Limit) - if err != nil { - return UserPage{}, err - } - - if len(userIDs) == 0 { - return UserPage{ - Users: []User{}, - PageMetadata: PageMetadata{ - Total: 0, - Offset: pm.Offset, - Limit: pm.Limit, - }, - }, nil - } - - return svc.users.RetrieveAll(ctx, userIDs, pm) -} - -func (svc usersService) EnableUser(ctx context.Context, token, id string) error { - if err := svc.changeStatus(ctx, token, id, EnabledStatusKey); err != nil { - return err - } - return nil -} - -func (svc usersService) DisableUser(ctx context.Context, token, id string) error { - if err := svc.changeStatus(ctx, token, id, DisabledStatusKey); err != nil { - return err - } - return nil -} - -func (svc usersService) changeStatus(ctx context.Context, token, id, status string) error { - if _, err := svc.identify(ctx, token); err != nil { - return err - } - - dbUser, err := svc.users.RetrieveByID(ctx, id) - if err != nil { - return errors.Wrap(errors.ErrNotFound, err) - } - if dbUser.Status == status { - if status == DisabledStatusKey { - return ErrAlreadyDisabledUser - } - return ErrAlreadyEnabledUser - } - - return svc.users.ChangeStatus(ctx, id, status) -} - -// Auth helpers -func (svc usersService) issue(ctx context.Context, id, email string, keyType uint32) (string, error) { - key, err := svc.auth.Issue(ctx, &mainflux.IssueReq{Id: id, Email: email, Type: keyType}) - if err != nil { - return "", errors.Wrap(errors.ErrNotFound, err) - } - return key.GetValue(), nil -} - -type userIdentity struct { - id string - email string -} - -func (svc usersService) identify(ctx context.Context, token string) (userIdentity, error) { - identity, err := svc.auth.Identify(ctx, &mainflux.Token{Value: 
token}) - if err != nil { - return userIdentity{}, errors.Wrap(errors.ErrAuthentication, err) - } - - return userIdentity{identity.Id, identity.Email}, nil -} - -func (svc usersService) authorize(ctx context.Context, subject, object, relation string) error { - req := &mainflux.AuthorizeReq{ - Sub: subject, - Obj: object, - Act: relation, - } - res, err := svc.auth.Authorize(ctx, req) - if err != nil { - return errors.Wrap(errors.ErrAuthorization, err) - } - if !res.GetAuthorized() { - return errors.ErrAuthorization - } - return nil -} - -func (svc usersService) claimOwnership(ctx context.Context, subject, object, relation string) error { - req := &mainflux.AddPolicyReq{ - Sub: subject, - Obj: object, - Act: relation, - } - res, err := svc.auth.AddPolicy(ctx, req) - if err != nil { - return errors.Wrap(errors.ErrAuthorization, err) - } - if !res.GetAuthorized() { - return errors.ErrAuthorization - } - return nil -} - -func (svc usersService) members(ctx context.Context, token, groupID string, limit, offset uint64) ([]string, error) { - req := mainflux.MembersReq{ - Token: token, - GroupID: groupID, - Offset: offset, - Limit: limit, - Type: "users", - } - - res, err := svc.auth.Members(ctx, &req) - if err != nil { - return nil, err - } - return res.Members, nil -} diff --git a/users/service_test.go b/users/service_test.go deleted file mode 100644 index dbc98fcb65..0000000000 --- a/users/service_test.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package users_test - -import ( - "context" - "fmt" - "regexp" - "testing" - - "github.com/mainflux/mainflux" - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/pkg/uuid" - "github.com/mainflux/mainflux/users" - - "github.com/mainflux/mainflux/users/mocks" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const wrong string = "wrong-value" - -var ( - user = users.User{Email: "user@example.com", Password: 
"password", Metadata: map[string]interface{}{"role": "user"}} - nonExistingUser = users.User{Email: "non-ex-user@example.com", Password: "password", Metadata: map[string]interface{}{"role": "user"}} - host = "example.com" - - idProvider = uuid.New() - passRegex = regexp.MustCompile("^.{8,}$") - - unauthzToken = "unauthorizedtoken" -) - -func newService() users.Service { - userRepo := mocks.NewUserRepository() - hasher := mocks.NewHasher() - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - mockAuthzDB[unauthzToken] = append(mockAuthzDB[unauthzToken], mocks.SubjectSet{Object: "nothing", Relation: "do"}) - mockUsers := map[string]string{user.Email: user.Email, unauthzToken: unauthzToken} - - authSvc := mocks.NewAuthService(mockUsers, mockAuthzDB) - e := mocks.NewEmailer() - - return users.New(userRepo, hasher, authSvc, e, idProvider, passRegex) -} - -func TestRegister(t *testing.T) { - svc := newService() - - cases := []struct { - desc string - user users.User - token string - err error - }{ - { - desc: "register new user", - user: user, - token: user.Email, - err: nil, - }, - { - desc: "register existing user", - user: user, - token: user.Email, - err: errors.ErrConflict, - }, - { - desc: "register new user with weak password", - user: users.User{ - Email: user.Email, - Password: "weak", - }, - token: user.Email, - err: users.ErrPasswordFormat, - }, - { - desc: "register a new user with unauthorized access", - user: users.User{Email: "newuser@example.com", Password: "12345678"}, - err: errors.ErrAuthorization, - token: unauthzToken, - }, - } - - for _, tc := range cases { - _, err := svc.Register(context.Background(), tc.token, tc.user) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestLogin(t *testing.T) { - svc := newService() - _, err := 
svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - noAuthUser := users.User{ - Email: "email@test.com", - Password: "12345678", - } - - cases := []struct { - desc string - user users.User - response string - err error - }{ - { - desc: "login with good credentials", - user: user, - response: user.Email, - err: nil, - }, - { - desc: "login with wrong e-mail", - user: users.User{ - Email: wrong, - Password: user.Password, - }, - response: "", - err: errors.ErrAuthentication, - }, - { - desc: "login with wrong password", - user: users.User{ - Email: user.Email, - Password: wrong, - }, - response: "", - err: errors.ErrAuthentication, - }, - { - desc: "login failed auth", - user: noAuthUser, - response: "", - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - email, err := svc.Login(context.Background(), tc.user) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - assert.Equal(t, tc.response, email, fmt.Sprintf("%s: expected %s but got %s", tc.desc, tc.response, email)) - } -} - -func TestViewUser(t *testing.T) { - svc := newService() - id, err := svc.Register(context.Background(), user.Email, user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - token, err := svc.Login(context.Background(), user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - u := user - u.Password = "" - - cases := []struct { - desc string - user users.User - token string - userID string - err error - }{ - { - desc: "view user with authorized token", - user: u, - token: token, - userID: id, - err: nil, - }, - { - desc: "view user with empty token", - user: users.User{}, - token: "", - userID: id, - err: errors.ErrAuthentication, - }, - { - desc: "view user with valid token and invalid user id", - user: users.User{}, - token: token, - userID: "", - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - 
_, err := svc.ViewUser(context.Background(), tc.token, tc.userID) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestViewProfile(t *testing.T) { - svc := newService() - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - token, err := svc.Login(context.Background(), user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - u := user - u.Password = "" - - cases := []struct { - desc string - user users.User - token string - err error - }{ - { - desc: "valid token's user info", - user: u, - token: token, - err: nil, - }, - { - desc: "invalid token's user info", - user: users.User{}, - token: "", - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - _, err := svc.ViewProfile(context.Background(), tc.token) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestListUsers(t *testing.T) { - svc := newService() - - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - token, err := svc.Login(context.Background(), user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - var nUsers = uint64(10) - - for i := uint64(1); i < nUsers; i++ { - email := fmt.Sprintf("TestListUsers%d@example.com", i) - user := users.User{ - Email: email, - Password: "passpass", - } - _, err := svc.Register(context.Background(), token, user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - } - - cases := []struct { - desc string - token string - offset uint64 - limit uint64 - email string - size uint64 - err error - }{ - { - desc: "list users with authorized token", - token: token, - size: 0, - err: nil, - }, - { - desc: "list users with unauthorized token", - token: unauthzToken, - size: 0, - err: errors.ErrAuthorization, - }, - 
{ - desc: "list user with empty token", - token: "", - size: 0, - err: errors.ErrAuthentication, - }, - { - desc: "list users with offset and limit", - token: token, - offset: 6, - limit: nUsers, - size: nUsers - 6, - }, - { - desc: "list using non-existent user", - token: token, - email: nonExistingUser.Email, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - pm := users.PageMetadata{ - Offset: tc.offset, - Limit: tc.limit, - Email: tc.email, - Metadata: nil, - Status: "all", - } - - page, err := svc.ListUsers(context.Background(), tc.token, pm) - size := uint64(len(page.Users)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", tc.desc, tc.size, size)) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestUpdateUser(t *testing.T) { - svc := newService() - - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - token, err := svc.Login(context.Background(), user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - user.Metadata = map[string]interface{}{"role": "test"} - - cases := []struct { - desc string - user users.User - token string - err error - }{ - { - desc: "update user with valid token", - user: user, - token: token, - err: nil, - }, - { - desc: "update user with invalid token", - user: user, - token: "non-existent", - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - err := svc.UpdateUser(context.Background(), tc.token, tc.user) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestGenerateResetToken(t *testing.T) { - svc := newService() - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - cases := []struct { - desc string - email string - err error - }{ - { - desc: 
"valid user reset token", - email: user.Email, - err: nil, - }, - { - desc: "invalid user rest token", - email: nonExistingUser.Email, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.GenerateResetToken(context.Background(), tc.email, host) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestChangePassword(t *testing.T) { - svc := newService() - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("register user error: %s", err)) - token, _ := svc.Login(context.Background(), user) - - cases := []struct { - desc string - token string - password string - oldPassword string - err error - }{ - { - desc: "valid user change password ", - token: token, - password: "newpassword", - oldPassword: user.Password, - err: nil, - }, - { - desc: "valid user change password with wrong password", - token: token, - password: "newpassword", - oldPassword: "wrongpassword", - err: errors.ErrAuthentication, - }, - { - desc: "valid user change password invalid token", - token: "", - password: "newpassword", - oldPassword: user.Password, - err: errors.ErrAuthentication, - }, - } - - for _, tc := range cases { - err := svc.ChangePassword(context.Background(), tc.token, tc.password, tc.oldPassword) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - - } -} - -func TestResetPassword(t *testing.T) { - svc := newService() - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - mockAuthzDB := map[string][]mocks.SubjectSet{} - mockAuthzDB[user.Email] = append(mockAuthzDB[user.Email], mocks.SubjectSet{Object: "authorities", Relation: "member"}) - authSvc := mocks.NewAuthService(map[string]string{user.Email: user.Email}, mockAuthzDB) - - resetToken, err := authSvc.Issue(context.Background(), &mainflux.IssueReq{Id: 
user.ID, Email: user.Email, Type: 2}) - require.Nil(t, err, fmt.Sprintf("Generating reset token expected to succeed: %s", err)) - cases := []struct { - desc string - token string - password string - err error - }{ - { - desc: "reset user password with valid resetToken", - token: resetToken.GetValue(), - password: password, - err: nil, - }, - { - desc: "reset user password with invalid resetToken", - token: unauthzToken, - password: password, - err: errors.ErrNotFound, - }, - { - desc: "reset user password with empty resetToken", - token: unauthzToken, - password: password, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.ResetPassword(context.Background(), tc.token, tc.password) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestSendPasswordReset(t *testing.T) { - svc := newService() - _, err := svc.Register(context.Background(), user.Email, user) - require.Nil(t, err, fmt.Sprintf("register user error: %s", err)) - token, _ := svc.Login(context.Background(), user) - - cases := []struct { - desc string - token string - email string - err error - }{ - { - desc: "send password reset", - token: token, - email: user.Email, - err: nil, - }, - } - - for _, tc := range cases { - err := svc.SendPasswordReset(context.Background(), host, tc.email, tc.token) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} - -func TestDisableUser(t *testing.T) { - enabledUser1 := users.User{Email: "user1@example.com", Password: "password"} - enabledUser2 := users.User{Email: "user2@example.com", Password: "password", Status: "enabled"} - disabledUser1 := users.User{Email: "user3@example.com", Password: "password", Status: "disabled"} - - svc := newService() - - id, err := svc.Register(context.Background(), user.Email, user) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - user.ID = id - user.Status = 
"enabled" - token, err := svc.Login(context.Background(), user) - require.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - - id, err = svc.Register(context.Background(), token, enabledUser1) - assert.Nil(t, err, fmt.Sprintf("register enabledUser1 error: %s", err)) - enabledUser1.ID = id - enabledUser1.Status = "enabled" - - id, err = svc.Register(context.Background(), token, enabledUser2) - require.Nil(t, err, fmt.Sprintf("register enabledUser2 error: %s", err)) - enabledUser2.ID = id - enabledUser2.Status = "disabled" - - id, err = svc.Register(context.Background(), token, disabledUser1) - require.Nil(t, err, fmt.Sprintf("register disabledUser1 error: %s", err)) - disabledUser1.ID = id - disabledUser1.Status = "disabled" - - cases := []struct { - desc string - id string - token string - err error - }{ - { - desc: "disable user with wrong credentials", - id: enabledUser2.ID, - token: "", - err: errors.ErrAuthentication, - }, - { - desc: "disable existing user", - id: enabledUser2.ID, - token: token, - err: nil, - }, - { - desc: "disable disabled user", - id: enabledUser2.ID, - token: token, - err: users.ErrAlreadyDisabledUser, - }, - { - desc: "disable non-existing user", - id: "", - token: token, - err: errors.ErrNotFound, - }, - } - - for _, tc := range cases { - err := svc.DisableUser(context.Background(), tc.token, tc.id) - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } - - _, err = svc.Login(context.Background(), enabledUser2) - assert.True(t, errors.Contains(err, errors.ErrNotFound), fmt.Sprintf("Login disabled user: expected %s got %s\n", errors.ErrNotFound, err)) - - cases2 := map[string]struct { - status string - size uint64 - response []users.User - }{ - "list enabled users": { - status: "enabled", - size: 2, - response: []users.User{enabledUser1, user}, - }, - "list disabled users": { - status: "disabled", - size: 2, - response: []users.User{enabledUser2, disabledUser1}, - }, - "list 
enabled and disabled users": { - status: "all", - size: 4, - response: []users.User{enabledUser1, enabledUser2, disabledUser1, user}, - }, - } - - for desc, tc := range cases2 { - pm := users.PageMetadata{ - Offset: 0, - Limit: 100, - Status: tc.status, - } - page, err := svc.ListUsers(context.Background(), token, pm) - assert.Nil(t, err, fmt.Sprintf("unexpected error: %s", err)) - size := uint64(len(page.Users)) - assert.Equal(t, tc.size, size, fmt.Sprintf("%s: expected size %d got %d\n", desc, tc.size, size)) - assert.ElementsMatch(t, tc.response, page.Users, fmt.Sprintf("%s: expected %s got %s\n", desc, tc.response, page.Users)) - } -} diff --git a/users/tracing/users.go b/users/tracing/users.go deleted file mode 100644 index 2036b6a564..0000000000 --- a/users/tracing/users.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -// Package tracing contains middlewares that will add spans -// to existing traces. -package tracing - -import ( - "context" - - "github.com/mainflux/mainflux/users" - opentracing "github.com/opentracing/opentracing-go" -) - -const ( - saveOp = "save_op" - retrieveByEmailOp = "retrieve_by_email" - updatePassword = "update_password" - members = "members" -) - -var _ users.UserRepository = (*userRepositoryMiddleware)(nil) - -type userRepositoryMiddleware struct { - tracer opentracing.Tracer - repo users.UserRepository -} - -// UserRepositoryMiddleware tracks request and their latency, and adds spans -// to context. 
-func UserRepositoryMiddleware(tracer opentracing.Tracer, repo users.UserRepository) users.UserRepository { - return userRepositoryMiddleware{ - tracer: tracer, - repo: repo, - } -} - -func (urm userRepositoryMiddleware) Save(ctx context.Context, user users.User) (string, error) { - span := createSpan(ctx, urm.tracer, saveOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.Save(ctx, user) -} - -func (urm userRepositoryMiddleware) UpdateUser(ctx context.Context, user users.User) error { - span := createSpan(ctx, urm.tracer, saveOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.UpdateUser(ctx, user) -} - -func (urm userRepositoryMiddleware) RetrieveByEmail(ctx context.Context, email string) (users.User, error) { - span := createSpan(ctx, urm.tracer, retrieveByEmailOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.RetrieveByEmail(ctx, email) -} - -func (urm userRepositoryMiddleware) RetrieveByID(ctx context.Context, id string) (users.User, error) { - span := createSpan(ctx, urm.tracer, retrieveByEmailOp) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.RetrieveByID(ctx, id) -} - -func (urm userRepositoryMiddleware) UpdatePassword(ctx context.Context, email, password string) error { - span := createSpan(ctx, urm.tracer, updatePassword) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.UpdatePassword(ctx, email, password) -} - -func (urm userRepositoryMiddleware) RetrieveAll(ctx context.Context, ids []string, pm users.PageMetadata) (users.UserPage, error) { - span := createSpan(ctx, urm.tracer, members) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.RetrieveAll(ctx, ids, pm) -} - -func (urm userRepositoryMiddleware) ChangeStatus(ctx context.Context, id, status string) error { - span := createSpan(ctx, urm.tracer, members) - 
defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - - return urm.repo.ChangeStatus(ctx, id, status) -} - -func createSpan(ctx context.Context, tracer opentracing.Tracer, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - return tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - return tracer.StartSpan(opName) -} diff --git a/users/users.go b/users/users.go deleted file mode 100644 index 63002ce761..0000000000 --- a/users/users.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) Mainflux -// SPDX-License-Identifier: Apache-2.0 - -package users - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/mainflux/mainflux/pkg/errors" - "golang.org/x/net/idna" -) - -const ( - maxLocalLen = 64 - maxDomainLen = 255 - maxTLDLen = 24 // longest TLD currently in existence - - atSeparator = "@" - dotSeparator = "." -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile(`^[^\s]+\.[^\s]+$`) - userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") -) - -// Metadata to be used for Mainflux thing or channel for customized -// describing of particular thing or channel. -type Metadata map[string]interface{} - -// User represents a Mainflux user account. Each user is identified given its -// email and password. -type User struct { - ID string - Email string - Password string - Metadata Metadata - Status string -} - -// Validate returns an error if user representation is invalid. -func (u User) Validate() error { - if !isEmail(u.Email) { - return errors.ErrMalformedEntity - } - return nil -} - -// UserRepository specifies an account persistence API. -type UserRepository interface { - // Save persists the user account. A non-nil error is returned to indicate - // operation failure. - Save(ctx context.Context, u User) (string, error) - - // Update updates the user metadata. 
- UpdateUser(ctx context.Context, u User) error - - // RetrieveByEmail retrieves user by its unique identifier (i.e. email). - RetrieveByEmail(ctx context.Context, email string) (User, error) - - // RetrieveByID retrieves user by its unique identifier ID. - RetrieveByID(ctx context.Context, id string) (User, error) - - // RetrieveAll retrieves all users for given array of userIDs. - RetrieveAll(ctx context.Context, userIDs []string, pm PageMetadata) (UserPage, error) - - // UpdatePassword updates password for user with given email - UpdatePassword(ctx context.Context, email, password string) error - - // ChangeStatus changes users status to enabled or disabled - ChangeStatus(ctx context.Context, id, status string) error -} - -func isEmail(email string) bool { - if email == "" { - return false - } - - es := strings.Split(email, atSeparator) - if len(es) != 2 { - return false - } - local, host := es[0], es[1] - - if local == "" || len(local) > maxLocalLen { - return false - } - - hs := strings.Split(host, dotSeparator) - if len(hs) < 2 { - return false - } - domain, ext := hs[0], hs[1] - - // Check subdomain and validate - if len(hs) > 2 { - if domain == "" { - return false - } - - for i := 1; i < len(hs)-1; i++ { - sub := hs[i] - if sub == "" { - return false - } - domain = fmt.Sprintf("%s.%s", domain, sub) - } - - ext = hs[len(hs)-1] - } - - if domain == "" || len(domain) > maxDomainLen { - return false - } - if ext == "" || len(ext) > maxTLDLen { - return false - } - - punyLocal, err := idna.ToASCII(local) - if err != nil { - return false - } - punyHost, err := idna.ToASCII(host) - if err != nil { - return false - } - - if userDotRegexp.MatchString(punyLocal) || !userRegexp.MatchString(punyLocal) || !hostRegexp.MatchString(punyHost) { - return false - } - - return true -} diff --git a/users/users_test.go b/users/users_test.go deleted file mode 100644 index 1010c7211d..0000000000 --- a/users/users_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) Mainflux 
-// SPDX-License-Identifier: Apache-2.0 - -package users_test - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/mainflux/mainflux/pkg/errors" - "github.com/mainflux/mainflux/users" - "github.com/stretchr/testify/assert" -) - -const ( - email = "user@example.com" - password = "password" - - maxLocalLen = 64 - maxDomainLen = 255 - maxTLDLen = 24 -) - -var letters = "abcdefghijklmnopqrstuvwxyz" - -func randomString(n int) string { - b := make([]byte, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} - -func TestValidate(t *testing.T) { - cases := []struct { - desc string - user users.User - err error - }{ - { - desc: "validate user with valid data", - user: users.User{ - Email: email, - Password: password, - }, - err: nil, - }, - { - desc: "validate user with valid domain and subdomain", - user: users.User{ - Email: "user@example.sub.domain.com", - Password: password, - }, - err: nil, - }, - { - desc: "validate user with invalid subdomain", - user: users.User{ - Email: "user@example..domain.com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with invalid domain", - user: users.User{ - Email: "user@.sub.com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with empty email", - user: users.User{ - Email: "", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with invalid email", - user: users.User{ - Email: "userexample.com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with utf8 email (cyrillic)", - user: users.User{ - Email: "почта@кино-россия.рф", - Password: password, - }, - err: nil, - }, - { - desc: "validate user with utf8 email (hieroglyph)", - user: users.User{ - Email: "艾付忧西开@艾付忧西开.再得", - Password: password, - }, - err: nil, - }, - { - desc: "validate user with no email tld", - user: users.User{ - Email: "user@example.", - 
Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with too long email tld", - user: users.User{ - Email: "user@example." + randomString(maxTLDLen+1), - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with no email domain", - user: users.User{ - Email: "user@.com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with too long email domain", - user: users.User{ - Email: "user@" + randomString(maxDomainLen+1) + ".com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with no email local", - user: users.User{ - Email: "@example.com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - { - desc: "validate user with too long email local", - user: users.User{ - Email: randomString(maxLocalLen+1) + "@example.com", - Password: password, - }, - err: errors.ErrMalformedEntity, - }, - } - - for _, tc := range cases { - err := tc.user.Validate() - assert.True(t, errors.Contains(err, tc.err), fmt.Sprintf("%s: expected %s got %s\n", tc.desc, tc.err, err)) - } -} diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b3..8bf0e5b781 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. 
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 0000000000..94b9c44398 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with 
various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d541..a9e0d45c9d 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b807f4..0000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf79..3e8b132579 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. 
-// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. 
- MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. 
- MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 0000000000..7e3145a221 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). 
+#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go new file mode 100644 index 0000000000..9216e0a40c --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -0,0 +1,15 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821603..26df13bba4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a31..e86f1b5fd8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. 
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e4..1c1638fd88 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE new file mode 100644 index 0000000000..d2d1dd933e --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/LICENSE @@ -0,0 +1,17 @@ +ISC License + +Copyright (c) 2013-2017 The btcsuite developers +Copyright (c) 2015-2020 The Decred developers +Copyright (c) 2017 The Lightning Network Developers + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md new file mode 100644 index 0000000000..b84bcdb77d --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/README.md @@ -0,0 +1,72 @@ +secp256k1 +========= + +[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions) +[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) +[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4) + +Package secp256k1 implements optimized secp256k1 elliptic curve operations. + +This package provides an optimized pure Go implementation of elliptic curve +cryptography operations over the secp256k1 curve as well as data structures and +functions for working with public and private secp256k1 keys. See +https://www.secg.org/sec2-v2.pdf for details on the standard. + +In addition, sub packages are provided to produce, verify, parse, and serialize +ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme +specific to Decred) signatures. See the README.md files in the relevant sub +packages for more details about those aspects. 
+ +An overview of the features provided by this package are as follows: + +- Private key generation, serialization, and parsing +- Public key generation, serialization and parsing per ANSI X9.62-1998 + - Parses uncompressed, compressed, and hybrid public keys + - Serializes uncompressed and compressed public keys +- Specialized types for performing optimized and constant time field operations + - `FieldVal` type for working modulo the secp256k1 field prime + - `ModNScalar` type for working modulo the secp256k1 group order +- Elliptic curve operations in Jacobian projective coordinates + - Point addition + - Point doubling + - Scalar multiplication with an arbitrary point + - Scalar multiplication with the base point (group generator) +- Point decompression from a given x coordinate +- Nonce generation via RFC6979 with support for extra data and version + information that can be used to prevent nonce reuse between signing algorithms + +It also provides an implementation of the Go standard library `crypto/elliptic` +`Curve` interface via the `S256` function so that it may be used with other +packages in the standard library such as `crypto/tls`, `crypto/x509`, and +`crypto/ecdsa`. However, in the case of ECDSA, it is highly recommended to use +the `ecdsa` sub package of this package instead since it is optimized +specifically for secp256k1 and is significantly faster as a result. + +Although this package was primarily written for dcrd, it has intentionally been +designed so it can be used as a standalone package for any projects needing to +use optimized secp256k1 elliptic curve cryptography. + +Finally, a comprehensive suite of tests is provided to provide a high level of +quality assurance. + +## secp256k1 use in Decred + +At the time of this writing, the primary public key cryptography in widespread +use on the Decred network used to secure coins is based on elliptic curves +defined by the secp256k1 domain parameters. 
+ +## Installation and Updating + +This package is part of the `github.com/decred/dcrd/dcrec/secp256k1/v4` module. +Use the standard go tooling for working with modules to incorporate it. + +## Examples + +* [Encryption](https://pkg.go.dev/github.com/decred/dcrd/dcrec/secp256k1/v4#example-package-EncryptDecryptMessage) + Demonstrates encrypting and decrypting a message using a shared key derived + through ECDHE. + +## License + +Package secp256k1 is licensed under the [copyfree](http://copyfree.org) ISC +License. diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go new file mode 100644 index 0000000000..bb0b41fda1 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/compressedbytepoints.go @@ -0,0 +1,18 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package secp256k1 + +// Auto-generated file (see genprecomps.go) +// DO NOT EDIT + +var compressedBytePoints = "eJyk2wNCIAgAAMBs27Zt27a52bZt27Zt27Ztu+4RN/8YgP8pmqLC650yqn+RN9o5WamIzkLa9Agq2bEoRaoJ/oRJplKgCMS4iEExiT3u5gnfu//K8yz4XhSnQzTXYdtp6i45sAprIIyKo/AhtXelFMQPtnAcGf58V/JbVveLAN8Vcf0Q+bnIk0kWveCiUp9zy1Ko9e0uaYkPT0UfsCTgaZhyQTuNksSeqX9vTrQ5GH/G8fWiUIiM8Zvl31woXxiquMqOQMJCoQiZ/iXuDOqY7tTyHlv85rmSanWdJIe0JD1VkaEwqclvlvbtOy07cCk4KT/jXWxBk6kb7TMLkeWtCZli02fkxlspmDtHfMonSi0ScKI+ypNPlZVHgO1W4GkR4AkSbYSxX5oGlIctKHhCjRY9eR0GsIluyh4QFg9t4FU0TdkfLf4BSXR4MMmKV/aDCqtzxLvXaFGs0GVb7zLqgb0kn7nDXfGhpaDHAVIodPh3ummk9NZItmfr8juoNyszjGfELYp+OX5UK0lt7hcNU4NjciFV5Wd7rF4mtLxEEuUAmyUdQwDHGfahHiT9dk4+r7K6EPYYBbBaAQeWYegqF0z5jh/FwuFiOMV3oh7nteiCQCzGOJqamLDUBuox6qN4ZOLY4x+vIdtftTZM3+hI/xBsP5Zv0Yo1QQ8jcIP70VItBoA5MhNnb/AxAJzAPDvNTmrNgIqr66MA4MupfWmXLUa0gZu6/vA5lc2V6cKJC0xIayi9bT8kBWbiIVj/9oh5qCV/g53tVrxQ6XfhqsPWYFps2OYNc4vTaFEdPWT7BVQ8mWoNfwWqBHRV5KIyHgt1Mavq2Z7ogUtQRuRyipzfdg8hKVRf1r94lYx6hjntvVTgLiRvOtETLWBq/baEcWugzzIzxpLreLZWuSpPIj+yd7xLqrhNUu6uWQIESsLW8mIPQPS9tpxRI70BAQEYTY+gGvz8N5xgw3s1lCLjdaQA5ncTo18lEO1pjX0HkiKXSwBpVohq/1PSTUKwMXmdemLJmH0VfWx+xu+rIySuZYabbIJZRmoP7hx4qrjNR7ZLln4Fa96EAuW9C2RGaEjJXQ9XFGPJDi3O6PpdBHFKaPg0HXSqNaSf5HVB/npt+uEgjJWHh/bMHlB+Cq37JsrxklflGImfRY0WYdtecfotfzSaGGJXYGPdY6+Uk5OmyIgHXhgmAvG8kLXWetUZMxo5SuRCpdfdoNQ68uHArCZBkTJ2PTWfBs9MFazROg8x1CQdlvnwm3GSYWGuoJ6WsgBgGHZAxDltrlZDiYEpBMu2yV+sJ/M4cX1Vid9DJbwSWuMXdX22YqljiMvUf8gp5OUDO8mz8uD4izxRrDBa0k3sEsOdiPjVk/tGcJ9d47fkgYdy3jBfKSNrrG1Bvdn37xeqdSxFcEYdoggg3n8iy8Wo4FsHQJwB3QnexMLFrXO/4o5JCTJYUZFGBlT9yeU5rCdnGvJZeE68PVJVTlVxezDz3hnIMuy+EiH0QoasAI2GGlHvlsfDLcad8O7RfbwYd6nlG5phUmqOOQ6xgKFG309xCsbKvDumzmZ8WpVWGCvxYKExEGktwqkKfedH9PbOqcLSpzIDmoCD+/IQv2ZSKB3WzF+bZzAoxImoNSJPDDeCNkpeTbu58L3n8UQ6S3SHPhRoT9QneplWmOAhQoLTGNmzIqMZUflkPDNZJ1PUr7qnLP3OiPWWKfVm9HIXv7X2zdP0oFP5yC5cGCKG83zVSeBqd14przJKjmTOJTEFIJcZZt6LLnSPjxZkSMge3GLpx7KkCao7yMOxdxm7gKn4l5w4vrz1D/MuTgXvedy45y4n3YzmeUNNV12X5hUuCnC/RbBIfxXrE1JJffx1VpHc2Z88roux91yJK3zm9
FHPcxWFL2eElsDqvYgoYlrrxIDCF8w0L9xVCJ/K1wBYV6z0Il1liZFnrh7oC7QnCoi/AZVC5wzuseezcZmsO1PXijTQ0Bz2N8iU9xr6H+hAMrmj5/kfZNGZwfRbQEYgFxnYiZ/RcnKxv6L/Ug2cGHP9l0pkpT1bcb6HfTZLVoy+UXd2qckfNk5rcKMh1Rl49vNssVRf7Hd8AfivO3ESlJUSz4MFkdDmiOzmO3GCXh864/drgFmZq6f1XiZsaz4r6NbjOTOQdQJf21TOmp1+bfXxGWCL5M810GZBhc6YLVGxcWFHwEhf/d8HuDXUa+i2WUcclqdokltn6q1nu2eLoI3aiyuYvyr08xBNo8h6o3/yX9fAyxP9Mmwwudd0GpruWyd+6Th6G7vUmuZ4LHgO1XTEVeMhK5PHoDx4J0JDDEVGv2kJdA/3hHRZXcjF/n6qveOFmgDS/K9c7qgLFfacIAS+VHxEk+Xm3UKLvhMgQXSU+GVKjei1uFONV4Ye67v33x7hbEEbT78HMWliBfVkEqbEOLiGV/81jLALoxKPMEAZaKMp2vBrwAHmcYqvhnZC1UE6Cr/JZW6k+69Bgxp+sGI3BfNvp5h/8spmJqg4myTAcqzYaW8Ami61S7cR5I5iErsXlk1oVtuqnyJi7rpaa89xob7p9udfTmT/ikUfL49GLZpxgYnjt7W/so79IqGHzZIGIIARcU7ktlYCMyhTUsBzf3Bt1fI3TPdf9q89J0+v8+lSdcIjcojNA6OKoSVmZl1IDJQ1FFUSB5nuRRnBQad6DQS/gHAZxefrebtPJA3qGzu4EsFsPJXaPKGq5gQaz1ERF1dpdRQmIeMWxLcyKaPK31zIUjilx0vGc0CupUPlgfzlAnni2l49rX6nxM8NF71OfhDKFLBIggWSfbBKORmOU1ZvFZIkfWIpDwOKy78HNLKSp/FNA7paMy9pcCxu0OvI9kcWld/GnM57qRnliEKgPUeQ9/Prs6koi6i8QAOov/xlN1Me38NfUgZIe5LJqw2p4hvWm0D7YuqCoZdYCnRVvwVDT/xcUc++4D7NeXYhKC1Ffqm8Jy7NWQE3v6koJY2q+T3yGH7+2c4V5HLboKAmLx7LSvsCRDjEhoUARsfKpiOSAxmJAf2Evbi5Z/+Ep1UFmFxGIrCPagCv/jUfTrQ+tB3CCp7L8Av2vR9x+42mKoeCso2te6hJ8+VZ3cq9beFbpyZUb/fOOwBjlS3NpWlKwU97IdO8Ey8Cr3w0TE9YOTY8Mu3LpY7j7PPBQtP/lrd9WaPQ7dJuSMp+mS/k8i4fIuuDBwMIzVouGhrn9t+nvTuXacu4D60+FYx+4+E27N0Q+QQY3rrMISc3dovqzKig1eMkIDQ8d5/Fwo7WflX2uHsYn16sKtRiQyMIagTwBBeTPEsm5U2YYkfBomnnO2pW3R9IdO2QghSgaQX50RveltU68Mck0D08wkYGWLA9TrrPfi7gzxJAyiup8DZsUrLsipVuEL47gpqdNtsaG7GV3ShLcCPfEKQEKzGRVs98mluYbdgEUagANpVz9XOstElchJ43RRkpMsmXYeoSzZatjSTDwGm6vHVgHrUrCk6dzTrrH2QZehW2xpjcc+JA4qp5zvuGSYgvC32BceJXaHvQMhwHs/5DXsxeioT3l4awYPOiJOVDihOyPZYb2iAWOUREfP3Seb2+3qyytoILxhIPf+0OYjWCTg8rb3mV3iAVpk5+PpwxDdwXI5GJ4Rz5BbmrY1ze7wcbSJvhEzIWONaUROb4ZEvIHHbMVBqyw0ITDtsv1S6dzgmZxZgRcrWSks18w49DFkEqrysIGd5HJcM5jpNWJvXax5ki9ggd6DDzzLWI6zBqgFht/NoOMiNfj8rkz61v0a8ynunoGYT2YB75l8dz80Vz+NvTEpN38PUdnv8dw12YmVQDvlXDXrrc7kdH5tigy4yLb/X8OIGY98t4PchBPxvtl/lZ7tCzOqU1fIFs5XqG0Xd5Ppsly
RkcpY0e81mcmxLPPE1dXm/j3t5BQpLKlKku137Y/Ln8HmU36BlrBS0Pr3Q36f5B+S8A48pP/Tb7BNB51Y8UI7Ub1vs6CueReJtn/crGW78BnfUiaJClNdGhaHdyveuLOsMjuUxQs8AUNi/miOuH1LE4s+v0UG8A4sXVdldX+FRkRgmcg5vk6xKFlGvIC1YIybgNrUgwUdE2P8iesBha81++nZ7KnL3l8E0ngcicmo3KpRbm+Wutc1c0NQhEnnjo0uiZ4xuVEFomiMtwkdD7ZlpBf/c5Awbf7wwUb4CxF+ha/zpWs/kLZIPRTaA2PHSQlGH+jSjRovkiI/Tn9Ba2mPNsW8v94k7CcHcrg6FtxVHjhd7Y6zPjOvYqeSstSHVAB9b7mLV4Q+TU1HM/g4eCcgHxQ+XIjelvMEVb4Bgp6VnQojLgPDC90zpna3WQMjvfaSoARO3vM1QioFr04Z+lg/UUre7tL3t6QYh3hPDuVC0p+y8DsZVAXs40fPjGFEUl2S4UuO5Q7+opHhQ1whzh6HySiEdJDcc/TMuk1oOVBLhEvgpzCJIngjYZ0Q8R/a5nBTNZdiEJwBlx7zjyI/BK36wsYgUW/NxePqrXlxQzmeY+o9uBGDJ4Au+WQePJ4SI/N43qkCHvnbHcCx0VTpsF/KP+ShrvwUvPlYho36mo/LA7HoywPU5iePgWgXCmA7QS9B+D+CwarAEkUtsJveNSkl5/aBqTQ7kWqA7ZP4DSR5WRPSsBSVrIH2EK1pPpQsgBCXs3o9KJROeFtFDeEEL/Yigqd5VvaQhL407jWgpFgNFlmaKcq2PTXWlpgyUwmPyUNmVHmza8saL+x7X4wxx5nBsM2UTqfafXxuR87Jv8+i1zpuNu4juOCM+QdGW9M7K7WnjUhtxq8I4p4xWzjT+/TGdcDfLZk4I45tl78NBJtR/H8mKcCaZ18eEcXf+J/nYUup7oMZu5+ed8KR48m7omvLKoDLOdVTkM+/6U5FcHDPGddnvoo6Fx9WCGkGYUPVE0y+qbFHOhIGEKpXSaJE/I2qLSYdKfIeJl0RzyqiGSVAiPE+3NWaBAGP39rkU6qmT0JixxcnRlbDrJx9ilvQNtiEZosu+M1K/8BXdvn9h3mIkrkPpKCEAPokV6J3ow1edX/3jttNgVdrLAdg9G7XkWZ1DrHb5smS2vgC5Tl4ez6YPKwROTT2y81Fo8ckE9gzWDkdLGFGM7ZTgvavoDCouXzQpgwx1wGEhYYegGbe89daiHrMIdyM1/BTu8xewC6uvpxt0NlJtgSwhZTh4/DPJKB5CD5wGrRFU9QdZpvRrh2zKhTrICkjaO60UKkjjw5S74vEMr9kYOcO9Xsugd8NXr3lToFFQPUda6LnnApS5MmCRth3sg+1/r96dEKGj6rAA+nDoQzJkZ4lnG7GWoZHyfdA+gV2Up7lpx74NOvChoWRCFwa1Ti4FfQfOCZUeiJwrdSekkNUgzkDcGXNciiAGpJaihg5PMtWVc+JL44g6fcH3wn/0hjiK0+XgkS4YXGusXaWowk1S5Mb88s1ZVfIHrVGrhKjIwXqrbjGlRsofTTW3L/1YMAfduSgZsTfE7x1pqDKJS5/QAL6/R8wkDP6znlJTOjJ3Q7mKRJSb7NxO5lqAum565vFUAEjG/F6ag+JXcliFEAtjB50aaVJq9Xdmr/41U1W3ohT37SFS+tO2QhxwCxYKLfZRZ5DTejhYjGjv9nJ+MPVs+reOkdiDN9B/umM3Dra5BYIR3e3Ma+sh7s7A3Vx8bChA637iqIcQHBRB/KC2jhIOJ6TloMJKwq7kb0E9XF9mJKmCg3hmo5d4RJzHqbNcjzYx7Yk5pW9u2bnVVTfNizvlXChMV4Jf5oYhhhryI6ltlUEoy0dcnVrxqGCr3oxeTTUf5UBKirSe1/gYybkHmnwPlavGS7xGvudqLruLswtEhy+WLGktsyMnJ8W9COKhKX+yT/JsbS4agPalAWAgq0Go9WOP3RF1Ru
zHL1/Twfhb1NYiWnyHTV97NVRg59dnhMnGmBgXkg61GiZUapX50LNQZxzNuqugyEHjxA5KJfat5xMGdWLkGemJFwfcuskXJeWXikIhyV4hgdXzZRgUxRslgMfjuxQLvmkV0Mo5xvsDp3TMwkg1PqkGEkXlV7AWFN3s01uhJ6yDjC2Uk+o6z0pfnoqwyJnVgeA3WjXxaVtR7egPbF8hfxxAfwKF0smjTHmboF0kgivS8DFJLv75/Ucq8fbYoMgadUoqnmcGJcHbqo0es+Dhi9Xl7vCUVADTnhnuyVi1432m6bM6IF6ay6xJCRKjLVmAyEooegmNImVU5uh5FGxvOAAyQ706a+NJGgUoH7knQYOL8ZyXPTJSWiqNEpr26wUb1hbclv6Q2iY+PTf2KneMpMOoIfJjCN562NYCeMp+soVRGxbyic3Kw5CfLkEZdmVFpFlp/gH3QYYfvp+catQcIPNavc4lWGp4i1Acl01RNbxphG4D/KBQ3D1souHPwVFKgaiC/gateFzhTrPRF19IEVRm4h1rGFjVw/0h0O8psDwOIUQVE6yWSmoOaMX56IjcaA3jEE/TStAQxNE3H5qejiTpfdUTzQdTRY2T0u2OiDdBC+f2dMXvjCGFtQItlBJQTQB+5X1HSQO3L/UBu2N2IH1WfCIaagjNUmrY+DpLuPswrx3pnRiwB+IZ/JH4x1YV5Frju6HvMEaEbV1VMA5+tyOcIbe18oGEX+GHKGb55evhOtLwWOEnIgwWtNtEu2k6sjlmeiYYIZe94CyD85JcA8jNtrGoOwPIzuOXQDjq2FyAZB4Y5oHWAFNbhuA7DjS3b/0r9FnVkB4st7jizbrNtR1TYyoGS7+lR4A7b7ya/FmVLMgKfAbohRbBjWWpfltG3mUVMQRTTI/F+Gsw0zwd0Y+OeqznaCusBQw2Lc8QWFJAylqaDK8ZRwDapfYycfqT1j2SxjIEL8FScDvLoaovVia3yBwMh2po02KhuyvIc1A3KVHZPpgD8RYU4+EhZCPky3ziajZKg3rOGHuBzw1A6g9gXNpN0pbWWS/gEyhMSGxg4AofPhtBYp1z5l/jmMREAJ0qnNpj3vXsXXaAKjNDjJel7acD0GUPaHRhsKq28WwZ/UuD+ILPjHDtblSZDMWR2a+tNY6kQm47emyolszCZo2fsc25fjpIzGXs0cetux4Dqzv03nH6XMdZwhuPqcrOixrdEEkqRPadaaRgfJymbY1HncC6myfR3zrPkEA3PLdw1x3ljW9+iZ1sPUeatW0c/nNggFNkllR4SxeOrOqgg18rwG8dPbaeun3WPVrvoIgd7oO/fUQJKSujMTWHZa8opUxebo6CahujaYmcGMC9h1ndg7r3k11sdoudFbd/0Ywwispr0E8h8XOUD2mVPlPesHFkAvglILLJ4WXRjSSnNcOdhwVilZTFgIlnDdimNm0JVz8Iz3WoP0LCp0+1sIDtpcuFVFUX6xd94DWvwsU0GstZp/Bd6TxbxfLluEDEZiy3YUJOsXSGBH8bcTJ1aZyu1woCqm4f4ogQG/otHbC0UKoYwwxNemdnuEdPF9Z0JMYKKo08Z53vle3XWrbODlTX+i9FAN5t6qe5RRgWuI6I68LMMB2ZBWGCwu8i2DinS1I9L4KgG0j0u7nA0vqcuJS/WCcLKXrgG20fgdRQWgYrhA0KHO5dqfeyNvSeU4h0pU754wmk58W07CK8z5cg/zzXGMuD8W+4PJtt+49U180vl5GWVWaUQZJ+MOOITDDV9UuA4tJ83c9vlnUzNeroPXVrPkt7kOA57ZNe2AkyfX4AR+q8paq/U8PwhABf1pXN79ERqG289WZdsb9waoaMeiyNYpvlwdD6gjoUZgOO3DdHKlYHSCeG87Wg9f0EMUkCS85KP9YETYylYArRn2Cy4CusH20VtLfLkP88l3VxQXV/PkPu0iwC3bFscgTVpUONTRqRoMCM2yI7MuLC9YPQTZsbFI4lMkpYCPKXaCra5l
irPqucwJs1GqoCydQc+/2JuzNW3ZtuRYudz6zh1i3d9+alM6DMbSG08ao8AZwLW/KHbKBv5egSSHqsYB45H+aWJ96wANB3XanS5FvmKYQmO/mpndEk/N7geQJ6Eagc102vAM4FeOJRnmSoS58qGiPhsU5iDV4DwWJhZvFGSIslchcwjhuwOkvFRqlUfM8aQo3LWSXaXOS8oyPS86HHDIQtfeRAENPZ3OwnjkLG65FuhlIkOaCG4R3oThoPTsfTknjxf5z1JBhRBRCum8JRTWqHEYyOMHsnjicS/DH9zRCkEHdzRfkLOZoitOdpZsNaAYEprChcMLrLDZooVOj0vBgN2kEU4o569PY1x02SsaBWTffTVZvVnOc1j90s6YN5C754ZTMM04+J2B9V7j3BfM9YxQ+SSEcSO/QheRAjs1Fha1KUPNXg8lFuUJlOIQnWxvrupG1DPukFu0EYJLCUAhnBAL5/tXKdLHTKyRbm1BaDJSdDLcPSOngECoQH0C7UAOju6rz02WfgME7XNGRhmVdPLjfkdKle8Iw0cVwA7Zp2b5Qo4N99oOs61z6Y4GQy08B6xmWmlNccFSceTbSsTO+d6OfrK7pp/iqX4DE/RSSLhU/tvBcNlrUJXrr4tRK2L6hB8qn/JXI/jBPaO3TNKmVx4g4M4u27svPZfb/XozPSwBbi3qgsU3Q5+E1eZ3rKKolbViC+/LrGjZy1mm+Kvt7Ht9bd90kJ95+HJB2DhKY8jpzvUmXaCE8I6tnBvvao24ClQhwtFoKfsdVT1p80/vVBVyUNAb1sRPpTjjgvUr10ktSB1o0Kn7324E32nKDNkULKlBTRdNNAaAYDnQP/Eo7qukPuOjyR8oRJ2HU3f8ZvHTFXy9FYM+gxZj+wSDYkEch2ckTifXLHwDQaNSrIsGYKzaeJ2jAWsDHy1nStpusbz/O2FH46bUG94YJTVc9Wi7NkKNwt/PbmUJEds2/mP6wp3Z3sMRodI9NG7LZElsya61htUDAq8ucyuzD18z7n0wyJqKxSHJPOpvR9eAoSGrZJrhs9X0LKDrW/AuHAZ/cXQ1no/qjPeoZSJR33jTb8+4dUW8VX2Q7kBpO9ZrTTiYg3LXBPmKOGG6ODoKIeJ70obE3SBEB4AXJLHZ/4pmCvCAgABV+JB5v8dgAz+tjn/HSzi6oBGOCxqYVz7lg2JDM8d2LOQO7Z43ITYVfauEvbDjLM60S8t51edi33fD7bXGFKV/NmYy8yQTzsRnqx4vDblg4KV+/jxFSUE0x0M3RkOTy2FaedY7h1TIL/vViyJ2NO6eI92dcWLq3HQGio2RPh0MkEXRb2xtN7WkqE/MmGSlceDK5OEn5SnD11iI5y/VYbJfJj0jAIR6d4E5/oDAKKwJeufLHmIZZSKLSZ2VfCeHAEC5sktoU2na+vC1aAePQ9NeoQRegyDxHc60XsoW6iFXMkPu0FZ9Qfv3ZdKFHYHXn0G5pmYERvVgbctB4r7GJ6o4+eJxZDm+GkiC4Jw8wbZ5htmKiMAfhnWwGd9v/ihBblCIVp1nuFcnVQjJB915PyxvUE9wZC15lFUxk0Epa7YjhGUtVtjBdPIC4x3nIZJ8icVEUAAgwa7gBJ7alchWR04DXnJCyTVfaJWBNJz6q5oHv7qUMHd9VyfMdj6NTum63Nir/p7n3uLoJ0PwPwYbS83a1KNzAvyhdV9ypHLxGeDbP9LpOnmtUkC/TWOtdFKaZRR2dypMKeGE+0GZg/e5tjPvt6bhQeLl6KdYNIKCmMLx0rUp//YkkiGpJJtEPPNcLX6u7G2JYRRQPSzba9Xp+0S87BFdtHqHCvdxvkTvbOpg9xh5xQ4kBgwoXBCPlGlWplP0H+UQwB5D8lKIciWtdmVZLszP447LqeIrCKf/IoCUZei9vdI+bTHi1PthbG7xIDKO6+YJryCS0xA/4chsPnxVBRwjIJINQ9nuO41273xGQKmDdFEBs6vAVobitbvn1ZH23uZMESZX96o9
XDuamK6Kf/KgpQfcj6tBtSX5QEV1CZkrzojQUO/+PHA6gUv2ZjqMOxyDZevUuOjUTo1gtGsY+w+kJvIUYSNg9w6lCqVcKA6tA1fNhvXf438haETgbYt4y07KqSsVXc8tKr3VTMQPJ1JLGuoa48AQJhRdy+BvR3rOvBSV++ZTsCkgDwb80VpzJAOSbqkCj5hu7EHLmfbvkbHZ4VSVluMY+LHxGZyowi3vqaabEBNDdXD3cGPO1m6mbdm0vxmwQhCgyHCcT1FaL//4wxyROIcOKe3YCpknT2YjYtcaX68pc+RC/MyEkAJoBr7uGkVxGJ3ZDGgecqQ2cjIqnH1ZUV0U5cEVx2L5jaTxw04k5xI0Ber8m+rJ7saZuPN+ygx6lHrMJenixAQmg/7N2VQuqmpHDxAMve+Anxp9hosyVm768h0LjWSUpdT29yGfMyfHKKWFjn4beQvUPYntPtGNp8G1nIJbzL8Fdp01C9GrnZjWCJZvK1sZB70onSsrWY585T8zSSyn6N6betbDnr88jIz0ssSwQ1UDROSfzqf1sV8+aKNFiMHkupDbFGZ7FZ1ndTgCHNaFIlU6Dwy8ZJO6J+QFNE5U+KfJfWA944DwusC/BEpsdDWiiFk0ECYn6t54z/BVFEeSrRsq4TAXYTfnXV6yA5URbVIzbIVyIAzZELWFWSTd57L3290vRBSCJSjkji0hpFHM3sCNZPpIrN6oK3YFycolTa4TCJrc801u+kuTZTVKS2Y/XmXvECwCMWnWsDhJwOahZi5cmlV5EcIpGq92gtZk+w8cmFm2I8RGs16IO63GvzA/RBiucWo5n9aBlcnZ2uKaIzEcrD7fMFvxQ00PGRkOCxASoZZxphcSWPfHFBgS9TEb2lNWwmxq6UQoIztgE30WGcJdoN04rJLKowps50xNxzhXkne/rIf7U5NAyy7ZSME/CH3/RvchIFNPQKqv0cxEPB8lDpN/NO1p0cjjqvqEa7xaZakbe50pY00WW6kK43s8cXUp3zYpFirE/K7944O11A1RbitQ+c9TCD8UUfB31YWkpgrelvnK/DhlQEfouj3IM8ouVMaC1DivFeY4XBJk7bVzOYwacQbil2pS2w+ij09l94qjEHXN6VpKFnOEao1GFXR76vaUak+CvKG3I2XjlkQPnwU1rs/y+6bIAUCQ27Ek6wcg5y1l55ay17DnSXh7FvItOONpLqpY5Sezkd+NaKM8tK8MaBqb/mmfCbkKtOLLoFs4x5Ndnjqomiz0iWzc7HMxdH7hZVWYyo/UXAovQOKSUtzk8GFe+HTMsI4mWgC190MpH8CEEXG0GI1MfREdtr+BaHQeR+Xv38xiiTFOUOyQLIiQ16eEGnISsTegDKImVYs0BtwslhG/taCz0lr3rTm/FAB6u0aw/e0gKi/RySn65kjWqtcqRzQT5LyKEdWGJI6Z7+OpOS9hUWruXezmdJbmVLng8yol9gTsm+cfK9PaCWQIFue3fB+NZn7cCkyWqUvTaJuyTc2ESuyn8rXB6k2RhDiDTZ3Q8YOnjuzTwOcThJYthMT+yS/9atI8q0QiJWu5Y3e4BhkZu/nOmri519zbCWxBbt0U2oHW+oMAmhowXCKbOt/tdM3Y1tgUUCflonBKpHvU6U3ZCdq7SwJ7jDPufsvbJPOTcGiLpTCjA7Ou4g0G1NtYus3jjworABGLURfulDRHiVblBdEaB4dX6N8+CmTZ/NhT79735JRrAzlBaHsxRQYIX2hCe51OqzlsNpBjPaDi/gL8le4JHoAdUS1XNNkmoMsNnYYrBquQebob3HpYiChA7uTe6SCWTyi9aLN1x28YiEAT7JZj7UU21eXn0IlvDbhy+0gh3tGGKOWvnOweVdwaxlBshDG8NAyvaQ6MSAZgjLoDaenXpyKC5quYWpJxKByw5Sb5cycLfU+wf5cHiF1dlDiFwt42l+k7G7zuxVOhHGNW87pg4j68HXzg+e4LUwKStit7LA0odvycHWXSW402
BVqAsK5TW46hg2A2jT3kjzp6icPZP1xjUUmPxqAusg16PSiJTA56zccK8b1xHdvpj+pO6rvPsHjiT2lUD8DucY3fdiPxgotEjcBwVTeLDWKzp1K8s7m1hHVKtnrRqcRlgzy91296MNte85PlYV1iiFiATvAJsh7qLhI6DsnqBhsQJgmbdOHtgrBnpcRHXg2tgVxbKV7LE+ybuJB84LDlnZhQ+ZtbbAZlgU1VWjtG3HwrSrADO8v6JyE8EPdbMWYBfRQppTmmEXbGhbtx/h3UAWaTvtOjbazFqWbvk8qY1zmD6c/NeFCDeHxslrGlKc0SfvXUA7yHlUqVZ8x14WmG6NXiQLNfi/Wom23SGMkv92EQ8uG/bieilLbrEq6U3m2N9q3hyb+EBrfLmAfPHGK/2QbYa1R7tRuFn4XlI7Xz9PuO2TidNgpe2vfqEoP5T2XDsKg+XuQO+Gm4g4VX4sSFUzpduyVFWShH6AH3MvW7Gg+6EkZoMkr3zzuXLHw860t0RU7AaRdG/eoyrWgNkOmxvTANm9ClEx6BrHCfWvDnSkwB7euqrpJV8fLIaBSlCznaszkqy4244x7Yb9ysS/o9L6DEHRZVWfj737HblwDYCV8qzH5gU/kXI3i9+HnmmdcHPpHnb/fR3mF/be1ZHF317MXD0yZdB3Kuw0P9W27dsdXqlw1O7lVP+YlFMch/sn9mwLqOGLjdmT2AuXUo8Yqv2s6Q1T1suwnwjBE3NY8u82vqh5G2M++POyDG3X33xu0HmTrGKIWtr1OAD3VuFvfoUAqHwGwLYctVDYm/siP+7gDohJ+6faIE92xYZZp89KUfWftRoBkogf5NmSNZqF/l+9aXKrnepP4nDEs9PbP6cHQPg9I2/0EXIReLK32XKE67BT8MWt1CtVQXOx7/9wzaXzl2pd/t6TkN7bWcaOkg6AN0D3Ld8CtuLS/Gxv4Vo3RS1aorr1j0BF2OQLslpR42mJYOjHNd0S4Jd5mZYovju3fVlegfMAxEQAyt7bY379E0RRwnLeZUXgOCd1qdTqPr8lDhJ7PiFx/UttYlIythOYvoJoyUaPeezq0H/PPZCStqDXNbnga0eobIl1sNQ2K+pEKejL4h5MF9VZiC+QE5JLOoUbC+MZC0tFiLIpdNbjBbd4csPI3ht5jR9DVpbN2KlHlSiMf5oiY0e3CnqS0JZRgaHn+3MZBVPmd7yh7otO8nme5NYX0cKFzSVChjicW+FegIwGXBIQDqnuWr5zPbC33m0Gh6rZiZPF3ebb3Z1FbEtNNC1PmCkMYEUP29ybzDX5quuNfqL2H+hlgkIvNtYzGCv1hnPRhRCcjD+iMI5SXMYQq+9BB0jTURw12evhhSdrMuoidAsvl5a+T8Yr7rYDApt4T81l/cork/EbuGrznAXC903Zb/228hAYsjcB91mHRaOrd0edavZ99CNZdIyhcu8x+L9vXp+pqc7Yl24olI/61LymCfx7+OqCQwJ9GJ731kVt6X+K4GC5aiDyhRrXD2cJ0ygRICzZpd67TegiihiuWKOcY3JdQ/rlYjvs1yUjVbMgDfBndtWAzO2nTzLhqiJsRbo3OyVvfaoLdSgliLH/nnorb3pe3vwsgowu9MwbZ1HzEhu+njq/3TuEahhv9N5uMYBDrF3FasqnMyWaRSp9SO0dXU2ZiuLzAJtdMPi1dXYKRJGVP6AfXcGud94sD+qkkWN5dR5273gGJKc1Wk/KPd/NouhGRe8pL6u4G7xC1MvI0IID8xj5hfCWb9i8PlBfSd0I6qTfXKeP81glBC1pZVKb5ClKYCJpYff6nR7/v29ydsYYxqs5YOWYxLG05oifisQ7gyIumKjdUllSGpcF3hCSacTaVhFlF66dJ6seKWtnnogsRwEGy4loiNKzs4kJ0pqO0IPjDjUr5MTwvMugA0jHEJyowYOHCv1Ga4GmIPPmpl4lvyLcqt3ANXcTJ7ok6+2JQ92FiHKY+fOqb6u/y+KnNDUV6FvYtV
xXSAGRTBjUhorqguaHUJq7kyndn/lpdao6K+HhE2qhYH9l+u//AWgmkbnjkENL7jymP4RRUOKRVPOs5O2ZdTkctUWDTOLncJHE4we7i6APEzfbQQs+ai0tRRMFz82YuZ09MkViJz3qW9E55L4Cild0sRnLA5sjddNollpJXaR+IylOW84+42K0YilrkLLQRIam11NYU8tKltkrKWdK6gCduHt/M3YFlYxRRjLSD2j1FqE8ApLKrqRrEksa7S9+3VNHbkn//3NsOL9JaxHnpkrtQQ9xFfBITwGQ0rQ1I+YZspVFuBHRk8kAlck2070qWuECNZff23TE4926Ky/nTzvemhoHBRhbFNO4ElF8bLWybKvPWLSCBGYqRngBOPjdWpMNQihKS8V6mOXX8THI5RYaFt2kOJdaeudP4gDdGmrR3NiY+H+K4HyRQDoi43sr7Va8HCfUCgc6slfMbbXn/igWx3KJPJjyBytreFPHhVm5UM6x/LwgqkQKLZGEMyLRpjILHoylLNtPsZ+xsk5qjuYdnTmG755KtPbQ923NYCy9R6eY4zfGaucmL4eMt68uZ5CqfvLjkAWr6/epBXwyBOZXxIkQWnZeVxmGF01tSBoj7jG42jkIggFp5BR/Qq1ltV5UA/ZQXYTL1BqNQgV2l5zTtZNOOOEVK2wedW1tsonQEQ4XiRzKr0fkZSqR3BOj7ACXfDMGmwS4X59kQystTaMInfuFWzia4uYSONvZcQtQOwOcPL3MwYWE7Ig+/MlmApvOZv4vZZmNEBxSnrMF2jP4UfP8w4SjcqOcZGMxEp5LT1Lfa15joK/jKMF0t3IZ8Iwd0DOFCZ3QxoxpdrHZyBbX1FfOcUdAQZgFqnuzDtlTsuw01FBWSNUD30qRKrmNpTLGhEBp36trunJZL2ahIl7zyL8WAxJmUN1/FF3qkFPf7dETEJX9f22kJOjKaP42cE3JPXxjmvlPZ9YzHCyeaDzp+NTdB2GefCCamNR4JV3HFVcFMXNqJ4Hh9yStLAczsX2YZ4JKJOQ5SLIUfrRcLrqmO/Wj3kTPDa3rfH/9NDmEeNnnJb/Wee/hBiCnyryAYZgu9ugc1qY26lQLewY+GMOkGbGz5HfdzfByxVgaCm4oAoZJUsvG8vI3pVZcFRULP6hgPzmfHt7zQ9+ck0/NFKGtgAx6wVaIuUTJOgEfgvCjikFSydff+GfCsogXsPmpiBAwEdp1UutKr5fQcjAWrhMthbQK18NuR7lwjVTpqhGrgRFxkQcNX8tSBHuW9BFOL2B8ks835oPih9Cxp7sKvD8YMERqlIs0BGiQCp65e5Yz49lITMSorlfy+KcUpVuAMYLdbpOp4msTYEPIf0l8TJVXGbc6Yr9AWoz8xxzxIylXl6VxF4WkhtH0U5Zj7Gba13dgtVRbX4vsphAV5RyzgRKW1wMjYeK9kkEQ+squ21bgz5HeRHQiP3159yYwokmAO3EGeAQWD+VYR87qlneZxaRdyoV38b94fr5vH+v0ZXrgC6RsWMslZaQGqN1xEzCKvcXrIFIQqciCV8d1OmTYJFw71kn3cBzZDA5l4rt7FVKxqrvYBFllfU9dIwCVZZ4YiRLYwSpZm7yjAVCN/ecaP5Zzy6CGDpGttWrsKmxB6g1AChFtFmKFBhkKIXdED7UmRXNJeCbp61zdqvf5APXIR2eWM+kEEK65AV34oD32e6gL7bFc2Ljsafj7B30aRYMdzy7IjfhiQVQBf2CTNQCd8PWx53uzQxnNPrl/VKkJM/L9z1bsMYcHwjN1bk+q/Wa4bq9siqjU0ZsKDSWU7i1nXL52l5JycaRTo7tvUai+2QFS2Dn4F9uIqGK3kUMCNYYBsPNemxBqEI5POVFr6ajMyS9F+4T9w9wXptBztb+lFPFaWQ4Ma/q6L12VVOh3h30B8h0AL3evKu809yU4b+U7i6Bscs/UREDWraoEs7UetLSffkTB3ZG9OH25gUtXU4dBp+CiM2QSBOBFAF9K+B
vTgM/O74+7PheIZKsBWUY6emam1McPMdYLlLzIl6BdHAy5uLoXLE7NyZw9bSJKbrXUs+Akd+j1TMytjC9ke7YBcUb9+OuUVOxg8HXqHX6SdRef8ERFu/wNHXvglBeUAMMC7dYA6AJfFt7VzPxpv3tdvULTm+Px3UUjxp+JV5ubNUQO5+6chjYeIyWan4/T76SLU7tI2an6o1AR8iUzgapMAN65pJFScW7Mv0aBI2Bou7G2NxXbMkusrxWKi9umbLCjW4cZb9WzGUY8XcVwRlnqqmYKwY+Zfw1smlVfyBasNI4kxPIOC+kpurhAdujUKGGTHWmStRPCcG14SrwwUAst3FHyZKvsQGo59GHkS5jur4vPTQus9lWtHAtSwBbY2N2WbOeEL8T9x+FZdvQ1Lw/g/UzTWho1IE4PeZXlRp2Pm0j3dLSN91ao9YmpjtwRS1QwGzrhBkDk18PWvbkRBhwvW8YMXedJSISHnKvCzCAynoYZCT2uBdlAy0bRSUj/3DT7NZ28iRd6b0E7xWiqlO1JiFZg3eE9tQmfDhYaTQLTyJ7fb9AbO4O1XCgxIQVmB4KABTOgPxF1Hvk0wab16UoW8W6uLUXboJDtapCP6ZhGMpJgumHruxmrb+jMcLyx8/4u4f8Xe+oROFTCkahOkYe29VDRgnN8vVB9KTa1Sl5cazTY+ImVrZP7E2T461mGb6Nu7M6s/YLlVKXu16+nkzwQ2x6vYZ7VWLKaMux/yhkqLC6lOoz4srWBGT3DQUCiX16pyn2iXg6+3TbLj7k6fsvqP59ro6No0AuQZq55ksvot6+33KXo2M4/QQsCvAroHsimaeweOgc1C9xEd4tYMMKOEamF+khyICDXCLeh9G2p0lFmiuuGRr3nZziwzNC2Ei2Cg1VU78nob+rgLEXFa+V3pXgzuK2NbkWb6TkUePlYhAbfeaDze4ZjBG9gRKQrxhLa1mWWGjymxP7Kunke9O6i9QkbYRxbKZgDsz87Jtbz3EZeF2zs95mGm1/RRjy7E1o8/XyzB6rczulg2jqA6DpIDKVhKVFSRPB+OQHFGuv61WEuhVvOepHs1yiYWS/yTLv+exbRPi9UJdO1KazmfzECtNchfyJPJhdQpoSPrhBmSboZNkJaQpIzswCTgQrOo3Y6pjBWIRFbHFnpBpd3Vc8mFFh2EVyM++e2LNEOnH2IOjNQF0V2odyuJTX+csT49srzDDlOg5MaWqAPCD9LBecRjAj2VuoGzGOKTLs65v1WoD9eFF98K/8CLdryzpFuD1b3sX4KFM+MaITq3q36IEDdmfp2uSe2KFT3hPojt50ztGIPBFTtHHQ76AI6pLbiQg3JGt7BlNdPE2CkRuXl+GN0UWhvJ4DzMZe0KcveQlNwP4GOoUutW0fww9GxHd3SZBm+SnTHaKiIVLkNVGUfkcmopFvjMJS0FiSoiohe5PTXnCVJIre8F4oaG4ecNMiQF4tSjmUwRTDIIKWApBb4LE+xq9mtrT5aTyYcYKkGAQ7cRjyPSUDbQMYcgTRJkUFaV90ehHpvJR4c1Fah9AAqRqapZCakmCXxUVzkIIJbnXNWkqjcBOb/iSRhJHziVl058/KUjMNIVXOcdAjmiRq6dl58U7E3s2YLTurZ/0MzUypt/SJ9h+PaNqeYtd+1HDWKB/4ZaZvK4fHqLoJXnAf9ZHb9eFCN9piLm+Pehexkbg6eeAyuMF1GiPxXjnEMT6Y/eLuiaviYDYuNYm7A7LAQzLClp86B3TUmnhCIPJAKG3CS1xnDXT4Z7UShZBKCvTPZ8Mep5Lpz7zlaecFMSy8JXbADWHGfMa97EoND3A8oT6KEqqrEsxVWWd4PGza8HsfoY+kZZx0wSYOJjKtzBsU2JVMn/jamqK6Ee2R/dNxsT6JYPObjcEwycMGU+m+JLapmLo9I8Ugiz7a8p8pRZkx/Ct3hTzd5Nu8KOFpF+AugO/Ew3niJxELy3m5nDSihJS1vpay6KiNuXzr/RUho4R
7HWJvp1sfaSaepbJXhZrvHATx7j50AIIZXywXZd6aoyojGg9X74p7ytRhxKzgMFwBC78gA0PLHLydMnKA2d8519etnZDxGTtPKABOxKgcn7KeSJKMmPdeCXiSumXxW56rmdxyrJJbP3PwThuTYodXd3RSGG5NZgNpQM/jIKmQl/5XOY5QW3U4Ib98S0HbN07YA8obDHSLlBuFdwOWlqqGM2OkvI1e7qnFPd5fenqJxRSr3Ob7vqaz2IbV/aGMcETUFTt+znfOS99QjnRo5NtqzM91I0Le/p6awFyWpohg3PAZf0hAO4HjouiVTiw2u01HykyfmCVs6wP+j/nhM5Rmo5t8T4y9lfYxl0/hgTHVXAisE34+dZsI2cMSi3F3OUzJ7srM7vy/NYTaIa3aeGd73LFk3lXr4LEQ7/qXzu/XqnyrSyqIBpKN6UYtQnkrDuiVBswefzgfEdOBebm+591Ri10PjLMWChEOIQF2s/ZfNXsl9LZAWzo68kIMG1yz2HHtnYn2dwEMC6k6nfRNUMrqAeGewxmzGEwj3nmk5q7GBxaKACSt+6XnCgQ1ZzQGnmU0gKhvyYXV0Zd3R6xUULhL8WKgr4TB1fiT1lSbV4+rVueupSKj29Pktx5gUu1bc4my3DemwZcIJuVPtjHS0SEziOq+21hub2eTp9nksjW2cMeJTznzaW/7JKDOJR3hnxclbori4J3Ci8y2k/oXOIBPaXiDtOE8TYeOoMG6DqmyksyqtLgSh05qD0+ATM6jNSzS9NJbO5dTnTfJYmgM0+bW1aDGWgIpe7P9rYyOJg9wrHk8h5Ev840HbNdGXSAp4SdMy7fwHIIknZNf9iw74gRbLP/GGVPWhHDGBPF0O0q9ca7jLqUCE/4iGpgvWx07Rr4os2rM46nR/5l5J0RIrbYnsXZ8atMOp67RFI2UOkE1YVImt+Nz8TtPmL8yS+9JMLn3NsF3UbiuVIUasG2bM/JeKNm8wzdUdngTII1A2guwY2xc/3oOhF5AMC5wHx1XvC41GOdUjc1/BRXpQb++MlDKll6GFfaxsk3LUgILXvXMFVg011a35XSP+zrCA8arwZrev42kOeRxQ9C5qIRb80Ck0j8SqmO+urRAMmmGdswc7CfaR+wFTIZU+MI09e0pPdB+xB3eaIsdisiWCP3/gNfXiEx3OTTlwaKSIZwIZdxbuHe76pGlaclDtLmRyhjzrLPRw25NxHdaVu4K7ugxAn8ZhMgaslx+j9DYTS3ML98h+cQFFTMhcAmC1Rvgaf9YnOTI5f3IBYFMAdVPIGl2A3GoIofQUfDeWPQDZ0EXhnXeOExBc0CZyLlXNpKyAp+H6wA+32NomAMFfRxDp9G7obRW/44s+bhNbxmSPSV8INiJu7eNufd3+4fIljPaDMmmt0b21ja/f11Y0cvvJTCtilv79jjKQyd/sw74KikH6viV+LjPStSZQtomYyldg3iA2PMRUEFMDU09ohawT6UDM4nsxwQKEz4+hdMrweuIrODJbf9asuUUWumkR90pqVwNMIQpMv4t69TXxenhhETEM0wf5UY2ezxeOydRAQM4ryh2+L7zTBkeqD/ZXRU+1YcJG610XeguBEJ7oKlE192z/bVbJsB6tPMpXjfUBbiLQ8hNxFil9jbiwz7Idgjpb6tfyKKVcNdTGEqWOsGst0eR4i1+6gIrwSl/OL7YQ7fhVVR9M3nc/YDwqV2Fs3l+Uti18niZ3iCqsXUPWecYJefeDOQukhSH9Ll+SbcJJht14I96KYoF4i4qrztbztBC40erk/jyM+hsDentEtVDOqLg/ecdLnrTJAEnWnxpZvdymEFZLk66S83CZzMnv2FS7IOFJAu8+7+UrEU7S/CUu+aZH+wCuHixWK6iq5Y0U3yr0V0CxiTkQWGPs91JkCxQlf01PuNAzXQDjzM17bCWK+LYsgkIEJYxpv+ohbyvT8DbXDSLNZGkxdHxYiC+ZITaGg5DCRnD3bcEBX6zbH3nqKn
COD2V3RedhErCxQt6vdXXLTFrvXNLgYvm65VmAf6pVH+gutSjEei6URst4JHEJUh+pMsMC6yHKbXO84dI2NXIm5hi93lcbwwAhYzgSic0rfLbG/svYhNTnUTxgiEidg59bmvvvCS1eUPI8YMUT9SEnhvXMl4nKzkH0jcOVP4ddckKrWXNDW9uJQuD8wOpOShNRJN/0G43JIhbGTf+alU5m7CJMfnDmsvWyHiUNL+is5PlMpTYGqg+cvYTiy8XeVKFL9xOZTu7/SLD1ZYO145fdjsWLKZnTBPZdIUs4+wcRd/cUKTaXa3URejWWfdvNG1T8JG+MhELsys4WVGndlQbp8ChxhrxpyDHm8d1sz6zTMqIN3EXYr/SZDGMwGIUywqimIPn/fOaJ27Welpq3cMW5i3g9vWc850Jah6wN/d6duqlJJU6BL6BvsLOjuZbxtOa5+B/DWZ19tXLmO7uB3nexcCYxHGmW33Zi4KDbEiIGWCIgNQcc0WVfzUjoLy8wIja0+52s4JJ1sYfBY3cfCtGIPvxnCkSUmhp2sAoNTGIM4WDO8atzF4x7tNhHrUwJkY42tT4kZrsx1YjTZoTU6UPwQQ3Am75yOZdDpd3AdAp1ppTskW+2nXsN2KSGKuhJHGeJfuVz9KS0igByrMCh+CXzgbMODtv9Afki9Y7hGEBpVhwsxHYmg6AszAmuIxg/llxfKG9VUHGRFB+K2+HsWZGAbnJXkbkvosgJrFRgDsS3rKZ5YQ7cs9ZD0/KjTvECcZZjoVKp2093IfjQEGjelaJWEVscqpAzZRRjnaBrBtQl5FqV3bZMP8J5Vu8DO8fXXhsfhHkf9lYUJDExFfwq75yzMLD+kG9kPdwLA9Rdi+arPyEXkXvAtipHNNgTMedvB+GthKAAhd5OZXYWI2Vj9wA0HKulx4wMO7mCLyYjxcw2WIIOvvUSkxQ7pTZfajgK/gdPn7OU042OYbFHDMYX8BC8LKM3GiL+KOLEIt7bgDxklmZ9Y4I5KzphsGiyp0GErOQx2bdkEYq/f60xwIteDGNxbPN419D9f/BcwkhrBLkLkzMm1qYlMzWyCdCvFtm4iaX/T4HLxoxgLmRokbzAy6vhEQbMkG34bj098rqc6KsK6+mkjdcEVQ5GcySlrQ1fHD3+S/aKcALXbkSoxsqTzfiBIivCP9mKWYm3VSij1eMjICKR4iTtn1iYRtBVx8qIBH88aFUVHkUphHGClQfjSMXVKgjtAk3f1355uSvWekMEtnJjOUYKNB6fuC1ElhOWVNQV3GwaljRjL/iCKC+r3eE5+B5Hd1AVdRbug7cU0Nz/DQ45J71QIc5BKGarQJwBCe6pXfuDfOaDLbi8SrG60u0p3w3uhBhnSFgIErUb95VJHJt0sskFT1LsCBS9flYgkya/CgxAmrU5gp+D0A25T+osY00jUnrwQYql/DlM2Bu1oY5Mre1TaAHvwcL2wDcFs4q5K0ByAHCwVlzM3EgUd0uS+r9n6aAzlR4Vp4XUz4p6Wx2lBUVHh0QDwbZ4HU3wV27YPclcJTMCv0NDrWDYqsw+Z8Ccom1cp97YZof1dFpvrRnONadgd+nEnY1ytib9lVIgSUKLSUf+yBoDy3L0FEsRc5zzwG6t/8ma8fRu4/vDTw33qm5RoSviozctV9EaJDb4OumsxIcbBBpcoF8G/U8stWyTTKn9Glmd8bhHR4E0yW+UnLOfEnnFeM8+v/oAeU3faTzg6W0ATLq0DpyPjuh9TX99ICVgFse0HnfUhqIvaZG0J4dssMg7gjmyC2FcJgJLVtm7TND+yyhAoEFiPOT++FYU3op+McXRGVNRFfDPQRGT95LgphZCJnsh1zZkGtgr9odAWpa+82RBd653N1UqjdwBOOGzUWRKT2JqIn5W4dNV+n9OZ22O9PfFd6Lh2fh28bM3S8eH/+/+RxaTUywKAKo1a0zLP0Z+N/b5/l4YbZhm6Vxn4rev+WVn0osaDGIr63/kKsyhP109KP
AGU40IKHfBLasX7/Ue6PRiEggAAAM22bf5s27Zt27Zt27Zt27Zt1w1xgzzbafIr7vcXRhIlw2vQ8eiQX6SFhMBen9mqE7cxdYlRMPng2nGmsUCijTUAoV+j7bvt4oIXKpGRs63FUjuzgb8Pq2T/nYsVlJ3c01Rq29XGPtCWuZ1LdOzk78yrR7RGyNhJPYV2WQtk7sMAxPWEJykovzhI+d+r890INfzM1Uyn+VH2t56HUh9Imow0OU/9RwwRzMYS1++P4ELAjfeT7u7Bi5J5/pR4qiqTgspy6BPJ0D6vylIKaIMTQ2ODmeY3wBDDP68fLEMEYoskJ9s8ftsGAin0zBsFXoIa0Wu6eCvQjOzwY6wJL+WO8UHoL+R+C+U0t8BtK4fllGmAx4zxCTQNrfE9WKXohq365/GVYeXXSIpNjSIRlwiS4tV7nGlLlfwsmMlxTKtqT94BhCmoGG3+27W5EOaZjPUE5gknbBRfmMRcUNnjEIb1wdSOB77v48dxm7E5mhKnmAga8x4FLS09f4zZ1rjc5QDo95ZvL2GI+bGRUwdwCYfmczBAbpZSbDxxdnzzEEvbneFryhmtQ7NW4aDmaC6BVDGf0n89J9/oXORnDm5FPA9yHgVfyK0/EgN5XpJMuESPSfVOnAYYq//AuzZbf1yns5mHAbb+dBmkVdr77afy1VIghRLfgEJoEx7EWN0fQUyE3B2ENvd/98t4gKlX9Q1jb1EAx4V80WQvY1Dk5KRX1uc6Lsba/Y9lMvqwdhfPO0bhb7yCTFvuQRlnT61I1GxrA3c7pFTyjs+HslcXgCcNjnWRFj9viPB+Op8hGX9CCquKGIibgTBPZwOdmy8JqS3whB/SlG33ANpQYjwJk7KR0tWuqEDR1qn1AxemgfIORjf3srmQBP1ggpbrZ+Y4hxRX4MMp3ho1R0Ec7S8JwZfyH1VDWIdXLI+AZ2/WTtx63SjYO107Oix7lFC0Zj2+AMqrGluR0MG4i//C3zda7Bd1tr6d5n6dhaFtrNWCTOBZamPmMfe5QFnq6bXYXMzrPTn/XJLexZOiLu+cZ842qIKInMLxuPkA9As4yPY4ErPBEmRD64s58/ZDdx2qjHRzy+UVXuxefh2KWIzO8RxIWal8PSgUN3pAyderL0+qKfojrImsoN4qNv+RuCH7I1I8iASwMAhdSDqq3PtmplWOgxpjtbx9wn+yq/wqYJTheiPnqatfo6Rlrrf4yT+47VhM2BOAqQj2pHaksnercP4ZzXVL5ry0If/SZt4JQU/jTnjCbEzfHD6GhE9ucsLSBD2ZwnIGTNq6a4Oxc7DVPLelT5aeV3x9KnuhramvmqrjMw3U9oe/WMSSgfWKREnm7+90hTXpxGWtB8b6Q4X3uIX2yLynvKN1pz3yyacHBQ5pylV0sZVjPXe0gJrIEz4uNbQf8p3x+Plc/tVCwR0ECCYr03n+GebaOK9vH9vVIOwaB1sTAfPhm9osq9EW3aHLOG9wNEUH8GJB7MFm7Xt71dkj8LO/0CuV2OYaDphVlf4xXeRWSiwI6/m69qllFC4XZ97zjo1wl2dwqT5e4IsFKYGK13PWLjjN6YosUV5lsx+BEFCfRGQgeMVQAteMzk2ylElkDZNRHU1PJVshLnkPQh5msSb0/FQFQzvw25dhWFWT54yts4Z2Y5vSzae7O64F367D7OTvMQtXMHxgfduMtKj3OqKDTT4VdnowYzBFTyNi0lLLrbZ0+TSs1vbA/2yqYgpMDFZ3gfOJTy4x7eeDK9YWOhmAAr1Q9yUhVbqZ2G9X1o1HSJrgHuBCY87DVgMiagFkwlYnA269hrb5CQPWlGfrG1SyBgK54psQTVXAFyo7gpTzVIyx1jkYCGXhZ1IsWLSAgqR0QSqLjHV8XJopOqSGujAXJc6tFY/5yLUt0zaGCdGSIFnrmaAoUd2/OGwJUlsif+X4QjVx4OpDq2yALw6SRJXn2UQ+2yzurvexVROHBorWs2ft40uZw/z9iwqwR
HF5Ll2sodcs1LehXOeFurDQ17wuuadF4LWi+TP7tgFyvL3ZRAZiqRGFov8dsvd05TzXbyF7406ZBC4U4Y8vceoMW6ZjhxexHRmJqwmGbNuLApgiX1ln/7KAiSn1Q/uuUg1piKjwJhRXpDidBN5th4T9mGy9VL8aaJaAXJPE+QjQId99yQxLT2GbRzb7hDtA0TN/mYCT2JvfITa2ZbB1Cefw9C0GIXbdW40FP7kzYR5AZ3wQOpdGuJ44OvwdNX7ivJ0UVKBfOGgm2ZkTJd+RW3L3v70CizA7dJVBs2ECjanjyKmI/EemGzUbdDKl4YMV1C3gCj0j/Ko5PNabMdfkoJOwx8pTcSx/i8izlwfFGyNS2F+AW2S/LbXm/NqxjX97NJ9gj70gCoA3KFaF0sWf6qDVhJgis/RTxYgzvkNcmiJMp2A9JeWG/vDFzXZR2kpxFBG4yKS3Rh6snQU/3+hiG8JGtYGjdfqmy/dPpD5eZaaK6zT+8gSSlhekuOiPuNB1kLRl73mZ6laN8brtuPcRLWp3TyMpgJmxGtjSNwP1KiGZvWrhJwFCdIUopPKiK1wEj4EtEjXieXcqeZoAGRE0V6dADyY4a14N4/BFVg1fo2C9b+l1U4hHF3F2ur9qZMzQ+qyzmTaikfAtZBiSguZE5On7CvM4fWuJsByBHveqZ7gu5YQw1u2bJpG80c1c1oGg9GXauG14F80Jsan45QHsG3/4G7uv7vfcZdt2xClQh0svSSiaBEDCm7gLT0cPR4flSab9EFaOFcKcs3I1nvT6qn6wRZjImTUPcrWN39Q/IhTXKZmgHLtJP3xJezOgwSLP6evlh+Pkl9Zl3n88stFXWQMFWPrV+seGzBEB/05vgEdpvIud0Ay6Y3KjvDJd95oh7SJwVZtieNbzsU0rYrQ4zleyHxj8r/SQhRVXMyXiIIAH2TG0eZnhef5z2yVFgke13BqxBDlQMS0Tz2XYC/reUSNH23SgnR+wSnNx6JAxWwCkjNjSmMi6773dvVfg+aPa20dIJGy32HzmZWp8GyiG4mhU3t5mbNfv4cnLbz22ySdjz5QmYh1x+peg4O+DoubJ1nE1KThHBRNekDa6UUm2F3e0NQgzSxFYo5M6DICpdmZJuOJ5q578XenJwOA3JPe2zjQz9SpPRCjDqzNFDWfHQd2ypGhilfeS0L1OsHHSMwKbfa8lDmiVOBXDbFoGRwYEE+Vw0bMbwFrft6s09eBJ9FnlPFHMBG5GQRQ+gyqFnuMEDEGsuIPCMZZTi56GeVi2XwoEkpsqaCkjIL+UPyG14rD9xL+MnJzgYCiJwJBb6HRbEzMlJZ6wHfgC7iq1CxLNKYhyltw9qYQFNHXJNxUtGVHfdrFv7wNxVMoZSnUqueq0duf4iYr0e7JtG72xikIwYZhJQ05Zy9r9gnoIOiCioeqz89AAwZPcO8ulMY1395MxZONqlSAMEFYuXOSr5rj9GXsC6K+afw7vy/VSnC9ozIkVcRQ2JDXU8R69FosfeFtP8P6L+C1GG7q3PFYGDCw+tc1abldPLZnpwQyz/UThcGZ+gM/6cbGKwG2aoqUvi2jBYkT1wcNkYrDPZ8xgJUhAkSkjgikEbQ+s/MEUXAd8Qapj+28Uul4yMDF7JQQ9w/vkFGG6w+pMiKaUqloNMWthDlx473wLlRmk1nwxGTuJipsP9iCOrlTmO71NQS3wRo6qZ7TH8my6TvMLWHnRyf+Jttx42vPfWOqFjx6zSDzzs6lcQ/CL7ymEz+o2UwihaziYsKeKyxCkOr8QHrvfUwVnQvLNjweg8dFisas2K22E2mKJ2+zgW7kAlMi+wg5gKw1Zw9yztr0D8DxTKCYBRJhuYZbGMFxisE2FlZVLnRUEnqv9R9R0YYQgT1uhpWlTpHhiZocVEvgRR5Ce74LOeGjXWr3hnYfNWYUr6x6oUtFqOZ7jrpW5emqzkVZLbnCzxavcB9SdPTEC2Xd+n0BqYlC6q7X5I8cac04SQ5P70
hjpVhUAKrNjykPsJoqlab0lLeMbUi93XGKWnOod19sXkovyqDnS+Z3uW8l1MZrwND3mM47dq7Zk/nFbXlMlaIlEK3xyXwPiSHLlio122veWm/JaeUslWXzGT/JWrKuHhVgYykrX7DIvflkv+Ldvs5dv8eNm1KB0jaclikEuwY51ChcJ2/Ie87pjlNJuD3Wa5j1Dg/T8X2fzm9pmuNIsoLKNR5mPOUpsLK2+s4TKaec4qUi6qJvGcfcT71GX5roK07IFhAuF/x7nFmggmQESK4JgFGbyNZjjEUqgDqMct3DH7kR/iPhHseNSMZ4iYaQNItxFtIn0UCJBa16epzQlJWmnuSKRYClnd3xrGf1zic24shNFnxqyXoDYptQCDRma8VEkuRYTbpzfRAqax7mRpTZg1OH3DaICXTwCVkoy6MfO6yetcDjk6JCEOKlmUMX7voFBDjNRJm/8rfeo8dbTa4NRzif7yHRiOYvSYbDy6nEU8lMK4ZJvjzfsMuyCbsJfJLU0BzeaBo+ZBmL0UNK6ceTSc7WaBm67QuafmiUA8kqgAXdGxC53i7XW1Skmvad086jFqNfJe+HANhmABxgBlD6RxxVxd+ziIJln5+9n/18IdeV4rGS6sHxfYgtjwSP7utZINmlZrooE83OX+S6EZ4LYRGR4KVB1hp6uFxSmKsC9tVihqJ66v9y3nj9Q6B9Ywsx6hNy51Co83Y+EYFjQOA/LyjAPt3erc5EECn3zhXUoejsTE5JS8cJxugLm2KYkPbuUhgtQOD7Y2F2nW1XalyC0MlTLTl69AEQDv73GqKMNN1GaqRpfs7zxQKw7TLTZC2rn9fc6QwE9vUZ1+M75HKqcS6U423a75VzCSWPS7sE/kw0bMP+Sdr2WmV8U0Vw1NiuiZC1FSokOXK+4XigsnhFp4mn+Us+ji+FlPLgsS6DIiBz7hH3tUZSxTIacAQSnu9R/n+gn3mzRAIzCFJqeD+oLL/rvcLz8Ek5A4P5AfXvxsFkSm87xS8OCesDfOsx7+2dFXtbilC4kQXLeokIxKxvS34RcjxiCr5tTvjsGxpwB1qOvEKTHsWpIT1PYjBuHbR12FHm8egBZXkXB25XiguZq90R/G8dqAHeXpS52TPIohOapMqHvw1qrbBoq6XTGLsPqmQBfyo10oAnWcUgGDdviwWdGTEVi1Usi0J2inGe89VQ65GShzrX0VRJre1krPrxrUVmz3vlwI7cNxDDw0/WTUIEl/SyNPt8ezOELDzrQROnHrEYC+xbxfgaAeOcfL15M6dBvDbgymsYCE69szmDGvCmiEYpCucM8zAloBjVw0/cjseu3svAGMEYWeqZgPDGqztjwAszAeLF4F+dj1zg4iBYBA7Y/3cNBkY4EvmDKu+TamnMW/2p8/bREtVh4NubTgOJXnbnsjdPJR/qc6rMHy2Ub7Ew91Zoer38eIdlEOd1dEN7MJTSJuvdK+GMl2MTRdJsTJUaZ7nZvpa1Mndk7mnmWLpJIDlEIunXwh5hn+2f5g79q3mI4GLj9TqE5lTCk+4fy8cu8TKbR70mLJSP2JKxtzsj4+er0455kyIEqABzvI8WKKYPF5CwHL1n/IeJ4sKm3EVN7FepSvTXJ/+2Gu2K5u2qS0Yt8S3EiNJWY8rrvCXWid1AHGuZXdmuMCJYg1Nx28nY5EL8dn9bsKRB3YYZFOw8Svkta3M9mE2YMg8rs1kQEwwQttRJ8W/qfFosWj4IO+gn+zvfMFf7zq/lNIcPEo7Sa8WYnCY5CjPgMQ+m035VYujwOBQ4JA/PZtdFWOdObQmksvlsGx9GcZ4R084YqgAbamwOU8iXV7y6/yMRJ3Y1I8Ii0bBXL75eK11t3R+RhICx95L1QJW4YReId26IPVXYKh7/+TbwDlCGa3oPu3mJGqPMKlDe4NpTZKukP3uD9lx3EG7OuN3uOtb0l1lpzJdwMaXAb6IA3n+DfZm8770BIJ0xed/5FJI8xK4qODcY4dtHK0tUg5
2lgQi7l7AkNhJKR/hh3aTtPP6nk/J1XubAGmogfat+D/L3+KDyHZADU0V3ZlXNGN1vLa39xq7RATWDbsbGR+6Zbetc8BvXIjJ87PpCwTsY5/w9CDX6GonXWpzho97vRWxjeB4KPp8Ea9cDBBB2sA6vG8tTZ7wV1RalfhdHo+ofW5tRZhDn7fioQOWjAkn6HICMFyMLCS826dZviJp/OPN1UdDgOq/Pu6cvdeYDrktG6Duc5bLCSWilmn5zNHJq6CHvekWzv1IIqWDNvzmA0Ptvs1qJwhypvcDCkWNN8cBZ+D1T1TTXW36156XPNOgd76PmO9+Gm9yXV6QJ/lP5tAWUiB8b7QrkXnKRWUSDxB4tIoOPUozK8Ot5fpl7i6FwMCV054o+N/PITviMNK/+Ai7sUDleB1IDsYaJBagGNDGefG3qK1cmpHK/XUrNGCujoPCWHramFbTNSs/FIJKVqkO65yx1t+5vVB93j3kZqaQ9VPhf42LuxJApfZAWFrqrGQvfI+KvS3K+RNL+LVLP8I+OWrp+y2dJyOcCuO242tjz3GHkPQh+slq5QxcnZWoU0ism+PiGGF3dZaOvfb7yCuJSGV4OfydMt0nHE5DeDhIEyOkwu/rxKAcyeXbUeIX+6JYVXeTVqx0NnZB5Jy574+CXCq1dUzjybrSIUQbkQd2I00T93u9BCPCkiQgvuPseD/RwanN4vGhzpvfv87/upUvS3bdz5XIUZa8qUOrLRDEeO5HdaYM1aue49t0cqLNIqIeSPvfTcJ7IhE7R5fpCHaN/4/BRT/PHDMQ3IfG7KkcJ6EGj8NI+6HxDzXtbgdguUqXRDUZKJW/4d75OMBSfRn3F6oCBB1go3cDGMDM7WWkh5omChowUdDE1ntjby7hKqmRstYS47PHS+6X2bB/lvqSbZnIvubfLzYjmh6KTPLgzac2+z8796SQW7fyrIOCooM+oeCUuQo6PolCctT3zmkFwQWoDZhYYb9p89l5BLOO2TadHleeKaFBXy1HVKObxNPYWXPc7wquUW0OlGzY/qbHcv+fHhRKdlJsYn1BYxo506sYtUA+OsRsTVz8IliKjdPVNxLskOnWNUgKcmySCcImDOmiEtBh42+vTzOw9wz2UFEh4MexKUp63ENEpm8q5aip86Im7oAT8FBLWSN41DdmGe3SF69q+JCk+jrtl5cDgiZ8O4VX7vt460OepuqQqly9Vt3GsFYboMvhw+j73whFuW2QkZIDqK+V1amwP2n/V+hZhFQp2nL8F4EBmbgu6h/DAl13ycDv6VnEUEwbBuGTFFIWcJEQ9fwE/SpOuzDtNN1N3YaTfhO08/H8vyasYUqXqea8E35lwDmDvtJMpuwa0p1ggsCGp2piY31ZS0xpjbgwA+LP7YXZGrLjo0NC0ghArthpRh3LzP61fOvHDalyMSxa9n+CGfUQyaTVuBvryQTwFKaR7k4jnJghFCRuJ7PCvdebN1vmFyFi2dUcXIj12UOCT3v+hNa54HCUwbFM2g/HCkBZDsHag9em3WpYI9u5dpQs1MbK1UhXEDC5ODg3u73E2Za0fmKUl6a+3busXd+V0yMoPm4OWcLjdJu5NONdcwn3F16otq4rqSdn1Qk8crOxuLu4irkVt3caOnpwygg0N+3r3YxaY4Wvx8PpNXSGBC0UAY7isPSgAvWaaGm+kuokMOxMIZAVBACgVQDCCRIjGRnfVgP/gHJVJ3nUvyRCBtPjrcdgKlKznuX9d+2mpA8A6RpSvsGmmboNqD2aFI/RuFWIl1obx/SS1WIOJKm4x6gj1YdJKgDCYMC467ovFdfMnu/CKr3AebJE6YqPzslUFqRDMdiB4TJ5zAqd0fBzIhQYpPwCG3S3xsJmq/EbHSs6MqbyhxSj9XqaXlLy/HV32IzXqhv2qe6rHV/g9PvxiAgIMZkGvRJh+dJoyUGeAYL4m4O7QiL23D53vu65CwYuOXwEdRdi5RKedH7Yoj6dJVQOsNQPGLH9Zlu
0uV7HOfim++Ru9JmYpzyiDaq5r2vqOBlmEgV8rlV+HNbPxl/FSVi3Iu07lVDvqOylbOfSYqZSLALixfVUl5pNjNDNkT8LywXAysZLVKIR5Ww1n0MOGdIkq7/ZWSboFJjmLgyxKZdvJUf211+9mEI4X2Xkiok7yIXW6U0/1QO8rkgteAfA6/PVOdpJpwhwRk3w+13rNWycD6ND4pjr2oBn0YPlDmx0PslCVEPF6soyetvGSD/vnWOHKnEySRq77ewbNaoB68oJHTMvIKIMmBgwsgi4I9XVJAPs0NwiaIOOMkjMBDKUCjzKLfd+DDpWgHdOLMKFqmuG/h73/F2vl1EOtlxI6reX/3W1yYIpvlfWKBLOkPKOnrKThYv4WCBY/SzTb3EGzpNQ5W39ayRBEkpsmS/YM9tAu1ToA3UHNTbhjN6buJK68ZVEIGyaTzYQap62b7VeqtjvNdEJBynRJi3YuVYOeN1553E077jlrgQbP7ZGyCLNIkXTQEWwPfigPcAX0/cf6V+rXWtN0fMW7tapp5XbAfkLw438pcsuoo+KlrWbTh7uG4bxLlcBwjUXdFcSulDl95ZwRQxu9nUSue+DgtcGfk/RcKB48RvVxWZpcYlA6ZWh3x3HYbXTZTU5x3frodv9qXQ1X77OBVy0ucGiu8nb2/xgEWafP9MMmbVZE2G6t7tPpq6aDK4iqqMfu3btRZAW6hj6JFF/Xs5p9I0Xdq6rEH1n3wLQGPOmdK+1HnQVD1c93Yx0MW0Wdgr7reCn30NqGdeKcTR4t4XTj8tOAuXrtNoX44ZnAYfCgkd9yCE7xBNgqvAqA9TTWKzSSTlppdrPk9YEEOZJekQDh2ioG2UqdjUT22oThNIjczBH5PgcL631X7vguYzGlOC4sMPZ1XcvU5dA510uWAAxkJphCXLlfg+Ccie1eyHNjwOZ6lz2k+7P3tYpTvRX8s0Q3WfOz6ltuiz9bgSm3okJ2Egr8iZoYGM5rydXmeJ8zT3yT8o7lzwBcj2e1Oo57+Nrda/jVdGdg3vdSrJLZYXlc1VGYakjsaHFMJdKQrxoCL4aC7ygxQXYooT1utyQKta7dz1eCZvjxtdQImb7FzbGxKpuWjpFj37hfoezNGnxvsWsedYBRtHzD9BiHWWfeXQ4dYnE3d4/5EHWG+O9q2WCgg7MUbniapTDmsQgq2JbkrlG/m2EzYnSeGSctg1e8YmqM3MdIxmjpLsKSlhL7WxHnYDzZ5dfWXUCvQtXsj6j3fb3oThHmBnW+7MWYasZsLz9mnWbiJeXUpWG+1W0qb5qz65cFGcl3Qsv/y2h9KjlBFG0vFblOisY2vRd4V1EHDgUGL1vbMjSNgG8lpSYM3V4lbp6Je5RAmwp4e5Z63bJ38taR8WQu0CUNtqCDgAJ4Rzt1C4kcajPimANDd+9eucw4LWgXJf1PxA2sQQY7TnGDbt6hs2HE9O7YIlZcIF+zP1x3Nb2Ksfom1Fp5d1au3l0fls5GKPau0/DZ5Kq50SNQ4TQLOCNQp1K1PL9+Ujf/F0jYbAN3hegwcAw0o4wA1VMqmPTB5J1r37lWeaNmFFEI73szCtcs2ur/xPmMcjLLg9gGyPHIOiT2jXTSDRIMsayiuVWXTYgOJeDT20169w5r+lF3a44i2aMHXQ61OjehVM8dUyyPfQUyWmGzV+8AXiqNQ0Ix6gvq23Je7dRKz9JMN3hrEyI2/2cczeaJtANIsd92BmIDs0OHH7eSsp6ObiS8V4K6xMXfsUudNv4GeQatNjkHgU8KcO0AjgNr4OHFavLzHOAmcgiYzKx3Vi8OqMihS5ehcBtHpjzzNP0ILwsEEhEwdWbIQP9GAsyhXY2vT2diKSvFbFo3LAePNJrhFOz3bqk2VHrwjp93pEeArfwoPV8ACe2tHNwkCYKKSwDPOWrZCr97t3ItateTpF5Cb6r+VuoFqXVEmT4aABk079MRSfodN0uu4ZhsUtS4dJwJspjRGPZzQ54BvIhN9CslAx
sjWuqNhFuFNUtxMOJdhDE+QaSWAb12qhnH8fnIB8f1RGJm0d1O41yBrpJc4Wvc2xQaHOJuv/iTiq6UTANOfgrl78K4bw8c1wuShl5W0ij5grb6XU1BCygDAFL4JYdnoGKjpX9qeuTANvn80EVRhB/dVURUGxjbw58G7P68+FRePHqLYc62P/vt4gIQAUb4lzCz3+yzwImPVCQhBeibY2nvGp9X0wbDCWJk8mzxdOaQzW+PFGAhAfktTA/tru4RNfsRvAeADSH4tIc0GIL7Q4RTbyHa/IVKBsAS5p9jurT7d9nKv/000b0tMgHLKQuJeZ8QPmvaKLNztyJ2/53G+Y0WgDz+LCeXFhLac/odj0rRsT4imatageaDKUwI9Zix9dBXRrjTX4feKxJEpkO6j7eH8gW8MxxLokeJNhJPw7RyPulks7+KSRi6P54vdCa0SjkXYGwkjMk/JdKXo0qYm1aSzNwybaeDDJn5SYciaH0NmQpiuVkrzsUPNf88GWZe1zLL5Yj71Oy0mzFEIpXn1vuYVQX9lS6/J0vFhxNwaq03/LLXOJf1pEFXgRv5BuXt6ccK9oxVfiZZFkvf5rN8kF1kW2GfIgS5FoveQPtv41zVfPGzCY32JM88jEJVtgkEKPFD+8h7jSl9hNMWa4qxG1BuapBkkNxzkBV2ucUP1BAz5KCxIdcZs064pvFT+bUfSH/xKU5mNvSMW+37SCdUdVJRXdEGfPanazG+FarW5Y7B0L1LCh0TYb4VHrZjWefwNlW9M7zpc5qx/XNVn/QzToEpDdYjOlNQZeVOzbfZj4C5KT3HEklYRFgpeHmOAqsMLsG5o4ud0MdHHyjdWgZrYtfYX8BzeuhBQULUSnrKKA8brHYjqTcQxf49pXKNtI6Hp1QWMW4W7iESNB/5SOHzl8yBLSCjhkfrF33lZYHO8VEUqqjk5oSXQveWOMC21W6ZmzuihWCQYJOuDSaiLl513f63xWcGwveva3qZ3zMMUPD6wQp4MZWKNUBU2eosJoPNC2HxcX1cR7uER19By1pn0WkovRAboNLh+YNlE5YY9/bTt5/FDRYdpwIpQp4v6EapngXVaqvkFdhRXAADZ54yaJekOJjQsCuVNP5upcR2KYNGZpd6Gud5iurrKqndUT32nUU1YyMrW6OuCMh93BBAVgjWmezLv2Minl+Q1/8q6rWxX22+Z47fX5YqrTYb5gEZQjnPfS4D4/U9i9BX1uoAkGO5+17/t0ZXZ3TKFJuDOd0j9L8YAlt/kp3/bhzza5rY2aUxrhu7CvyUh54xdpfkb1AgFL1lUj2LDkoLaXTEp61n7i0ag2NqyAji8dJ2whLyRslWkxNWeSe/gpQpEHPtzDTe+WMgXlQqtkrGIC8P1nlUWpnNxWxxJyXLdSRWnkkwIGN6uB0Ac3Y8dHQ6X3bRDxy18u6OodB5i/ljNcJPjCqUp8JLvHS0a9vrryCTQoK4HeMQMEsg6t+DpPNAZSqsgiNG5p2xnTdn4Xm+7hvGXsi6HIHS4jwsh7fuvpVtNswhtsvLMkJ7h5Zi4gEWtV7NNWXkbXRkwN0Ywwu6hZNSkb/PVp+hYclvQVYQAHs5B4tYhTrBCQAhKVvpTdNfzYevu1y7NtPlQqe6xaya4LKDmFj5auNkgb/CFiJdZOzCqftv2Myqun2E/ZYP/MuYVR3BhAJlqTbMrj75FapnzLm4IsX9d4lM26Gwd7uymBRa7EXFgSbquCUHeip67qa/mkjRA/sFSZIvUfvrNVX0sZGGAMIXw9fa+4MViZ+8YP5jwlgpzamG+LpdAr4xYPQ3WNY+TJ90cw0A0lFFrkdZo7+JrE/00Xc/M63skDnX8d9ZQgcK+dvA101HCAHFY36zg7Quf9UQbCJvNpW78OLmpWKvTilNsZJ5d9srWlVj7H0PX0GuubjWs3vaJqwawHtl5nPZFitm1mB84WOWQqvjqWY8fstq2RWT3/cAdE6WW/CeZIsx3wMoZwZFtdpd+axyOa
g6zaKSMayd3F4Bt3L3OSJpperTnTHk7KH2iI1wh4D38IOBvlAsYe2OJmTY6iIhnhhn6CdgE0X6zDZlDMZB8tiSVHupPLAMjoSrnQZDNzBv0fNqtWeA1pb1aIbxyXNcQQtwe+94yS6B0TSUikmWTHz6YulqqDThH2jRm26KWV0YOg1MPIeU93dNvGfC9sllV2KlYUhuvhXSk0muGakm+UfdxTFPzKrvchcoKwAdLYb1YTf+lB40VJmaWUi4a5UgauV+hFe3XKrJwD9Mb6iW7SbaLJdpkBsBwcEMdwWBCZSCH2vgZstK0uc5gdCzF/Du9LQ3iOFRztdmvdZHuB1hiPXTC0hanSkocLBaoM2sxTECwano/CmsARyfsYGu4s1VmGx656FLfjersyDUqMYl28rhj2n3YEHa4BpeP/WtWmCs4OR+jWOsQ1TcFfJ+vECQKgYZZ4kiCDuTBKHlxCVcWLSi8cNlnZrVWpV5ocBgxX3GeM+EEpP57+S5t9bxwI0Sf79vMk7oLYP5Wcqk46kP5PEi1oZ1z8f7eARM7vt5DBLrm84t2cpajAKWei0mK1gy52mARWHJpZtD2tw6MFBUUtOL5BsIWmiIE6QIfqAs/GM1KCONq5qNvs+V9p6cT2KwKF0JXbksbDsMvHcwnugEFnG98ST4qg5rxNrUG+6n/48vgLxgxPQ5LZhpDCHEOuZEl2IHCqca5wOhaoxWwhAF8IgvH0qzNzR205N4y8RvweBnD/pljh+u1CRSeqctyHC3noh5sq2va+GYw1nrQyRrIM4/hWH5hgR+jpwRlhzjDAcBCd8ryTBscrWyih7VUuGZFE1ZV+yBVlEchKqzxkdwiBVhR5rK8EAEpIukwzu1paOCh5v0ItVtQ+u/qO1l/B5053Yngvz6NaHty3k+wu09qUKIOrXahVnWeX14X0UgRFwsUC7eshEdDmdQk/6V5ALhtmlkKgtHyoA/7HmzZdk/vOJWirDQL0kMr6K8dhijGvqZOVph7E2Sw6dba/QoSr9sSSoXbBJGDvTfyVkh06dg/an5ZdhWICM2mKbTuagaOAWVJuKi8swJCeqrJktFQddQbfy6iDD39e1fvcamXZB7OBbcl6EaKa2ZaFnrbfuNdBONQwUDAJ6+3e1+UXPmedRDccKlefkB+YAAf1/e+08VIDJ2o2Id6/FOtG692jX9oyOHiv+DIAbAWQXUNeIpv0ZDtrWlFvQEeO28j/gnukrq6avCnQAX7N9+Qs69pkn0JwMLFyyrMvD+XjnjTCogiK9h/0mRcoxzGWon9OjMPHSMgbimfWTBES5z+T5GOWuzwXidTZEL0YRYA9h2wF4gZchlzBKgGToso+zicT/xX7RnL8fZzigZEJ2i5BOHJ7NUv2oV0vBmK36ss4bww+QuMn5GgVJaUQcerEZ2WvqvumacDMnwDyTA0bZNAKOXT9hPAwQpLNWq8XGoSQqzJrvZCRQjES12sD9XM5e0ZzeXMg5CWPUXl6dZEjGghv85X8vwyZtMS0kyatDF/DBS6DY17DR9tsRaoukAnfLjYtEyYJGPyTQK+4ocef7ZrT9udtEh4eRbvX6f8Sa1DkdGjk3EeAthM9xhUJa2AvXRfDH4duZmzuVnKw+f7X6Lq7ghnxxWHhGYuDpHRgzh2plnob2hPXFx7TC7uf7JOQaGhSvO5MTY9T2HxMRptAuTO1tvweY6Geozj8ptoc3WFI3q/hCQBASfi09NQ5qMCkNkhGKNARmio7ulPU7Z8mvoGQtD9JK7oLfXU2bB1MhgpACFgHFu7JWOSlU07ycu3kL127Z11flT8V3UMGBwAEYn6pLMt+xP00QM+MA6QF6RJy/UTtCzUik2v0GybeFt4WoNKc0ZzazeQW+Amg3n1dc/qkPR54qAgs9O7JUm7Aiq+MuyImYq+DdzJhOvvNGFD422kZ6hLhzfb3RhPjGbSIYQM4zBh1GdYlSIb2HT5ivbJPXLIN1HlHSsHercI+jYRvMHaUjg8f
2gfeQEcb956f6JcsBB4DeuDhngNhHQrPlRvAg0lGzSRdecvjVEwPOV+HEZxVlBK/vi5I2/NaQiD/s27iECy6euqpk9ySbNK9K7M4AJsNb/UyhsmEEJ6Ea01DqqFMhlp08L5hr9q1CkSv9QrzUW6TIUlUB/tT4yLGvj19Ya6Eeshks/QOt95aCEWQm/M8ECxK2pMRlASnzFkTbEV0hGmHylqe91pC9e4PGxQLrH12DMDIImx9qSV0aiIzpDV1JDoKiym587jnlf9Mx3tV3yJytLTjVysxKOFPdytUDdH/4zr0nIl7Uz277UwCGli+3nEilsj2ze88qYA5y9KsxHcw7XiQMmTO4dyNZdO/CVZ2yY3EWygQX6PYA53gny6Ymtd9vF712y2t9C46FMXe4R/e66kXAAj4hl4z814MnO3weRKafDXq9LNQxGgWC4kYfcEy6QXZBuvNkePJ+IR2JHn56ci4nWCHX6P1HEXDOVEkEujLrHuQup1BuuD44/AoIbjpYLVTw52YO4r3DVaZIKmsINcZTcwEQ/Vxnb8N4R1i9gfE1QldP00MJTJ5aVDXvNTYRwGGjuuYWpNuIN4cZ7AbcFdgr/141hi5P7iUpjYX30Sde1dTuJIgVoayVKlzlTmgvO0UJFrpc0pT6qIIaH1a9II5enLGarnTapkL2MzaZDXifazeCA8NOvuZ6VTpcdjlYhOaq3G9aP+tQ8AM4VnEL8DIO+9V4D2W1n9I3PLXtWHM/xso+0HiX0ShC8NpFE105k/hMGjUTg13EzncxDJtmO2L9xgifjxmXsZPArp8VDutHxK9kDv8FkbjDTjgeSw1k98YZuv3MqWsBf5FOlKfVY6Cj7CH1KE9S7OGfs3zyvtvYFNoE2mrczd3W4hADmiCLPlIVo17sy2/oh1ds3sNNo0UrkivyY6WgNrr5TPh+0rYlT4RIjLnGFiDXY5q85J11zsxCnnjF0BmcBqkzBaaRkW09W+mOBgX4H+791E1lKT+Ny+XL6zBOCgU4SDv7SRc2nbMh6fx9OEvDG0CtM7b9g7UhOXbBi80vya3REDBsryyImTxqcvnV3PDbtDWMGy/l2waT75qE6svfJM6aLMimqcWcZoc53+UwtEMg/B1wQR6KoE9riRXw8HvTQyKo4kFnAwd/3DaDCzgFVbG9ZGK7o5QlSdCeSFZgtzX++vNbTsREcMrmvvmK67m5sjKWVRkaE3HZLH2DRm7tK6HOLDfZw+tS/Jpguqe6u7pejmdfjBmn/5yJQ9FuLZAtNWwnbtZ7lh7IlCPlz3Xjke7aKb5nOXgFaAz/zjOLq/jzhH5afFLyCQKnMfHwGbsA4OmVrVKRRkD84TjQo1ljj+0nHK0CbuYqJ+gyzMJuTYQmgqx17rZtaFrJaL2sf1X0tsFDrmX0ToBI2nuwCjsF+pcmxgM6BJBcNujZ1syHqYwrQjj3fvetzxrWr5BSYkb/gXNfvQ+BCBwCVSlRFZD+xRzF+B9bXInoEL8izvye6ZcjXa3DJBPvinbMycanXEsZ1rWLg7X8w10wC1z6ExglrrwrUm4JxHDmyW9O3qj0UPLguGEcUIGy67m324Ie77rLrVMmyyW0LueiZo5xYZ574neEQfZvNxH5f1Px+XdDuMMJow8juEzCPbQljfrJlh7x0Io1jLny1nJqTfoJ/9jqp0do3FP0nbZfwJyeDJstBzxT1ZOFySCxEOyqNNLSbgR6etDeXcCld3YWlq2g8WXYv+We+RRYGnJlFDaRU7mh9On0vVJbVyWzjdEKQsUfTxAUEQ5QRyyFGYTAJGH2rnE3kCQBnu7YF07r0T9n/BsFj3APE1TAYn0hTYm7CQAmDf9e0XNYYqBBIm2fWkiPtaIo6NMPrKegeGZODfUQdn889Phc0QGs1NMxZdVxEZQ9Lzrdeawr7BC5lg3IeEF5GFz16HZZ7RaiqyVlOSsluNerBSmx6yo6yGHTlCWmbM8tuaN4qTVR83D9O1gIgwxFYriQFOM
N8DEOd3OrrU60Si9YNL0Hxt2RsDNusjZXDtAEREYztw5z7RjtApwysgikBGaIh6LmB7KfJg6T35+0bB6Q8OlWpJQLDCSOU1u3t/E80Fla2IRTNbKi8v0QPArBve5Jw1qnhES2RTOQUT/OPTEmj+O5G5fMewqQwJAYuP3LuKbU6a2k2vX8qa9eKpngyY2580dDz9vMTibX2ozxl8vAMN0B7y20nnNqa3SYDQ6WMiFtglfz/ark9ObNP8+VCr7cmRHq1c4FHY5r/GfXqWSN9fsOGIRjNTSGuIOUyeudOMwn7LbAarn6oR2+vcPM99iGQzU1O5pxmHB14dD/h9d735qbsZEgUZdSQsMU/8+ZHqmhMGcwvxh1lUmP1DwXKihf4hgwE0jJEiS66YtHtZvVDkMlhV0usAU0VV3A7fbWM6o9PIfeImtqcsJtA67odzaE3q3ODjsa0GFWXcce1Jc5Ox++hOIyNfT7Py5xcFU6b9Kw37XtWkbk8DtGMyUSzFat0NnYxJgWt3ZWuddiX5EaAKB10IzsEGj5WXzX9lfGR+iodVsfefp6msUUg2yo8XaA4o+nYZxpxJsNxoI1zI0VB8577zqGhlbXf2SLfU7AEIAfgrpSKgQzarx6cMazWMHS9bQFQDDfpjJ5xG6PfWjRC5ElGiAZWI0a2WsYRfYhjXFQou5JWIKtjNt47WYGvoG+JmGGUt018SGatf+W0rhDTPKPcZ7Foq6P/8hOq97Ee9fDMkCGvF06sCr+A4Am9IhjUKBtSZgznrQ/0hUarM9dCNn2uwPOhFpwTAs3BmSZYAj0aynnxghq+WlHUZLsGfFqNKijS/VE5Y8OmxkniAKfRknic2wZfKOnHaDeHOgH0YJ7wwXUsRPuw1B5e6JSfVrIx3Nv+Al/lGyko8f3RaU3/O5m3xk5tJCxkTc8wHdjE4SlQWNdWf8dgSocW101wdjG3PHofj0zZQCFwst7Ql+lmSJxS/tl0UJhEGUsGUGKPrxV54lnS/yOC9qwwUtjUvzmmIoUoHn58OWtrzJq+6qtEU4rmIQ3gaDoJOMho9YzaIBqK4JAyboCJCCXFX2RbIg4uZQsDetH0vQV5QwKgcw9Y2Z00CXAj4wYJRQuqGn9EViHOLBO5h/4nIF/MGfg2xM4o3LxEjs0XqCW37DHssjU8si4BqwnCPmJ6wnbEZ9CbJbuN2y1Bi6UosYZkl0+DbKgdpzLvlWl/OyzM9Fok7lvvegCiQ97DKDBmbpFdg51QMlgDq/9Dw34qDHUPkW1OgUwgq8ZQghymZkNnI5ktc9pcTtCTgWRm62Eu5PlyM7pidL5c+x+PyTT6EzzI+FZnvJKMtX+3J+TBm+3ShTVF2TE65PCj8U+d4zZcggFFnTz5RtEuXgeLyCUhhP3vykST3EaT9NNPJdvPMjJnC0G+x/2Jhl/fzmwLLWVi3vxRjfk3ZAIcM1DL6tPHzZjS8GhHj3jQaXy/zIeZTfc1AgXlXxNxSQXYfizMM3me6iY93vT/7s56FoArmRh+GBy5PvOf10DSOsDqtSdQnB5oi08kTvadzX7G1TRzn68WVfnrmp35HjW1mfpy8F1gvFrkSSlCaI6kCeDLaWPZZCS4tGKvENFKDcSbFgdXwxAZk+wonTv4OZNe771YbJX86k7cR+ymYh+XEgdtp33VoqujQrCNQbgrvA8DA1V2Ly1nDE/5n1LiaCshe8LYa+1oPKjF1OJToAVDKmhdug90j4YCU9r53JrMCLEn1CUR3HwyZfQIokTcBpMIQ8BBRROAcP037pBFGYrD2/OM8FfhYh67G0mmVjgoMb+P0zey92YJW5EoW+EfyB4J9sEyMc+BvfdmMgzVZ4azf7TZ/3eDGrgrPCESGnf7aDxhrSAb3+vWQzU6isw6CTpAoH/5tbBleidzcIM/jMKhvvs3HxFpJXskM2rzW4V0z8gPGpaHb5xfP+fiDunjaM1e0r0ICsH8AYMkamn1F+O/964nVvoglrD6t6DfNx
1KXep6eZdaH/n4YlgyVDeXPF8b1R//Cv0fOpnTom1O3LBl/PPgG7SOv8BLDZ3Rjc3uw7OfCp1LfkhvkdtBd+T0DRVAfysSiJB5fqoVDYILEjoSUhCrY6GVQQlQa7eHwQQoqwx2sXhL9FUpyOD0mSMQO/Nb+RhFCHHRax9pMuThfGmkjFzB7vyA+10QQvlPcYZXo28hfe7E04rFHL5/0vUAVc9kgGphR3rirR4iNeeLm2P75Qp5uWPaM2exJCFWG3v11rvD72M+qgk2Lng3AsL2+1i+h5ffgrwp/l4zT6vbnsm5fhPTxyaAtAfPDJ1JsPl25QfKUaJdnoDcTPVKpdSMC9oUIU3vzRzOJsBLSQFOisrjSKO+Hqbh4vjXYkV6vxk1g1Vjo8i+Q+R8VpSAD5rG5CeAOQ+hFC+z+ufgQTeddSpAZwXi24KUETMQNxkra4KC/GdH1RxImvNAGGe7ksl0Q2mEctuVF8tnKrjVgWchuwl7ujOQgmOth2Mpkn5HaYD1H8RdM6/vVadhpB9DH7cI8JMB9oaoZmvrBBKk5XETGURA3D0FmwXX2+uOXSf+blMc/YazcptOXpO3bC9kSqmHqilvomOpbHpECw1DdAd0HnS0IInuKQEq6N5FaDRIOtURRt7t1OL7FYkoeaF4wgyfl6tYAT6PFLFLyMPBWM4jq1WDp8Pt7oElWwT3tteMurCvZMFFE8Y0VrlYTRLGRhTGr+BYV0AKxg39QvokwCbDrAW4uGsWKKwNTuReOKI7hkIU1iqf59+LfPK8jJrtgFEr4QZh4l26rzEw7oINGe151R2gueQvCunXN6qjUTC3QHB1ti6pas/XIzC7SJx/ABxcaAS8A+79sIKoC2UsKTLr2ycH3936FO+5Jjgu4Xu1kt2AHlGYdYgq9nUUHHyP3AWFKYDIT2KnbMxPe1ZkktwimVLT3bA4usRdOlZbXVxRjQ/qJbeA031rjVywnEqGfHPYSHfI4ABYyT4IzBg+oRqjN4vu5aE3z8aM3ukYU1wwNbCCIEILL1cRf7UFdzDnhW7jO3m+MehfTu2L8WhJr7/SUjLdI2n5e/4Ll0qj0H6kfYKBncSTjNqSqgNMmBa585lmK5JLYhDhbg+24fygXyreD+58hzm1rcmlV50/EzmxHpMOuXWMwucR7mDAob+WPWbCD79ZfOlf6oebiC5prhI6n9IlqMHBVK87IZeKAat7nMeBOoY8Kgcbmm7M5NsxYgkijXC5og6sRT+4I7UhQGTRcGY3vRcWzEFq57m+AJXfl4Y6i76tA/6nmQWPZVV6qTtfNSP0z3lgj2gqMmySl4S6veKclbYzAkF7OXA+/xnFZmiZ7XJoOSPXH2ip39q+uEeLG5ZqpYpwoSTbSJ5tZo+hx6ZSTTkq+l8VfRdPShOdOx6Qhxql2ZpM/ZMI3Q7kG9xibIeSdON1478+uWyAYz5/1Rgx3G5sZEkoGRxte8aq/Tjwmtm5visLEnQp19HNOsSSdcCj3rG6VJBZwC27Wl2XJVL81mSLFUIDsUzclrgrJDl99BL1cxw9py1qI4/jW5/RzCQeDsoQDYrv86tblWZDr/xYlqXdurxaqG7lCPQhN+y5zsjBgswXOeomZBmuuq3OmS6yJeTcS1PVr1xQKEsf9iM63kald+x7ULse6tpuNkcPIMiXMFdE2vViu/cRpc0u/nHD7JJFEKWyFTlkQjBiRUuAVGOQWzJNEv4tUuqfzpMh8kVYQfbT0AkI2eSvMBfKcnDOMsGrj0VoQLuCbGO6/LbNjHD5yKX5SIr3BOIKo7KEHkz36gpSpmrd31woPW7hZKjtaE5ylvj9Mw1wG6zrbHUWSyKBW5XTPvJB9qo12Eq9hQiKIig3sb/6AFQYVT+aN3k3YYFqFFz7zyAJg54rVI4vMPjWzHjAYrKsTWPagv0JIxqvs0zezlLhZzUxyHfdVbCqyaMbk8lCCmBO7iaVJb23M3uQynAemptw/6nIKSCRfYrBt6eeQxnK8/
ZKZZ3sf3F8CFgYu/sXzDRdfrZVcrqmpxaOdY9fm8e1n32Do4xgPv0Es/KNzRiNWzZc92r3GxqfGz3zN7OqcIzEYzfoMyIhW7GnGcyMwEgY0dSshfrZ6ySXmlxKK+vLVBQd4d5+kR4/YeZ1yGdfUxbmHPEK4DiHPwo+r4Q9ScaGjbgUzEaiSJlhDC9SoMyIAjPXYtU/ozjrAV/7sArUWiKxZoKR/okVfrZG4jfqVgn3Eac2SA1V5+kkp1/HXg6ra0cqf+ey8deZ1K4Ugibcg3DT0DI4UBtkyECYggEKcmnWNoVzye2Vj6n6p0SrA5wEFUYvLnMZYbf3UhrGcMajfS/OlRa/+fhNjznRuVkjyS2jNa4YsP2ttukxjvRnsCv/shdtbZPokfYayA9tVyhdLiaMoDvZjPozJawuMQmtjdW0CeTnfYdYZSQVETTC6lgxwPiq4sBhBcSqEutR5O4Y92f4hv5tuHEJ4+xKh7tx+jT9CYXlC2hQ2ITJjSIFgF7bnNi+bDsrDMREQqDiH+nuLcJb06snE4BdzkU8BdaGVMz1lqxWdmmbdCwFP2zTan1ZFFqMpLO0kXnf6sGVLryaJ9YaTybpu+oyYUr2GugfayCvR39vr/4Ftk127XB+P8vRh6K4dYu3rEKLON1aKkHuOplcgNwZi6MZp0Q2ndDecEIEePegnfJe7jiKw4zia/SKvKcNQp5Zf8G6V65WUq2xXnNLi1p9U1EGNPg+Q4nR7qhvKJmxrd7jjWxTB52gelnnNacOFL+78cgpCZw0VQNFV9HESCDi7yjXfY7m2taQa2AAmbhULV2YlPyPmsP1mNbMRfxJCSoIp2I0N6pAi4d99jpUmQtduhcsffyMN4DOfBeQtJoRXByLFgWuYulaLkGQ8Ixp3dFEM/e7EDW9ji1RyTu7liGuTiVSji5c5exPn8yDm51a8vDfshbg9ptuJLkxUY168mAsnai1eZE7wIMTQJYIHmJ0MsCYtKCFFf7Q64HKVqmMhnVfGG3epK7UMQe/+e2S50+Jdb6lhKl0RTHD3u5T3waLPVJVcDlSTUvRDth0f4g3Cbo7txm98bwNsupC48NYNrNgKDFTz6Se//TFYkBcOnFEFFlc/F+BqU56j+5Yn2pODpUVqF+zt/iZolXqwztt0Ahc4lVXh5DeQjt/5UdOjQSW3kvnCE684bDrQIEKwa7yWRBryN4GezIAFdTeyVBhewt59ZsIg9vqy+xwtXh+2UlN/kTbpo0KdQwN+lS+aPeMzJ0QnvpTwDbh+daoI7Eed7rnf88zDESXC5onKHXVOzUpDIfMn0eOAQTNOaOXqHJM15loxhGXIFaJ8eBuZ/3qAX3Q5k2uy+Gx01MRlenIGbLkefk0iqSOnFoent+C8fkwKpJwe3Is99Hnf+oWm2CedcY/u2UrK2fydvhbfzw8iunwcV9EeP/diIdqhLpWWeqYpI1vt03wWkP+3BtUFVnvX8KxtggU5OCe1SmwoiyNA7fXD+WZuWVnFt6isc905oyriYGb7L8JjrV8XupdOKgxcwIXmVFeKXK4RFjRPiDahWDIQPloFGf9Yv8yvs8wS+k0SUZj+fiF4aeFEXYhROGUmE0NkR/achop4/yJSqD/uB7NStT8xnbqDyU3f9EpTzbBRgdyktUu/XFcadExVAq76mQRH58aCSzhnmrfOucmzQ+MbOuKPNqe4EAEtgQ2t1zwDBcPZzRoXOUMWBQfH23L65UWQMnUckmhW+P/6f2N1X2IWMbz2+Fv31LW57VCr6p4edg4GL2bWj2ZV9H2njbyZlT4LoMPa1B1jscGMgpAOKA5kzssmdcpc3bw6k4h209UGwb1keOrHKOUJOhik5P3Wwn8Jj3fC3evRQsHisJgo9U2Cllf2EYXua5eZa9UkJ0T+Urihq3AmWWL/kW4PhoEYAAAAY9u2bdt2PrZt27Zt27Zt27bdITrIwcjUlXeLe61oCgG/sxUiz40kFngOiYH0ewNIaNS8oTjCI
fCuHuHhkVCW7yhZd44oje6WcupQsjqOcotUG8UHrq++DQlfU7fHh+wG5A5vLBgbAHWGHa5QHd/IH6suUs6U4zbCWk9fD7ga7CMXMNOb3ptACowyF7JJn/t+pudfZMsaZj/nq+c0/2patXMAAzcTk3m9n0ON1KGAnLLE+8AO1S/LVTEaqopoI0QWFr+CgxGQ9MQ/4RvAqRJKvG8lY12dKbLhWzYs6ig2q8bpzU+3m3N9VMwZFliMJhj6lSSuQ2OwWegl29tsOgLDxTe89eZXY1aVo+u/PsRUhfJtl7RdZeoekbMhczaxL9jSF15GYujX2G55CwXh4PwDZhv1+jBwzIShEczLojQyHKvVKisigV9ALhKwQiJ/97kgLsbQA5WueOimdhguO4o4TJ1dynG501XRAD0BUDCGV+id4yiFmyKuz4FHRv7ccvJ86oR62i9MfEeodLejhpAy/Bsz8sIHdITdMvR76ipANppDn1ZgylIAbDEv3/kdZ+qfuWYHaXhX/xavw7cqq/XHKKXM747AdnYSsUIv4cZaiKqm9AtFk/noFC775tQZLkGUHe+uf7sHLl664Cj35HVf4jBe2JbpzBU8hOMw3U+AmPeBlLkKqIjaaV0FIhTXNNeVo3ariR48mxO6jR21F+O50u8yELzg1Lv2MwBopuQmXqbKAAxeBUghZHeJUy+1y9/UOgI7FRbUvrmFc9hMz++eU7yAA4Tncxf8+wuBjx5hBeokK19QJzdp+plvG4KNdQUhPZJMQ8BofWdi7JbK6QUqi1ee77MPbacZi4AK6StwI1rzkwLGytGjC3IK2jVoFjwD7IrYqua/SVlOqbUsoz/s0KbjafqLYVGq6mK6P2IpktHPK+1rrYuWQzVay9Zj65oVSogERbcClWu1Vf6N/IhLxvxCEe2yTHaBFRPrzWNvQ1UzCm9AMN6js/tMogjw9VJUMUQTmLMWMd7V1mt5PwXusR+kwmtyNL6ga+F1NYqIUxjYwopq6Yf3XjdnriVtTCzwQr0wjyZ7SXR9WHK1cPLE8NnST7Nl2Vxn5nsiTCfAbEgLBTwu1QrJ7m4KDujZPz5eD2kqEVs6kdwFoYcqnS1UOPthjbEO3RE9Vue49YruVAHZCS7Uesq+bpyMh/IYpiajU7UxbmhJ8wD15bvF+zc30UGmqrttC6R4tjjM99Gagx22WHAIV34lDyQvMx1fPh8NSnfclW51Ah/cuEj8eRy7Lkw7txTDON9XAMGVn9OvKpbm52TxCeWlPyENVxVagLCu+eYVaOziY3b5MjXcBKOaVCCM9HXov+0rWA4BNEP/YzLf8xLktvMFvao2sDwf2p8oiPJux727l7NgxslreQPMfXZTH5Lq1+PcC1c2nEHf7NMbRYrtoxxdlEzCmQoZXtPpcdlg/ghrRj7uQ7r8gnvVlTedC6V3K6a3MXA5GjZm4GXyZ2rchghmSGrYZZB3fqlXgnV5Lz0LDn99/NxloID40PJwjSiOb4cSuJmlJbEmyi8O97RxWDXuyBugN2eP0bE0uQkf+nykapObzy4FguYNhGMYeos5feZZ80YJjwvCRJte31Qgn+y3DmjncVcSTN+eJwehp2o73IsOq433HsnP7zJnRfCNBWkRXCMf16KATQ5zX78K6plYSXptCwXDOMGEEL95PWryxF6OSpL/FRw7lPPgfe0I1coiMXuNhmdddCAbcMVf+nmuk2kOeNAixlSN/swq5PzRbgQ7ncACr+l8BVSS7FuuNCeAMEDLge2vA2OCYrL5mwSJPfCKXpD9+Dkx6154CL31wcKpoaK4CylLbdCnUfn4VMwIpCestLJKjidazI4weYki9AWw4dHkMFBC75IItfMefOQP8Ekgow6w+OSeUtV1snrDxpQgiVK9Y1AlUSv5BihMuKyALfgPxJ0Q/pJnSn0pNKRefue0KD/sTnCLWOP4bNK73lhIGPBsZKqlW9pWQeqpToJ967z4SFrGOGA71mFDsGVHxBmq+2f/MmcKA
87v9rHaQYSwC60rdgbmtTae9F/kHRL+DTvF3+zFJUrcTEDMXR8IPKsIXgRXfb3+i905NOpZBBZng8+/LmqwjDEAVaEkdc/ND2532JC1fc5Lc/k6mJatnbtmAKKHoUYcZArtX0C/hMuqF9yndQoGpHZaYApUgHRB+jeoDQ+GEzkoVyCCDw6Ca1FTloRNh+KTQKVyyEJh7mQd/jct3xlQIBDy/s7eOMHQ5z8+UMR1m69jbizlUd3hlYeKsQZrgnjq+OmfPco/i6IYJLOG5E1klh+CqTi3T8C53OybeGEEX8GFawG1KwPUQnXRY4Ol+PvTfDiolxBHnXnSuW/QLvEzMcvhLmpMSi2FK8GBQaT7GFdZt+TFCKq45IS3yaRR5EPHJOmwEbv+eujBR1S7wCLECJyKFqQpIHJPMknQLnnSKqtF6R07xqMmlVTKYoUeDq6ovGUe64XlxD5OulVQyhSDnRMRZiXriweUPuEsChi91jU1q7tFgtlX2Q5SDT7tf3Uxk5yOw4VSRQnxBm1EvYskWqkzlRkmBDv2BnkigwbdApAxv7YoccgyK/67w6n/wltb+EqDgmFvQkL11wzxv3HNF8K3R5X5z4jSmwCoAa61DlRmYpXIpNNeJp5FmkAhXrJlujd193IQtsMh0BQzU2jZtx0cvibw38l35QKtPs0amLlzA0bD5jdJP/jE+365CpCmjsH2a5VKfWUiQuzSxZnbqqMXCX9NV0/RP0Cu9V+qWiv0lHMmmmf2lqIxvR82UixEuGyQUCu9fB8o5RLQVQI1rieILP7tAt6zElYhzO8krab4mfJUvmcFjJR7ZCru5jAlCUBsI7H1JA2usfqu6BszAalmV379G6+hKFjm3jee1xd0wbSQbxRr1b7RBGVd/2QoidPed46iI3t5wMoTzJ2VGL6zwtYDytdg3h96/Dy25Ngh4m8pOJXxeyvAWrnUGBVpjFkToNJLrbA337Clu3JbyYTfH/yetiKlH8MJpbr8fSWYQAqh0SHVOq4X45iNdYLCsbd3AqeDIenDlBD3Y2sjDr+j7K1FjrFdkslZ25ZUEr7wYKOAXjma48U8J3Zg4K4v8XNswgiqlmQmBy4SgN+JOFHTMnDi0FdptNkMrxIf7vYsgx0HVhTzsTzqlc7+QFtWAZfOSN2etG16lYqsl1YjH1xfo2zJVNY8O4QrM/GLxnoHPgxZaSoxC1VxF5rKkGFS7ZObCcajVS2XLMLAAP7t1BFRB5cazZcSEXMKpyfOLRWF93Ax9warXvjTPdlhrp2MOi+/MbJYuDzyOMu5sepfV/x0zdKNMQnxfhoYiLXOgGlFtBvWMrr4IKlXM3BV7LkCFl1BJuk1a3fzrULVEBbB1YdvycmOMQUfLWTzFq2ahCmk3eDzVDx4/jfulA3j7eEVxKzcoYZaAn/tafybZMRsyoC0bFUy2sf/oeFjxQehWNulZ7AlmhOzQrVik6kw2xqwBKij+P5412NxlwGaXKe34KyUIXZhBZ0qdxGxYrZafJxdSG8YR9bUhd95uCd7iGMPEAoQlxwylyrjlN713gH9Zh9iWvi7RWObcWl+JNh2ZXSacr4Qi45QatoZOZzuXe90Q1yxg/oMIbFGCrJvxcscyR2F0M1D5nkJItDcWN1DmWh/Kj1sHwo+EsrdAokseuL0VHIohi2+1a9MbzV7AOQrst+xQxeQ9tX7rqOGVDVEBeAkNSvrZOzAlHDx0sa/DQGEafpmfICpuo8ETsJUBSo/yBKG01Y/GNCXSfuPFEgeqK2l7Y634TI08vIBtbgV5RjMVIVFtqK/Jh6c+/IGD14xWg8i+bRebx96HHEbeQti3tgKDQq+GJW2wGeofu1F889z5ve5vLfX/6BFZebMK+Z3GG3WXjLkesx8UfVPawUGMU9oqCHWsSN7meZM5EYdyfjjV6ktLEdoCuTV/e54Z88i5bYHuJw1ByC+LBrDuDlJn7/IDsmTP76Q3FZIGiUnAYRUjl/Nscknjeex2GoBI
WL9XcHV9F6WgyaNOiZG6b2Kz14oA1dF5rWGa4P8drD4Isdb24QdWWI5pOfCC6/yB/VoUaKwnIzwjfyr1k1ZAJ76n7mIZXQRwTcEVznK3kEzFE55bXn8imb01iDEEK2zGXvLQL6b7Bt8VjEIPdazx61iHgL4EXID5i+Uoh3dmhGxYumYZT4ICU/S5E/iq5CxMW7hJy3dL2G/BrrDniXVMZA3HiiX2NsRk2+kK4FnMnrrXF0NWMjlIR4YO97H2jzUWz554FZQB/1dYdXBgh+EvlwBmsSpTud+zOg/0pYn5T00bWKax160L+N+2cPvDbMVsX3PgUHGH4W9gkVNRX0N2XC+ADvLHsMBbRHLu1cHxKTvhB29q0IjGAXjLvxPpsMJRgruPShcElsnbBDgl8L3gTZaEN7pkFFElRxeZQjlWhXmkauhGqUDKb1BOFYCzYEipJXRPLocExHDrGkLZ/CSaACSlhMbpe/JVsKe7OYK+Zx+Qp934wCoHGnWvOKg4q1VeV8t6ak0ZNzDlQPXTVnFblPy0UnPiFl8QTVwJyCjOBrw0PL2Ip9fUMPA1qcBWanqrJ4zUvODXyrAA8xd/Z2Jgf1v6utE8kHF4S2PA+1/aK8RZjlGUnV3NgqZ+iKgatqbHuZgyhshQvCLL9YyOxqDdgM2qDgwPvk3r8POYjzq3qJTvgIuEQuaNRG7RPz5ELLrUm2VfNwH5VmzLrEfKKi7HR4IvsU9JE5nClRsXZH2f1gXPJlmkIIjkgUii8sWq2J//REJtoXzv7+hlHDLYj/pmWWfg//8l3cnh0i/zS+Enb1GiHNVwnPKmtVhSWOmjhGghXaNdgu+7hjbB9ZQ0YvfDBK/xA3WaWi7i/gaZLtrBZ07ABrL05ua21JZ0YoJ65Wdp2uAhR9xqG94jQMypo7mah2S7XmEZuANola/nRs6SOthyQZsSNWI06ckHvrUEjQ1zqMCNPzq2Id1CZ2Mr1E1CsNOZT0AcJwCq91vXVcOsoUxWyby8VrCRykjva1csf37pI+SSfZNbbZFf9T2X49fluZajuXkDOmlHn8MITcFLF2iO5iTD311rrUkszMqGdQ1jFENOfr3HK9JBwInq6zVzxMOzTnsGgnDDjpP6jPJxDIAzjhyf+4LiBHzSkdnao4u1XPQOpAK1IFefQohl9u4ccTv8MTCr9AZNr3Hk/ZQ3NbED8IOgoKWYlLJJq2H1IVRpygliSqg1Falu8dBIZ2ICpUgXfw0P4d0XEvkt4qCrcrkyZi8oPTpUJ38mZetmo/k3qcLtCz7xCrfyBiXy0uoB4kA9xgX/jw99bHZkM9DaXicU7R6KR2C7YO1Jh30SEGoNT4aM7OD5kVRx+JwSLUcP/y8OByQz6PZOnKKrrbJMkQN1FHMp/DajiQWIWdqomzwuL/699n4EfDlu5YLcZg/FZYEbPLffw1ysZvuHgEc6r663ZrFew8KIuDRrzMHNI2fZWse/HtsnmyU2Z1kVtc4Dbwg7k8kf6kPH9b1crXHEkVmfBwdq1rDomjmTmBEWvfL7K0hGpAYWScVi5Tm7QFke6bspHuOO4bDEn/uyibF7YyC+12VPJb6bWnA8ZFj073uJByP8ED4ZGAh/P4WwEiK0muC9QH5Vrjj+RIcI3hwdR+BxfelzCfGhgtSerQ21VuojgGfZ/fHXddGVLjkKD4ml5i9Ai8F6+H5HrqKvKK+G+Ui0fWO/GM5s8wWMdjPWPbSqhr8l0jX+CfhxKX+XmfNzUlFWp1NaQ62cOp1/EK9JYTyo+WcHYTstuwHi79/E5b4l7VhNb3y5e4D7oBKbNZPR1XPhNIrLBAKyurKuDscmpYXfKWLzcXbHWbs4MbozlrRmM8cigQUW5cnd+6v7kK13Z43crblx3gF/YpSZjgvRiipMiPXud+LhSOxuur0WWb7/jZURr7u+r4n/VEb39gwKvOIGlJUR1hQ0V9J7gERaOiF5Y4eWRl1hKmXENHTVRZuWMnbCSmNXk5OluK33
BWtNMaBmXUJwLv4Cxt56rBK1zEmtjHo1yCQhbvb2APwDXCCM46lrLeYi6yufEVaRE8ccBIo0eRsxVjA+0a3MMWVh/pdWNj0pAAsChOjG8ORGKinb890bbBBiqgdMYE9TFzjaEudHCDr7v23o+s28LQZJwHqFzn8m3WBm+YINVcg1Q9ZXgpgwhvdW9G6KAvLjKqt8pZSSbWPCZb9zeQOy8AVjMyYMXiJsBka1DZ1yaE+PUdP5+xbgnZ0YkvlmnoiO54spRQ1teVoDgs6WaLMed4bccr00CT8mE4g6jFGEqBag5H/EcOF9d1iA9z6mz0r6dC8fEMuhtEbeLXBNEpC9y0Q4uLi+G3T3pIwR7gKcTiZs+SJlUAYIrDkEXiCi6q7+zBb18+VWOOZxSzVS8k6+Em/ZIWSQ8Cfa8zwpPJ7hAIymRUPXQB0Nev1Y09GOFu0rhCtC/PjfMezlFrW1plk+oqFnpATWga2qsE+AOy7XGv43KzAxnx2g1RTxjZgc1ZVn8j2qRqktkl4PFfBOyBt1HjaOm3lT23nGIQp7fVzkmuknckxEQoBI7YRyxf4dMptj9xCDXxL3k5L9BarQoQHhiaRR7pt2oN5AHsxvs2s5OQfHwQNx5HTs3zDEdQVvYeT5aiiy6kDw1VUfuLA+nGFDnZTKPI+3eYNFfVVk+UGwMSLiWWrw+6R9Nvt06fHcVtOm+5earWOXfYXJl1AoAdZ8qlS+SGrcCqljqelylCJkpeRNwleDbEBYZ5zXm1Pdm+LkYvoDlKAUCdKsUAzLv8e6dNjtOUsRAjjfopx5JcHaE7m1Ea0eyc2S+qbbYcXTEMDN6d9fIU9FmJxjTXymPnk9vxYE44SUgAeg3kecF/IOwLTulx966emmSnundnlcRm1BTFhc4LuE3uahr+l3+Vd7HIVPPporjBCd5cAyrKvgbdepiAix+Fh5vcpBrcy3DsvAfDYXpEXRZjm5hEhJX3nQcKeiHoTh48DqjJebPQoAwKQ04Iqtx4QGkjf016u8nQwdjYU/IvgVoU5NNFT1IfVN9GK6ZHvRORR6PzjJ8VJraroF4GNtYjEkxsI/qrwPE4bFobskSOTv0GE7eNUGgPsOrxlkF7g1ear7OvEbArnK0bPZT+tSHciuTcZ3WaQ5SXSBEOzgelkjHBhzEUH0W+clvp3vbidl/HYb2bxuDEqZC8GV9n6bX7Jm79sncRW8i07909k2oB/4Np+eid0fMn7FFU5L/G8uX4bTPUeHNLV9VwPWi6a28pIW3KsSMA6aGyMJybrR4ks+HaCRAkas7l3SFrZafQU2QJ1GXkkIdsibBwXc7qVVikmRna1Uca4dvScvhJVs4TZIIwV2odwzrqqjvEdGninpW6dAL0bOeUXwzKsPZ1BU8IfPnG/J0Ax0StU/Vu/wtt44sMpyDyADIXVKAGOZlEcN8NULLU4q+ntW3/bFByhU0mPvJxr8Smoz0JjMXvPo/SUGsz42woudNXghe3cWH1bdqa36vLCE7FfR8z88F/B/JImnX5XFckhV7ZRuFa1xTEevj4nyCJJApGqifbyvHRDtDMxb/Ol9O4B9wha3ppyWvRTd30ZWaLvLyInAzxVx6jrxMB+aqIcnsNd98uT/fL7LmX6vqDUC1nSGuBqcJB2tdwwGpmn0OmMmZAC1DdY5oyjTd0/2i8nC+6ypLNBzoM0fkvIEt1Ecg5q7IT97JyIpciUTQJkN0/4gMdpvYrHc1y0gWTTUsAWC8YkHeDDytX2eT9M3rTOmcFc5Jk5DgVxtGjHikHugwg3SE7p4ViYDKkIq1B4azegV0xQ3Zf7WtdXkEU3sbevY7m9nI1jyRMYUn1wwTB1JKorvM4E9bgWU5akeaYvGQEHwym777EelXL3Pxahnhd3cNPkWQw0bmQ0wQVcfWoURzJUUt7ue/IKEsrCt6NMl4GOGpB5o4NeGe5NAw99zntLoSP4mR7qqx742v4PZy23n4U/PsLf8fjIY98Oe3FaYIFb/
e2SZyX8iWEYny6anF+o9mhIGAMNuB/sIiLRtPjJ5vsn0GxuTYW4oRTbyOc2bhiIKZNqXqF0SI0LfzGM3kWnlmkJVZM9cuhYyIkCioqTy8QjcdN9AlRGHJ31YyLE30kfykIe3CQX8JeOXxUG6Bm56Ng1W9bTqIb6KD2lnzx2PCVUJFeQICE0EHym9Y/OpKGqjwPUvOikHLuyRFXHmvIsxTuzgjd4oYcqvAiEV2j4u5wj/tZGOjSXCAvmXn9MIhujkIyeUbh2rcsX3u1hPIzfgzOBIjFLk9rL5laa7VripH0PfTESKl1LYxs3wx7NWlEDxt22ZdjVLroFhQNcEsuhrIjHAldF4Y6qlwYwfEHdAjwwwNuO30fr17XBZ2psBbHXcBkwniVGvbXnrdWbx6gy2VKhGwQSOnbC72kC5rhIt8zZ9sujb78uOAEB3MPxE/JkrLW3iPk3stqkKvvO4ATq+mSRbZikIEwgrrY8rK8VGaL6NKUSDNHVte6Cv4yEY4jLr3PQ0CHdTolrLqWan6LTtEC/NJRZOUtQflmO65UJ89on5ZFhxOfQ7LzCIBpUUTeJ/hkemYmL2vjmjwW76Tb8YRqQ5OaEfArk1c5gSgQsytrA/H8iEfrAfPrAzJkwegU7NN6jQXQNC/EXP7fsgCDc2mC05x0JLz7oYNkgtU+Cr+3qkSd+n7FCFCVsP+hbrMu74GjZ3Ai1XqdMcRei0/nf3Prv+htRwtj67R+Qup4ywcWDJv86wKZNlwULrSjYo+aIl7j/qhGNvIm72jyXlM7+tPvzBPtsE8xWtBLy9eRM1pCBED9H+ZGl4BNWdv0j3JOGBA7QVrLGZhumpfBB6tSxXvezkHKkJu1X5ZIBKp1RbnWAPWodlrafKAeEWtcJEvVJx9gJjoPkD5Y32VLiRAa5HyCf7VuHvutPxxCciL1nc/0vRFwqpDBjhITqjEZMiC3SJ1QNv4/aD8m5cMPT5l4x7oBlZkqpapTno4Qi69kvTetua6KIPlGEqo54a7jgKaN+WZC33VYUnd8V7gctmJrUf9q2VtZhZXOYxu1foDS4xqkA75JSEwEG012IbLWW102pRg2eMXeXC5vp2zMHYD+uzkFaB0DPVUmnQK+cwZeCGBgP0Ywl8V/ozQeFplLcpaGJgIhuBGdnMTm1zR3OBZeEdCa8csCfJ5Eu2/Pk2thsRIaKJmM9e+fc7mU01jdHHLtZdPpqIE6mtTuBDoG5yvwQ7MeO08F+szKjDunH/y28UOT5Wj9pZ5+6ge1EsJ+0JLqSb/hWig8hgMS/fDJldjEKZMsISfl3eEQCVQT6aLWhuLnveCRcZjugf0Kx/Pj+MqHMSSAIks3tLuRboWEbB5w+23FjbvqOLTXdWBzRIsoyz8lQu0uWXGUN5rhMbvQEyxkqvdbGINIJN913BgbLoPAypZkEfmmYbh09t7nNW4dzjvJ+CTpRoIc57nb234h2JDjKYPaEVPGoL6Z4oUYVIpNhdjWHkEjpa+9aBgf1+/ZiAmUP9kWO4quIiwTZS5k7EignbdvaE08S2ZWyTnXO5S+SfRi79ctW9SZLWHSzpfZhlhjJr3zTnaebeNEaGYrT/JtbjKGcVADSGxmxBzfr3vD4p707tWF6t7783PuJCnj9SBTSOTOqwcpR67832nPQmTVcFlT5Moq9l122vIKRexDcIXuUasxNSadPonbc0uhr0G0zHNY7eaGL9ygzFner8iHkN41ZOKccYQAby8XOs/AutQwZ5UDGFyLBq+KjxUEDqMbz+febLrePzvzQv8nx6VHW5BGOQXSnSJy4PqWxUINXV6eseaD4ZfxvkORPIdwyE1F2TsE332rK0oXGeCzQLvXlm4HZM1sZPm55bPtYDQa7T3dycRQeqd6U18j6tLMq9/VZEnuOWLZWlXhICUX5vJRiCWrCVTAw+MJ0AnxdU5IHiXsW+e+dG+DLyRJaDJA1s61D4w3SYDRnEURT0CK4qpTqYy5wumDgIJr9G8RjO
W2Mf7ASd3YOhrDoHDB4GbToakJnpu4W8UUa+dHbgGd3JciDlgCVEdJjQ+LAetH2Z0718yVGyshbtZnyJ86SxVzRu002LyZG4w1efSBIKup5EYHISNkHsn+mg/yQrBVuJTtCaxGIqdvMm1VSJxVQj12SX8HFIZoA/ZhGd06TvQrJRQYpM+AeseWL+bhUxzbBoqMkSia3M4hzk5+9H/jKSj9gvQT3IlJLcUyY+AI3HU4qz/iBEzN9qus0CT6E1nFKila/hdi4ZtZ0gPgKHq8441xQaUfbCrx2Ucdd2+9cs5BiErN4S7hWZPBlgBLbt5rJdj795DviM5/u4SCS7JYljBf3SPGuiSAZMZTuSoJ47IXKGHYb9KCbsB2h1NQ7oHs8LpcMLG279Gqh4aq9wL0mg8FGBczoY0sN2OKLs1PpaT+C4l23kYbrG5V+Y798YybYlMlf+CSSAO5sc769he6nm81Tu0BZuvGJM19C4VtSENA0zH97K0qRttKFbC5lZ8IzLw714J68g6n8zBX3NY2BqfWZDZSBG1PT/AG0UgkrxIUsrrBCAvlP99wekYPQ3PustNxSl9VNhfxCm8sRQEK5TT89O/IJl+hr7I8qw6mHadnwBlkoTM5XgdEa7+YXrXFLAAMY8SOCALzw921ei4q71TUiL9uWMHEpLmn/1gLo4btkiy15Tb5SGnR+UYtXEWDYlsiXF9CD9zAZtGYf3RXbuh4LjkRawvhvXZE//QtOtoKsgnykHTrwQEnlU9taRYV87oBG11t+uxrSEuKeNARIl+n4o/0vDk2xD7zZ0b3rQ9HliKL89WgF1jko5KMdB4dJ1zznbx2QBZVb5yAMGQctXRYFg3/Rh7JyowPVfFWhO9fKV+8oq8k3UWE6aBBDISAzs2yEoJ97tGPdrMVdcaya9FXlnNiKPDqmDUL2+w44Szx6R6TqnDDBxsz9bhHZ0vcQAFzLg2yt7UWhH2SMJbkvr+v8qMZ1DeEYnRi4jGSsui6QTtlBMQVGp+vXc1MtbIUEG7htaressttbty2sgZe5Xr7ey0BG7zTBWSVe1hrOgAnZ+V65gh77HRSiU/IttYYEHmwi9FCReFGv3NAlB1RL8Qi7tL1kfoggzebPDsD8H5lFGOu4iL3ziEp9IkBfnZf7LgDkT0+Z5O2hsFSYBVUKPu8XUX12rpKSfB9rPnWe0ZRyVxczgJqm+f52cDx3BcOtZOFmVA8mlyMkrMJSDnsOsxy/iFdbgt/lG85jdh3XM3hGpB9zijw779sW9FS3NgEje04tWxIyYh7C4aLjSOn5pUvVD1eV18S+Be/2TG0rGJY8GvwYR4Z5lXxvlcU/aTKU7nceDTi2O3x29/sXjcysnWTyHzfeN1WIgqkswUWpInhSwQxQ+5r02/ao2znXSvtPUWvxUCbKBuatSydKwudkpsZZGTKicBbdtikNu7mWxgvt2WkHnzEGk4niVcNYIvZwUCN+W961St6aw9Msf1vEFmhsz+Zm3kgPYFx3h4t34YvVYKeYypwS3AsPQ+vg9L6uarAdO7GbmEOgkctnsqZypohzHKxulS1UfZsDE+XFiP0Bvc8DyjNfS8Kry3NxUDm6LamVwQMGY5nSf6wqqCKZekaWLLiS5InhOuso7Yplz6jN1IENihvkmLkXODyAv/tPP/pRW5bqlHe1tEuB8pOFDSuxhR3O6xqN21k46Qd9cOZCYgnYTQzsu2XHnYSCyZ6kV2js39SNg5XrHX9hJqGqbKfT66GtgFKfHkfAjoO9VnpX0RDfWwQ70f7vvKl7BPUM2fpYwbghTQeVUIpKfHq4xNGx0m9ITbYo2QbaIvxed/wLliyuZR2dQb1hMocB/XVoq3yttrMaXt+f02UC0yynxrJ8iVnch3S18WE4Kg9tGg2c/yTqGXQvnb6EfxvoeEvmhTIWSWgZ4URJi+BOWCsxi7k/Nue2K2HOGYjeQlouSU59jRyowVeAIrPtMxYqKh4TaYMbILnhJIbmhznQl
olGErP632HpeTwFP/Yti7WqKQX5Hsi3t7KlHe0OQ2Dfwz8VD8kEy6t8LUpZGRsu+GEQ2dlkhZj3/04HVJsFA8N3w7X7VZcoq6CRPPpaV9fQy03myrG3DrlNDLSfRkk5xpruXLLNZwtUi0mn4oMg4O8uLZ6g8+GkIDF3Xkz8tQ6lmcfoIkh3T2xysw+AUsEV35vw7vq/1dQ08ZDiPb7NmoMf5uvaCL/v7o0nGica9ZccXjDbTqdlxITLb5xyEPa+veRRJODjiOcSWXy/aV5HNrxVLUZGt4AyT9TaGgMHOvvPw0qouZvRfdhm0beYHV+Rx3/joQIZIK9xQRYsggvzw6YtHzHI+wZKTuZ3oVm3PCGw11JMYYYmwoHra/XutV/RApyuTZ8gX1BfUEq1Itz1JIznkCdaY/HkL59hcuyt4/JvnV+vwaZ3lbclraI3dbpILd9IjMjIyLfpb9C/NAD25bHxSS2X1I8KZM7+YO2RI17egp5knpS9xwY7vm3Mu0vdlfj4PA0SNk0b1csLi/kuT5cVUzYiP4E0/t7m8awRCzF86CpsADytdoYCYyutCxIrQyeToSmhFgTfl0x5VRGwspmzrJGCd/yYv18MOP3VsCremb/DRe30bBCcoKlxNbjNlvZ82GMUqwWXmdDdLaAIqcW8VPFE1LyRY4fnayP/2qbNR5xZZxDxkRMk42l699rKKtmTmytwhwr/HEqnenWy95JJnSRjf8QBT9ShbEyxQQAlbgKsUklctQVxqLMm9SGtxAyTlDPdIWsTW9Tlu7+ub1G8qU7ke+CH6nPhO6/thyzwE0hUHiffS2YUQ26AHQpRASLoRuBYvpBhiVrelOD3lSRLcqYWa97n4oZiiV3DJcdxepjEuN9R+URp7uEuA8RXIUoi7SOG/gE6dk4t3yKqDjORcK/gIufl0433AEgCLCHJXCeeiRPve9p4PzHw88Tuz0nfQoLmoet/5r2F2LG6Bn6f5S64AxIg/vhWjZTQQHAwDojF2psbW9DRsOwhMchGXWsQ/Yo6nJzzR8Qt0e1P7ZpUK4fMcUvUW1rzyufOF8V/mrKSjy2hlqDqi6vcbe3G2CBBaT4vZaOjNx3u1AASM9gHLFwmnO3/RDfJkKV5pIeVRMSzT79irY5qcggKSx9AoMQ2JVSyfCI9TJ+OsNhdnxQlsiF7Im9GmSx/rjZjDeOzwi+tW5O4UwUE6NfLptz/Ht2VvrxE8f9LcQuUe2dNLhYowRzX9x6x62S2jStt+okmZ1Q4lQ3r4WEmWxCx18S3k5fWscPIfliVJT+4G8jvEg0a+Y3yvdP+DpmtGFOmf59D/Jlirr2Jr+B81LZe8ngpxTzgrVmtSthpvJOD1aA6TKNpMvp4gmGWJEfKYn08GW1veSUZHhR/0GFkarc4AiupNRIHOdxLrNTLdfvVGlXwaFcDupmukWedN/gRaD03TBkPTA+QHTOv0igdogkycwnABfrW9JdykrwR4QU2lgRZaEqSmyX6TeLoOKsiJD4dHlcHWbuz31aYB5CyVe7qgkNMZoJqbWA1E/kR0s2R4J+dtbsiNF2MMwuS9+eDICwFsu/gI7IjV1BL5En7iKSfqxFfCnmTlh+rHeOeq7lVfULtlv5WrKFnlkf8ESU3qAkuCZvIO3coMDbVU5iHRl+GqgIIpAzDTSM5K44nbZZOANXzxL5PxvvEk0XAAqPYmEXpfC1WD9zCBgdrQpNfgbXbJXJNb51A6FyvFSsSQI4PxSPQsrd9nKNmbZo8Wkm/iLqVZIi5bJR5jn1BzNlexuVCVi63BwP0jRWlk2Uo0pM0wbQUeg6mtd68vPbNvZvD6jnQ27rRz4CoTXJpA+aBNh6ogNvBRq/ZVw5J2QCm1ngH4yH0rLEva9hK+aYAX0Hx0ea18Waxt4lkzcypHNzgoKVLunYKRfcrmTmT/nqOxvuKCSQ4NuQ0iLMNvWKYAL37491rKenk2xa8KgswxyXGxcq3fGRaOJZfZ8rt2gySwBdEl
yMKCFPmf1orqKpVvUlNjo7VLiXN/zu4GTOWbPvQ7eueSfiHmEGaEKG/TX2Jvz6Hu9rpBPc4IN8F3PF+nLgWfGfs6WquDjX/MkS/1zictsfzklfOdNvJXC1zBHRM3zB68Qcm1TGh2C75OdY9Qedpx1DZnEt4WILgoWYDMZsSwSKbeVI/8MvXNjxFuCsjwg0adU1tBfQMT232bRoyELy1/w8HS8tfwzfA71L3W+4gFV0grLhP4HRB1Wdk2w7YEVXuJgyMnUzFXihodOTjOR4RmFj18OFGAPPvusPdsYCWYJV9zYapI67gzXLNC4eZWzL0gWiHsjHEVbJNG/GxI3gLJaFSUI+ersdBEeQ2+UJgQPNrJsDkZ3lV7/GoH1MVfi6k07Np9jRkJ2u1xbmvJvaW/tWYWGfB0g8OCBkOCuziuRdtrKomT2UmpbnxE64f/HFm70cib1aniXQ+nbSltBiyqJ5RNZJoo0VYIgSPEP1jWcPjlpJgab4HNdrvpJWXy9C3Jv7V/8Js+5qei+qqfhOLPyhQksIgSFg+RZ+n2NLOjcIaWE40Nl3AI3a9zJ+KAJUSX85hBMF4TDDoWTIHzJTMphsyEpMTIgVxsROaVXeDd1kOmyGPsri7eUK1eJjID47XqXFFPSlMXw7XclL1EvEUR15symzyMs3ToCr18uOtCBEZAldIoDjgw9RLxVxOftX6isli8Lj0XSBtPTeR7FKrwUNjmB1/Gk78dszGqFKTRVzhxTJbjgQ8L1P/qLaWEXqyc64EGU1nIF/wSMW9OZbDRZjzo1n65mbBOJ0TwwVcqxjB8zsbMyZVZAM57fBMYrC2tzQpxlXCybTFES5O9SPk4DlFQrHB3xzr3R764hDB+BYQl5dFed+UIFxzr9fjbTk3MLzqsFym1pLpMELxB9KEfxn55graykfcSyKPNBn9+nEPSMv9iK3NFmZDqbLfs8wQulTrbYMaeskLnxptfyS9mumgv5H1MyRlxXVgaD0IDXBj5gnHdrVocI1r+nR53QLC+Ag6dQJm/dmE1wMEbaPJ5ZvJ7Ii1u5Zdglig7zMWDa/G7Kbu3SM2hZK4kuaB+HTW4p9XmGJSvUNWSuJR9ZYtfYfNXEJSt2ulinhjYSLvUajmZ7SDlzMmCA9nkHpRVvciYJkDUCHHWJE4ALOYh/2uS/gOELLtoAe15eTixvCv4QxLdQLrd+x+z0KL1o9J8kGgKC05obOQhXjHFpeyGkDc1J7qV74PxSY6uOJLKq8/gT9KgzG9UL1TLmdFAee9858QW0vWe35t8jaHfodKcgIQ3/HocTQoduf1Kr4MzyW44dl1+wXL31waIDp24cebRuChhBrEU4gkqM+Fs7wYBa7WUAcPp+XwP96UGcnBQp7aEgrkK1bwacylzhstBX3GO37Pww4J3Q8QKN+aJGfes1l/NAjxk/NoK6vKk4SNT4yvBUZcNHcpk5O5xD/Gbc1q085+QnEjUtRqxEwlW1dxw0Mz0Jdnsa+ok0fuIDAtvIlL4vA894WDJQIGMnrPptsAcGA7vfJNnK7mo2ZmGzoqvjvX97fTS6I3E+g5RT3Rd83VChSdTg6B35FtOW5kCiwVfv8ukwZe2rToCoLq2sGIXfgCE/Se/vkNMzFTQJky3TFNDNIpa41/2ZIBwcgjvpgKAalkd17vFKtqbsGVklcJ6BlJogJg5cQHLKIMz2MEU0GLC8vTlun8tPcJrROKeuPqwr1FtaIqg+5/rB+qfITsD/kz+SoZ7S4RUddCrQZ77FxkXA8lyPeGD/BRm9xrTE8HEJI2Ajd2KpbY1lqUs7tX6jAtekLDcaGPOaa4lZEAYQqZs/hXN5rcNQUWuVfcR9gAnb/yKVsVqhRMYKJ4IXUIv3GHpH4h9xg6C/6ZQBFFZ6KlUPjUwADmKmCU6+Rc1UDMDi6JTHu5xzIUD/UXw6HZLI33MIdSGuxnGEEv1U8zv1DIgnRXBEDTn2dlgewZb4WQS9zOXyHdYIvbgAvSiXVny
LoOIHAeM8txgKkcaz9zUyVfHjhNRn78hc92CAdTQadFvo/uWo5zg0rbcajMR/uZRb10J7RyJkJy5i7q0jo1yKXAZ2M4Z5kRVaGDNVRz1crwdy419dRFzlWmSdAhF2MxC1TASt5i+x5IkIOhgHmUVXPaYstrTAATs24U5xIKa/3XQlEDrmx/Bw7/PN+OlnlNK3kQqjbrzf18bcu+pFX83uC7c4V7ZDa5liN2hCjzkXmQtiQSJ6HEgeKR0Vba+Y+P3Yy9wGC8Tb7WZctp6AHwyuUTZDshdhKssI251M+ndXRYhlNp5ATBdVqUM+HTpYi4mp/PUFX09ID0StYEqmbfduX4vAVyKAzyip/nl1SI51gGgVrmAVlH9EniguFaw+nF0llN/WPl2oos7UZqC1HIIDnq2NAYFaDiai8/j/iAbIZVhBSHDpW6KfDJO4kBHaLaW6wMjfN04xF9gEaZ90o0F/EEGW1ELVBxXo5o6sFTQgYLbj2woKlWmtRV0Hk5xDC0KJkUpWNqCa+XzCXzYr7zGyqUnpikkcbpbLKOYS3L/Ap9ZqJVZ3X83bjMBR2UuhuMlVat52en2G5cN+PK3QB4BkzAZgmB4vPMrqDm4gJ9CTimG7IrOECLIw2qOjqCSPk7To+ZB7vozFWFLhLftYsdVUriiMxlipgk4K4ceYCnbVQa5iTMT4pNIhc6PeYY08FnuyWU8VPMzAZxejGTrA2CmwAIiMLpSVUrmgsJYLwq2SCuVnX49vFuWbgqY44PIydRruEJLT4xJEgMT1RB56XHqYYIRGhwxg9DS+VnIOwUx0lJT7i2fs19+hJAYlooXFu6BSRd1UZ9STyIyRl0ky5E82eXaUqRhXfWoG2Bu+9Qlzjn+iYK10pQ0ymoZEQmYrBOtVkVsTobSNuoRYuIZyhG4rpgBmjuo/6AzZcMMf0CgHgagd9/NVSX6X1gUnqE0Obq3WdXrrizJ+WW4a97sTnadWXV4VbwAYdG2D9Vcj1n8su4q1rSrZ8jo7UdqNQq4Wjp+a7w75iiUqDiEQlQcByo4aBU9XcRdYxsTveH8TibK2diHr/IIKhR9sOwCcuVfd+n4IQgL1eBjZDN4KQ7dgY6hUmLcM6mnDfzUgMykNA8HstEkavqioYFVjn2TuQq0DT3NmTPiiE58RwMHXlCfeNHv+I78DqLKKlj+e34x8B+nTwqf6bb+x+UmPI6kG70VQV6AueXw0vKEUAE/E6kmkYdZIrwsW3XWTKVgSkg4c/0otYiq+6yyMddiW7INwDB52EEV0hwWxhHJf8aeKQnVmOMEZFsyxUxe0cjDCNuBkvMvEIK9XDmBJLDlr5dWYtl3Duk6w06Xc6Q0q98xDjLB2GuFL9KXXoTkrk+36c/w0OlqBmn4yN0E7d6X9O5k/CjRFUmHJDgqXXwQiFj863Dk+MBtqYNCgQxeSW4IuY9bndjTIBvCZUNV/PhKQeo451qrNsLnMCEQVgni0JLoPQjGq3c0AkL7b1/ty0aGZ2G+U9YZh423GjH8yIkXdtvcXt5yRvzgaku6gl2BHeyijfpsFoxsGHGQOGbwiBtZ6/Sz6q2O1vx8NWjrVSQ5pOUZISmLWv9CPJP4D9kAC12HN6aTQrTsvCS234IqMcQzw0dga4Q6U+1p3wOWzLcEXTcZMYMF6i8dQ2kIPQpMlx+ax3nwp5cnsm6FIKsqiuJ4wK/CeDFXpQLbkyYxXLXn2n1iqJUW4h+TI6BPAtFwe/XC1H6pu48nrAIvND2lPtQE/TD5YkmVvKCaPsMx5qOfHA5/T/HRMQ4t/pM4EJaDWrKVc7KxfLPfX54Xi+a5m+geZXuUxIaKZpgv5khDr6p90UO63h+cNlWzt7UTEQQYm/5F9sOsx4WBIWIaNrt2Dp4b2vNJc8fUu1pJLkd5lttOxcGHyAERC/TdB/LpkWA7OFQZ8Otujmy1rhQ/Ci7OaHtrSgyxvqFucQB09i/VaHQ0yAozIo1GHtmrf9ZLczEJVQ0CkRpc
P3L1MkP0DOF6pQrguFQvrBq5A6rqnc9s0M+mruGzJS8trtV+ZeASj0hoRjxFb2H+MTsPM/rGlbrX/rGeL15knOs5l/0eXEut2QTPSuIoHSP8ZjpJ6i1C5vhWqK07FNuL8qmFMsHxA7WGcSs712jDMSS9Sx2ETwoQHudneXRpeCDcx3AhEantxT9+AkjAfuX39PBAjWlv2SWVEPjk7IbdvNDq5JdeAn18HI2/H708FZ6+/uAhNHISGSAEIPiAEomIZlffw94zgUzvHFSUyHE+D3WEVy5f2Wylu1Heq5hOQJboqNzdiHMZxv/sDNvEX+CSvTvBwxnaJRt9fZXDdkb0inWTcBoLL8GnIFQAlxxISz5Oxw24GCW/my1upRh7djKBT72qVwsc/rE6nqsJnp1SOgVU+xBNsNkSaXExNQ2XxQJqtwQN8X7116zfsMdqr4SADs9VbqmoGNHmbn8Gtur9ntQFzif2bY46IkixPnv1RoevWkLmImIE+QyedxJJL+lKTJXuNOrEaLA9LVow3+duXiOrVHfUYNT6oQtPYivHP9b6eBC2ms1S5H0jn1fkMyi7ClbNI3VLQqrCMyDKSu5YJLv/MJbPPlY5KhumyIBBoJ4FM6wl2XOHwZbooSP34JuDFA41bXXQyl7PELyxp8Id7Ym5myuuU/NoTQb8JXUpRKdq07psiXPXeuqLrQObHoahRfJgK7Ks2Y4tTQF3/+Z2P0f7xwt4twYnsYaKTrLjXQ1VYEa8bCCcrCOgQELCGq/edjxa6EI7708hzDNVFYmQIBW4dir1mnwPJJaglENCepDTVwSjS/4+rqMtyXVqD0OIgxtUT12QQ+hRwGauedp8JWrOLn+0rXXbC4LohAVEFxTjPVOhtK7Rdcu5kuBDvh3p9ZEr1oEyShqLrxsjZZwGiwZtjaOvk75vwUUFoPJgaQdR+RclWdNZvDwJXIUW7Qb5REZk9iidIHsJRFhYtTCkb9MtSaFaUDUGcAUoQ/x2QUkCg12h194UeFMIXh9YU3KWAxq5S49lslo89RzxAw9suf7mgVeihKwsihTpm/SYFuID68CDQw9wRMS90nDGXIJ2ObYEiko6XCDhsbbc5uCwBMQYXqrEUOKQkBlT8T9i/ju1V5QsZswjJ1g08JDyk1iLqZh9+1qQN76kX7OPQg9E3kndQ92r4FZA1/JGqVoNQDlYU1kjxeDDI2jJxKId+CD573by+JYpFDbggyUJ++02Cr9Q3B5Br/Ldj2G6MzCXslEdjhxKDxS9qi+l9zFNHMHb3QHWJRN104/trvGpsW+4Hc/A3vltp9s+KzJXNuDoMbWfupam1ZffkfIkE70hVBRFksqhNityyQRFYV0lGMOyBbWqxXukBMjEWAcS6NYIIC5sm/uSvthvSB2qxgTKFAMzGx5Kk5QW2xKORjbBHQu4IMLptpuQQQCMbmOKTxp3E7b4ZUL51Wwv1UpdV8RH3GcMnPT4GnHQ1tK0r+Xmec+aDc5qLxf4CxjcavVpfEVpGJQaIv9ltIEteDXiiSHlx8k6JxC+nfHyEwcP7dFkxj7gCPd2yHgqPDi3e56IplLXH858FK9hhMrUyccNyIXOnbMeAD2HsVhJJKAMUSENB88RlZ1kycaFSbfmZ9A5FGXhe1CryFyTeMdoa2Rs/CJ6VqrGqHeWYToLA46SwDj0J6UcuiLoZ0gGTLXb2KcUInNMX16VNJLJSyfv+xB3euKjs5OmHH8vwg6JME8G7kW37Hj/FVH4akOHDF1Pg3gtoSDZLKDBmq2F6jkKFSHX/di6K5Eh/bSyxpU4+9HvVQU6Y8TQ1MmWfVl1JBo6YSw/AMGonGi0aXJIySHemM0vQWVLCtWu8V+M5Myb3p7/SKGsETnUaFntA6YN9Supxhl51O0atruHTzMU6mNE/kHtNr9jwTqmm2xHUSrIjfDJGVCJDwATSziv0dFEWjm7uIkoB6IoGUQVjMD2tbR58qjhEoDb2g8dX/pJZc4Rlifv13z6
0UzJyMTZAiueX3KeOIB4jbog7cDd8FnHijbj90YKqIS/jf8kVbSawR6tJuTtieZMf0qQVwSBdRsuO2C5k4EQxhqXiv+ROoz8THI7gFxn875mKvY5cAe5jIxfw4r19mQhg8tBP6HcawGt2ODaEA/QoHSi8BAeTi8HW5QazB7+bhxE31XHsopexQ50zvTf7v0TPMafSHbTJ5xRU8XjIw0ZXyRKOF2UQgOaVV26jofBr+yogie+v1RSrr8+64645KqzT8cUQXPm60yHUlaYJbnxINyfMrZO1myRXIYFZxB2PDfhxFe2mu1/CLdKKJhXtJ0Sl3Az6AaI6ife8qETRk+XTlL2FSEFhzX5qYj6T+jD02NLg2syMZlU9M1Mev6wYrVT0Lr7j3EVcSmJ2Rv1rbi6SqsG3Dcm1Ckl8PxIHv+RNxkuiW7eBiYV4RXxZSw9Ml+3aGFIve/zzrRmA7DvCdbCXX4Ozv3sPwo3YndjXRGjZju0X2qsZsRQSXEJUY0kXzzocBU7JFvszFbAvjan9OJoH9llcB2qcl/MW/qCY8caM5+OIeUfFygg79zk/DaQn0Het78DyRyD9sGJNy+3XfPYKTbRS0Ka+mP/QcQpKatXZw6zRatCXbI03FHGWyhirN/D81hRe6LJMMBVZudK2kp9nlLTIfbubFvYRKQsiyX0A40BNUiH0s075vX68zF2zVzulWW4Fvv9yOGqTJT+6CH3LdE/o8TJKTYnwzPKtjv+Yb2ylJXNlOL3Hud5jL5W/cQCeelzAbdHoTFUETIGxiFKfV+lo45S1yK5BD0cQUyGo8GxnRiuN1QjODi4XsD3gbbi1bOdnEGbTdtlkZu9HWN30ed9G6BeDypfO2QOlW+l6gNMANzOynMS8swp3l1IgkBXA26EbhvOi+n96LFuO9EMsGOGgwnq1nnJmBzqigcs/Zx64Pgm9puHobpNCLR72D0CK6sTj+h95CiOicBDhcCTZvvKbJx2oDEnXoSqxa+wh5R3XMJ3XUQdSvktc0+l0/l+eZUEONkmjwYS0k0Cc37XUd3OD/vKI44Oz1BVHIxCvwYM1q0CpNuCKbWXxDTAA98XWUoU/B6TSvIYjNNqQtoGZJGeLdOYChcWlDiAMjBuO54hW+KCZr46sQAG98UG4vkJoFsdPwUlHKHx6vV9xptRPQ2v0pO002QNwPSXXaifL9aPHAQOMJqXO13AEHmd3xrxZMp1nq8oPZ2YX5v2iVFqfvI01+m4c98UuQhZRHP/jNkOYsu0f9CSaF1m3WrBlxN/OFIh2wrfGB4qXrIvUZ/ygmxWczW8ekal4hWAoOeys0g1gDR85D2mqunv5OUnIIFCEfn4k/GaYESvrO4I+fmIrd7lu5+4hOx6i7z3a60oxV6D17B1IpWVbYNmOyodmuAUXRESlthIchptSVLJLIEiJKMaqymzB13wAc1D0AJ+pJUunKdcvMR0hIBZZ9xYY+/Af4ikijI3POR9XYtkHcQAItRITmRq6OnVbGe9SJM1tUUmXggTpEl3DkH43cAO5zitIyRHlGUjc5/X9RnKQzHruz+r/9PSUpNxxR21+blU7t5AYze2uc8YzhOiOINl9+7T3Zp66TzVaUIH2LcicrnsJTOXg3ONuE3HrdPCrlor3p3IXn8mbpFAf2DLdutO6Ip23Ha0F0DIrLiRBcfAyfXTIYef+F+JcSXl3P5AGrB5zPPCkyeg817fg0ge6OgJDptdB/3E4W6cHWZbaP3dY+zgP2ankPJY8iestlzrM3P1tmAX80ISSDMcHo9jkG5aN+7BcVSGZnvyBH0vMX6ixi/+o90ezAMxAAAABjbtu18bNu2bdu2bdu2bdu27Q7RQU4ABTIfuGalczGu8qV6Rx36E7pQcZ/nqLf2aw07+8ltPAqlCXHnYpj7eZNnqCRNVkguSCNATnH54Mw0KysC6CPwSWJ1KIYaNZRKc8CAbqJVD90dSAe/jXh44Jd7MDNnIGlvg4w6nq4elKcSiumzr
AhWcTNMAsxOScL+IzmiOAMz9VScx0WNIMdqxtXAqIUkRth81AYdHOUPaXQYySBN1qXWQ9+9rh6LHlkw1+uff36qVlVFRKQW4E1bEsfz0VHIQptLGX8TguXH5Q1W6q2Z74U2Gtqxa+bj4nhyAgjcSdKHg3T5WS9/UwAVogWAqYQZ5KVgkNh2gMjRq2n+S4rFp4AtkC27L47SoxB1OTo6Cl/FHs+fwOyiJGUTSrdmdOrLKz8UrDekR4/i3dfAvUfb9PLRfgI2+K7USvHLxHm7yMfbBYySF2uhhqn5LYgwiuwVZ5WgdPvqB7+HXNRIj4DKopVd/nOgP15yZn65VuedHt74h36YA4NsTOvAlNGSkuayG0ocSBgMW5nhaS6IX6adwQp8UKgv/JZJn13UFzKM3B0/f0l4mr2RVfqO2tQNolpAgRNtEhtVREqUeFHq3qxhvpHFO6Wc7UNv8EI4try8K/2eDHqVGrSW5GT7Bag/xdDxROPZEA3mz92H/tS/bflkGTyT5yAIjmQPlbIXXfltNjMLSSXHBwp8R8P0q2xbxoAAYPBXwLUR99d61DvKS633qymg9D2Vij725XDAit2sn4ogTbqn+Yz9Xg+vqX4wp+YDtPf6bs7p2nSzfEZpWN8TgRdH58nH7vCpyxZ+ZPoCsx5slx/V0eTAeToDTS6EILlsD1qVhs4Mo1xDkTkFVYoKtHsDFDsPmrUOZmR4ZI7jSAvONxUN0V2x4gm4aN7PnpAMtcd1gbKzxRRiPqssWlDXSkjKkq+3GPy/pYCtM9hpgeKKhPJ0Ex6OctNHEf71AE0N8sdLgIzVqj3cxN8ZwG9K90KODVwVz4KWMI7XciBq4bWk9U+d+gMOgyuLUW8DLCmPgK7mASEw5sVSTvlafjzVVZECVC6EzLjp/GIeVfxO2i4FZpicwAaFc9rQL2Oh7zZVzVv1xuRAgjJ4F6jn4EnFZRRC5+33ZFC0UPMbNmGpnzOyjg5UceX3QC7nf5aW8aHamXAV3ZjgcFzpiUDSqhGfBRenF2SkoVSUXLzTxG9MKIBQxphNiiVx9hbccFocsHQvYxtC3fasvN8y1P29R/B+3/D3pxhSavNazG3ghRyRNVqHyyYS/A7oeK3DOKR8O97ickdMuJT58dJspru3GmiO+KHxdVDXklNOLxdYO4MscAxMFdZUl73ik3j95GxTVgTQtEKfuxQ+HPQ0sOOOBtXRfE/CjzwMDhdkFlsSATs017hNEKUwpdIFHl63wwgYtMyUt82y5+M8hwDCE1PGaRhcKnOLM3y6pxj5i1iy5k8T1tIhwIL7UsVPyC8hUA5DqUyFTOtknFnaqUUQYBrSBUZcSU8+ZHwyrb5PnhlzAXWXYpYgVk5PD0h1m2FOQ623He9KBj9LmcP7UC5ikIaMPzNd18y/PPq7L98LTq8mgt4VQtgVKurpI/Mzv+af69CVJfF4a0jK2gM+E1iEallSxpqHtMW0T3pc/FVA4pAFl0HMCm/UtMEsWEMsENCPS7Oq9tqYM9HBD4BG96VSl9C37pHWQnZUgkaVNz8w64qeEDP62eVEhe9Popgbfse0X02itstrU4GCuqYtByhloGgNGAn4CfYmljxdIJ90157Qctp6Qgyv4/UeTtL3leeF4YsDHSM6ZhKIB/HRERpNQP8iDgkkqa1Se6hq5p+qsURFZCSKWl+t3ea4P2dmz9ABVbV9/PW1qteSLR7GZWb9XYESlawld/E3g7g2PKmMCw1HtFONR1C3DcJD5s8k9gPV0kkU5OZ+TirImL5Vk35BsRszmrRsuver/Dmpg/C+f2Up75PXZSCqvZbwolX1wcdHGl5cxfcDY+NE6xgGAh+lQs7AYtFnxjq1lEn1QhsKasfWtx5PHAXrsBls90fPDeX5+nu2iWif6RN++TRdi6qYwnk1iv8IfnqTOJPdNuYPCgltSKu4T6gPomyolYfWP/GkKZX4Svl2Vgsjypejy3TkMwNo/D0JilgEw1YQTZ7SMlFzE
2nYjphIShXNWUFjd7tvd/qFtLchMfSkOhH4XEz0YYUM6kkerYvjTagjcyM/Gq9cMCR8QCIkpE7sIk7ql/of9dKYfgRGQpFWcUUSxIbCSmBHbqQe7sSULx7QUqsxr+UGGvzOPg5kLvOGzuKStBSqb7gqp6srTpvj6FSX9ohgYb6UJg9Mbpg1kjoARBzl/DLM5mmggGhAxGDYt+0W38TztchrRpC+gvnDr/sEVUfoQgOhnGHlk+RGJNUQccU6H4LKDAAUdYm7HYGA2Ug/iDpmrFiHkSW8raWqJx039Xar83qzp2SYVMN71HcsGhZiiVlHJp2SDNi8TynKH/PnON91DXUAaOEXk9yFBHTomkfk8yFNK/cwNc2NMgd3gcAKn4/HY/uFnaB5WMy0rLOT7WVWJLO3hD7aYC3sChYwzMSoPldPMoFlYYf9l/boToKYCdMfyOXciU2iz9xk+5mT7bVIZ6vg+PKHLqqNP/FKPsw46wR8fl6cTrfqcDIAcY9crnjbF6izz3iday4PMaUKv4UNKEZdcG9NOAKBUHkgrrzoN6Aj10uJOH4SyjO28W0u3Yi5oHIYyX+h5Lgd8tyjXXvUW1Y6SBftm7IBOSHDKGCs6zJjsolZo1a14Ag0nfCheUVcROGJGKIuW0Ro3SGnDO1FQTfOkV8nbO6cyOTzDC8VWtCBllzf6/MqaTfFtfCxzQeBExncI3k6x57tUN5SiQ0Yd5N05+MOKGXmMYHG5AwJ1nIkB8agwyrpUdXjnHWbswls+k5LqM4VdT0003O4YVcrDcknRp+9noKGZvy1lyuXvMHrVkI6OFd75soklxlP78BgoCGQqlf9Bi9WxrqltFFFzc8ZD8yJLkP+rSsoxWEh4vtjdui3gS8JaZyr/ZnwI45oVwIpUdkYryJOf54IlSXvNdJAMMJidJ74kfesEJBx8k0Kecv0iU+ynlZG0BpUvcRZLo1T48NfiGwPa6K+5zUVI6fiw6kP2Lv41UcskVqI7JpNbri52z45wqEPTl1xwspPCh6KobyHwr5ne8Z40DerJ3qvZxs045hXCioTtUJmHXTzbJ3Uk3NphZjrphioUZGn11dCQT+VtpyZvtbwtZIO8+jCOFlLP1MP7DJzm4WAmMpwiPU6jM92Tz3tKNxL0APcFDbf2a1ypT+aHrofYFDHdwqBnKswxOVvFS75s9PsHsSYOU3zmqZQBXmvlNap/7hU0ib/LAwxLlvvR6cXhVn/0mKYNWSfowueI8mLgpFKFdUXEjPwo/KmXL39k9HHIPQ9PEn9zOTK7+1to6hfHvEEN05IUxgTIVIR5/SpOSbXNOBa9hJI7BKcQ/uLoixyl6V0hGo3yDdIqP+Ol/zqYb6apPNQPuTnqQYxq+HON5xti9s3frE8CutP4PwNASZNQ1NzirnW59wBmKoogcVfHKdz3Aj2i6gpW0XH4F56RkvUOXj+eyosnivinIWg0SQwpAnsWLaAx+5v0D7sS5NnTgIera586NTAyFCjb3Z+xp/EFT5Nf/rBsbye44ukOUomj2jtzHyCMZyuxKVYaPekEolOzSSAYXjrH6T1zDWWZH8gqQOczhzUFo8opGA6JA0+u8iPmRm/jNJuwFOMvBKcEYBABCXC/3Lq/UWHZ+wj0dOm6n15iHp4cNd4ZbmV4srfZnuycP1TNX+cNKnFtQpOR2aVCDHJN1YRtzDYRldUSeGH8oROcYNY7GaJdGSFEZCA8odXIOEutGm+i/ggN3ZlHAeeJlm74nHpHTxaTuOHEapBf5IK2KT+ne8QU57rMEhe/gX6Fr0RXIK0D0UhgMryzvlqxqggmYEr9cU4t5/3OyWBexEyc6Md59vHwgZi24MO+PLZOaG2sWBIneqHtNWdsFtEIBjOnCUf+EZb3w+05hMKmsLf8NyGwvtVR8CkKjXOLrdq0RpEg1TwqxDM1QvESIdV1ZZJVU99yrySFuD6U5MIj3scFSFX+NS8Ql/xEOYzTrEBeBUXR+zhfllrmhIjs
YVvLAA7TuXJ5kNahBpHDGjfDd8+j1sYsVjsviAOPGMdbErUdDo+qlSgNadkmOQlqBsVwmLN1yZDsXr0UZkwNdEisCVqXtgv66l8hyrXL6YaHraMCOqVBZXQxlkcsqGtw95/WvABxOm9MW44/FhbaWlCCKyXrVj3zcNx4egviwcZnFpunrryNh9K/hpS38FVO16oo7NFvhCthHIvuToFhwIkjWvkJEuS1klOpJb2NNiKgLdSXX5/8T182L37lcYcFNVDjvp1/2i2JlLmpZp4YMKQUbvnOp1Wiq/effrTVJzLpphO8s9iaUotnphZTTNKQOen/1IJn3H1Fqigh0vDymQZ3phaq5288NDI5YoKQcZmu360Buh905jOxNhYFvYyGLMA4p+mBRtQ19vkwU7oVVdHtwiPM/Kt+/v6vA0l3rbEzOAf2/zscj7iUe3oVOaCZx7JzbcGiQmFgoJaxF+6XN/7V86xi3sZ3nXATRXvlXmxHJdurOCUnHPTsdSNAmAFuYJ5KX628ULhbENbQ6HkqLdHl08DQWy7SryKF9LgpU707TMv34kHCOcJC1+nfsV7iwOfOXWZuvm+/kwwRm0TKS3sYXiQpxiRqMX43IIHbX/jBXj3++yfJGUt7xVjIQlgHOeyhSD+eV2/KRB/U6W8Bkf/g7WCPG6/mFRSf1/az+IXSAUuaKi12u6BlNmxDgLFGLWFT0o75ZZMlHWWRSAHneS3AePU69a1DlqXQajzKqCyBQOkhzPfTtoLfSosrmGLYiF448Ut3k1N+hjVzk6xO40TCzb/i7xQKL/UGqV5C/cYGvvXrL3mfC6a8mBII9ySDROlZrmb2z2BSCx9gBmYrKFO7cgM4MuWpfJr0/yHJEpYhNoV68t2lkZo+n5GCBKdZBl+R6AylpLkj/W8Qr/1iiMgFoYNI9PS70Lci1Dn8dbF8OY0TgrvWPMEcOfCRF8ZqtWp7rnmIJ4mUf1Le7OxJPdW+152FMxPHJf0L95W/PVHc2ueVrXNUu9Tc1J9xJpDSJYLWnjlb9PDW5i6Z6iKb+52CDC4fHseWAjf0K/6WfChSo0NXo9t3O/lGH7K++m5CZTmha+rV5aCG3WXAQP+7ZeZG7D6dw5WFcrxy0CfwSp3cV96w0vXy8B7S6wxlHlcuQdhwixhklulxmwwmASX0nmHB0E6aXGdv+ZU+3Z5L8mt/ItTlsmB/nZ6X9okeBnVfavvKvcXRu4h6at/+urUlNEHYMN5wSm+D3GeUKldWPHFKpYdXvLF4RUW7NgNbDPZppnC1l1z27rKgamiidg3Jwa/3E3km+6XV22b27WqTFTNZrgnNtLC90QCjv1EodsWKsR34Dxdz78YFFxUlnLl5eaVjGi9UPCIgaPjESW7hu9wKX8tF9Vm97sxk9kcNPrkEOVbt/jg9FSIrqx0CWJS3z0c1tx+bsXjnfnkLnWa6izthoxwgQYsg8zdDnLdxPcSOeBS2htpjIJWUgDUK265WWXgyYhcQv9JPVGutuyoiohC605NMfFvdKWqgiymRQozcn44u+uDJKYKF4db5jGUB//HxhM9E4IA1CNVZKigKAHcyjzBeccn5g7bLdtZVoGMrCsgKs77YkZUdLsdE0dFknOkUljtPOaXf2NTiioR7CSXR1pek+BmqLLPSot0fuSnbeLDLhz4SPImjQWT4tOt3vO5HrsZTgT1XsNZ92R3Oq87nnKrmijJUPkOJDCUt7ht59xBZHbT7Q+71iDH3xZhy9TDaFL3YpFMm1huHi9xEBfJAgXYBM8VmTBQVHh62em66I5i5qjR0HPdV3uPa8PCvpEYukinod31IHUhafbHA9EnlJeRGWCyOEUT6q2g1bZ+W/BmSjsp6+IE5M2Yrmc0FzmpbNgqWH1yjyR2ON7sh4IF83L6nDW7FKEHKIvQHanUjz3gcpaPTKJy58YBkV0IIAWPYM0pRGDLfdJU0AAnOTyYhfV+E+rH6T3Pn5WA2utbkozTeXlSKWaWSyLFk
8TMsZn47vrlsDAbyqsGkpDBvYihwiBUInNgSRJOkS/8bZE2qWtcbh4mdM2aHon+39nXs0wIx8KaJZVvWgHhbDl3SplzzSlBwjbdC9O/KETV+SNdq1AZ9zkgIBoO4XHQ0MSh23bqpYhwxEgAe6Pe9GPBwiThPuc/dK5D0J4/XWEmCkJsFmMnxpBElPgdHV21kxMO1rm3YPmn4q5/NTSZTED8Y11WHtuba4MjzcUQNQPH/zysh96BzzS/BYj7fFfXMXZFfpicKQSpiPRAQ+ZrrPmpSqoGCrVcoJePPu9wdwcrDx0NLwNpq5S6ehQJ/PNIxVNL4gVmQbhAeYpmIQCNCdN+g+R5jQoLEQqs+8fH54gyQBlXdbf2iIK4IoEwRCkn2pM2Ym7CqQUPz37H7OZNgP8p3ySL4le95kOOhXEqJKQt7y/MDQZRCmSmRgTEJ0s58Riu3yMUhtB6ahMC/C138XRzblIe3oyZ6knz2ak29TlOiuU9iJm26NY05sZmFLBD0rmDnfk+vUTwDQjIvFIJFohRH6nlKI5MIjL3I6n03d+0EO/7v56dDvWuB0o03Z4sQwLO4fgR7KPSytYdjj8W6NV1O+R5nzUWD/Bk2bKNxnUHZUAQLx+4/HTnFsUJ3MjqaO68lGatDFnrmh45e29eZBu7sGlE/3Z4zyRdpkQhJoxgDt6KuM88DqKnoxpTiTJ7u6O7vCSYehKBewbsPWJmyOnpsIqJ8uNRTFV7ufpJlnhv2Wcvw6NQ9v7aNGMNJpsXe8IWfCOhTaBd4eb/NOp2Ns/IMgzR5KdZRCKXQEsCxMASdR1T2WkZu/tm9FXwtOLPV4GV7Hx9Sr1B2UJ8YMUjZxmZeb4ZHMXwzuuPWBew4q8fPSH3uGMdRDhaFZFvxYaux14HMUucyicaSgKhfJqUAg9I7SLbO08NZee3KJ0bCH3zo5IwuKhRR0/9eaonmM5zTHVE+geeP3U6/FuCnivpnDTH+03jR67ZO+y6FE1ZUdKWIlPMZu09y6ET7O9Qkq0ZHeLgBe572/l3EzFbOat49mhFQFdaMnb+J2jt242GhGDnbXeCpRzdyhtIjppCxzyVMLhhZlo3iP902/MpLVlH6w+ejoCyuEeO/PmMpKpuF9ZERRbcKrOt5IBOazdAIwg+VegK6Ez2d/gtm6AMlXIMFF5ZJyeRPdiLR6ML50MhNZbKlnM3fNU/j5cY6m98UYmlcKMT8xRp7QleoLgmelMmOAohx1dKZysk7o+ssiQRT5yHeLYtNvA8wtvnt12Gcf0FIbo5BbMk73bW6VsX5BGMYpXOWey5o34Zat/W320ktSrjGeVv9qQAHh1aqib/Ojvawop1LdDkq1u/fl0ZTsfXSzduLsQ2rTNp8kp+Bb1KjgE+MpTgLDcgnjBSyVdtrzjTd9Yf2aY3Mjld5OhxOVLvHAiqgj8LbxYAPtlp63VCeU06U+MPvfmyKEccaRG2CaOtmYYTd2ErPAJRJG270CxhoNLcoyyjd7WK5LJ+KufplePwsmuvHAT8SJAgCdZQnCmMrU61IQAnuHnvOZj14IzHRDD6JPs7XK41TdU7txd0/NG40/PrLY4dyfRvlhigyg42TLgD6u9X4MkQhKIeQ3nlKRVeXs9rSouTxXFVXbPn9wChXVBv8BjoiN84IzgS/19co2yFlwdeEoSVK7kssMZLpU/8W7vp6Qry4J0RtgcLZ4gsOdAm2jTOoutaUmHPXyyNfSj5P3gkEWSwqJdNnXuCiFNLADjlEFPUnoUoujqnGCnmZRW6NL+M+UP7Rsji60nte4VwvR4/zvyhvVB49wmBFBEEmZ+/wmZfEB71xTMNtybpH/9J38uaQ+f2wIE+FTHUhGDoqezBm6HYj72PNVRwRVP50t/IgL10xOwWPMJZUusnZBs8c6HIerlLcMnEU30Nv9UAtFodsH1Fop2FzOpWNNXsNMEfifqSMyMBa7pBrDimx0Wnw3FgIufafKNWzmiT5RmhZnEJWFvdi1nbP
FgGB2h1y4tTsFZ+WqxHw0hQG1k1XZ4Ahjs4/I8GMcsBDbkrzlX83y5QgfOhvYnsoPiYl9BBxUOid9Xp0ds4R2gG2MBvVANMBnR73nHD9NHSytBHYiOnpVF4K25MLf9/o3LfJmlBhQxHjBu64vl4KkdB57O+aYXHpx7HKTAub0zwqW6taDt8lkro9riIskUKqlnluU+lnIGeizL4MP2piFrGs9H8ZSI5k+DwwNDCWVKvnDOJBoEBTXcRG3foC6suEUowK2nw7fN1Q/LV61RL8QCWxeuYP8CtOqR108OCtQHTcL5apFkKRachaDHZGNUkLV6VL7xvwkg9yi3Sifnaaiuh1Q1WZb4+rU2lFUIkERN38Rv6B8LOaLRKlLdmPLHrWk1/i+mLPuYojWk0XsNGht2lLXG40GH97du/CYXofjBrdfxydmBi+0nqT0DqYZqGVs0NfM7MI8oUL//iVzn3mmxzAReKKWPpu4e4pCYGL4wW4P2I3y2j0eDSJKKTBuotwS2F2PlFk+wJyTMJCOVIGnUlVAKayBe/E3siWoYGqsaldeGKV/XuCQlJl+yJk8IVSKd8YeJ8DrkoiEasTx6R5/8O05fqGlzi0Y9maXTNOLNSkkT+jQlPggBR4X9hueYpJ2hOIjpqAomDcswaz76xCo2gk+W18hu5oe272e7MtkxeRobATDiGREsrERgsQUMihIo0wnLoKsQZOBtAYAzrOaImrBOI6+KwmkIqfzI0ppLKnIXE+xnrxNBMJKFyw8DW02/iOexgT9rLdE72Y1eQzXckyZ0FVzqb6NjPxIhVrtLX3tHwfuUhhq3LYKIfcjoTBT7xqG1fqKHh0oQIDAFoeM71T0KQLbB/n49ORwasQZbkbTI8inxqF2EB55v6lo/nahCvZ6drrpTqrQ1XTpkzlD9aF6dgaL8jo6DFPJnh9HWtbjDQHt4cNYSAZI3icc4vtEoQnTWij90XLmhynhVGZVKINyaV0LrPCYLWob1Ix2XEpNfwqByyp7ja7GCkF50p2T+yFfnr8Ex14zGK5VqERjEcUE4YUjpgWycGGmrc6nyYUrMMj3oTmsy2w6AG6AjnmE3yPNDR6aj13vv/OTnE48X4XoZiybeDKIvQM6xCcV4IoM5XcHOZGs1QdDIoH7OqzxYRyoWt8CsXgtX5CtScunQ51dT8c/+7J6WLM0L8nuLX1eXD9aRvfhZhZVyYVHOV8fuKBILROQhbQGhiyCFdfIznXATWzDVFz59pf7ohPMFxgQ07ZJpcYSfU5Io3DYgwv3tJRutP97S+Q/G4XOnmJfF9dgxHCYPJdjUFppfwksiuLud3nGk8MjuYeX2SqXTHuo+lVhnnjX7t7IFWV9DFPay7dYHvqrfD+BNPHZV4/8yyLOMpSygC/LGGVn7cKnyDcglCb9OTlde2WMBYGOwXrwhlOxukcDvtTeHEzis5UPv64hlFCC1ffF1mg91v+P0+4di5T5CVa8DPwATNwGw0ZMov3j1MjNLnirV1B5qDcNk++6c7/QH3SZ5Ahje5pwv8SH4lqJG1Y+1Psr8k9mmmX75K+Lvmv+hmLkHVVK8hY65/S2ieky0djtFKe5yMguWjEVRkIEW/fNIpq7I0Ef2cUaria2v+VbB+gXRyca3h1W1nJtwTf2HSREFE3F0fyKbC9DWH6TdyZ5FsXieWaKaEfF/dHHJC1PBxBnlCcKAnNJZ2fTnuRScE7FCfie1Dk//y/KDPlYHMJ5Voro6CXRtIL10J8wtmK6MA52SWS43GxkIbEY7n+nM9uB3CZOnQEMWdfpLD4Qr0EhUftiZq3ld/9HyQZccAHwbEFdSjQLBVgmno1XSm2wGS6NSH4zq+z0mSa2Vcx/APi2X8WF9vfmAVKrNzisGRanbKrSZVwASOi90/8ZDE/+yqYvEB3NFpHhgFor80048937Eu0sj7TN9lozXvPqQIgOaTJ1BkUiYkWAAZt2DoaQVyNcRJvJ0aaA5BMAeXzbzytGdSxx3nl
FpMKsrxvZhgNz01vHnbTenQ0FifQHTpHt+JqZyKQv9TldrCT0EPYH/EFs7vI1Q9K9nxsKegOYdeXUgVDhcst3888klGNnO2rSwJ1LZ5iDbrIfWqFZPCdXLBzw0XQnGkrsDwlfMz+IEwg8HKtxL4DpBXtBEn9lEMz1svHOy8mEu/xD10gRrB1kmsGo0yBHgKD6WDRW4P66+FuP16NJqOmxzXwOiznKmATM2JbMCMDFcWyHRFTvO1+nKcni2H8+nUNLNK1t/IS8aKpQmovU2cQCK2lElzc+6zJe6QJrsSP3Ymrkh2zL5/v+GAUlPiAhHexWjefuw91DRxtJmnB9RvcwSjWv8CLILceAt4k277xoEo56qdIM/Umpgpqevw+b0dvM3/IB9U9WgjNn5S07R4zpeXhpsRSLLdMKsSC0SzyaBJuch3RxlmjT6dqVoLx36vRWwKATCedaTwMA92pwvf8/GTtGisLBq/OlkNMVFYI3N6saITXgwiKihiNmfceJqoDpLhpJ9x+5NAF3ba5Cd3mDU7MeRCLJX1GIPzApe7w17gdcPnVhugUFrigN6IhjaxzyOXynKUOt6lIQDl86JpfkiDJXIkxLIHT4EbQU+K45Z9fItcJMbKwNPeydfHpSWNEjXluPb35J4jHLHKtJl5bVyZio70nMsXs1/NEIFnpqeXqBqveDY2w27pDePrdxf4XhAkZqsG89uAG68cSXSZpAxhD9yXiuN1+eFS34Jn9DDV/ViymLHkMAyU6mfNAKjYFLOIZV2mJYw9N2tWODjeRU9A6eN8u1xzKCFA3dDaSZS7QQpEhFKPt5hAwdosCKw2KPhjngkwHX6HLP2+zGF6UoC42NzG0Dl1R4lXE4OAK9qsJVhos17E9BssTXKZiP7DgeblCionbTmSecM/u4CULv1MeHvDDP+fYTJnkGXBBm1p0LBPx9oumWtauHnh/nuSSeWsxziY84Y3kA6W7Hg60I3eZq2Tcsz1p6F25ipn9h6V4I3/AAuZimlPwHQQsXta30fOH2I9marehQmKe7WGnU1PH/t+7MVGuqOwHsyiGLXdcDly4mt2eCRM3q/LTPsy/yntMovH5kAzdckn86mi1OyXu3enImGk1K95ecUsaozPe8l9VGF9Iqvh90nyA6JiypztSO3E5B68I+uvQpPhRufOZaBnDP39ZwBrfPn8LYpiP33F907fhmQjjOm+1QX8qMvHurcMMmAXP1OMJnhH4n4VVr8ytiYRVWMA1EzQkoYA6qWD1XI8od4tA143p7PpsxRzay6Y9kX06MaF3mDuBmy2d2B1dilCBzeqsFKAX7iJeNRjyGCctD2N0V1XGhADeNl0KF/UlojRqeXKrmr5p7OD1lYWp/IBMEQKrCOJ1ZNy894ugVJKZjQuFSGWojHHCWnJukc413RocfWbEHdMF1wLy918jjhrHzbDVJo1+KQFBuQx/skDzB7VaKwiG0blk8Ttoz/Ozsk5BuJiNn1BJZ+KqE3EnxhWOvCkJWiBGIQmtzg//ctduIPr4/kpfivuEBTszGCfkQ1FsPLAbJ9vuCYcOZ1UmZb8Ae7VpLE7mvXj6jVCoKmvIIIeq9MIse5AWwJws3pXgSYf/mBLHZVohKGXc8vxp3g3YOhXcJLoEdshGWU9IG5o2V05cjSVtl4KiwkbTarTPGF0IQ9HKsBXiwk5fSBIau3tpvgTvTq4HGl9YCGZG0ajN7y2+U3lCsZAv9CR9y1K5Imm+2zDfFlIRfN8o1SFH1U1Y3JZvkHA4SDBNLAHexnpUc4torUJZbpl5bxG4bPKGd9J7/CpXF7wmNqVVURtZDasWmecOMWdXzj95C5eKqY9XgJx9lCIX909p8bcDPEfK6mTxSNE7yi2oQ8jLAJG8gDgxhZMvrp0MXPANediIaV6IGWth6AcFzInsZzv6DfRkpeW1RluGsClN4FzhPGO0QJc2Dfu7pzfvy/4daOF5F/zo3pqSTU3YB92LzrMfODLlU07d7FmZ
YIyiEK8OulcJ9xofNYhWHlkdFPqPHkDf8KbpkA0oGk21niJXu1r/bdPAKYwpX/nTHQNQKKPjopYSFcQMB0eAibQTFBaHy0d0v/+Gcobw5r4M6HQhRKpnhDrEXdmtS/mJKMPmrSjMHRljjKogbRaKBTiwobXkOvhwSqicEt7uDiwI5j52EkGKOlZyDbACCXDZA6i/upLl1kJS2B9sK+Ox0INxz1GjMcDp3dtE4ELPxfmxTB+ip7TYkx0BF55wXW3qchAXbB/IBfc78gW5aNDp1Qu6U+suvgTgVLwV8xEG+2OJ5ps1h/ptTtUWIhjpEwuBLG1tpDzqsNApH4qg4tSs0usSpBZFmetCBGUBe+RVJiwC/DrItNOSx+j/XB5lhy6Xmv2TSogqILBZyx+ltBdO47s+hr6MLPIovM7tqs9g2H1Z6RS3CySniK6NNjtQawlhxWxOIPTDyRNZ57eA3ON7ifERN9olXGZMDcWMQndVEKdBM5DNxPNh1fdTB6bG3tQahYUuK2dRlFyzTDyp+0M1LlKxasmTkN2UHscasWCV+NzKMzIImqcqnddfqlHQ2EAVzlTPQKPfMtKHroqs9s7pNBPhND66HGud2JBUuUCOoCbNkAiybcl54tw5YS6ajVQbpNPK+DrQNKG+eUYrUkkCENupztxea8Qib5xN7dtWab2ypUfNZCOotX5uA6Xnhg6aCYIzRbr+8IZdubAv7uGEE69dfDO8xxTYGLItHHfb1+w4yRnWgPoevOQsSMtw6yQUBkUrxowwVcrNixIFdueJeciLRvll8fusc1nq0z5fTQZNzguqok/v1/Yl/2jVH64IG81/GuZKk6ZeZbhqVXCVXwYpGiKMaMYLBMnFo+VTzKRVDwatvbVMp2Qo4aGrV43/kyWOAKSA5t/qlTpmzqMPBH3fKz/MlDW+pL58Lt2G0NolznUoceiNa539cTWZwoPXtk+NeLJqLZTu7mhI8yINY3A6hkC683nqn9Ircrc725d+AmskCtpFXXfYCCL1g2EJRm4fgf800jN6Vv1VE/eWu8zljoyCfB/Kk6fgGPUSZRMlSG8EjYnk+dt/gzveSA5CLWG5Y3lJYHpqmvKuE1K7hzhurkHkvUGMRMnA1YzAhCsygbBNkkBkDGm97dmTPzNWKZbc2d/ktIIIXHWt9OsVTfSuHAgIHqbHt4iSdkaB5NE1CV1G3TDEkPHrs6MalKIfW9B71ELYt+VuaCodQD8qE92ESjS593jWstxE2/boyDJKaiGCR6U2UcIBQZvRGeugiJws4q3+daKFnta8e+h19ecxVlOqtSu35xCbj/A4S0aoyfaSip7uRh8E0CfLk3IaImO8kPgpeMOCvUGu4XxHywo5Ehw5DJOioczU8a1TOgzJC+cOLFGLF2krI4nXwduKnSDWk8ZbKGzjw2ld2OGvhDHyDKUT7Ij3Z9rIwI8JjDBWUI4t777UoGJqkMV3TT7wMAph1lFJ6HApjP4xVIpQgKCs/FyXC3dOVPDtHpag0MxqsUOs82WMdhjN9oviYZ2PHvEbu57g989DiEtRhSZU5IQN+G1+a7gXaq/7TCgbX4OuEL1Ffeqx4KU/oVaNXeNLrXBKv6jA2nfTMuB0PMoo3vrtHb2ICaOESctjlFspxxlFEgVgT3jelLK5NKr7G07fwR91JdOluwFUzTamRN3oa2HnC8hq7/j3IcBo04nxVmugYH+nn+po+qN2t584F0WRs+V5vdu/P0GlCxlRqUNYaSFDAQSBS9Ccn8Ldq5WagDUrPtuyUXnkP1zO/JnhnU19aq/iyH6SCXdqjea8hcBpGWa53Cw4+A7qDmohC0lD3Xtmmq8IbOH/1hNYZeJWN5xD/C+1ugsmc2xtfeqd4qTW/JOj/fHW0stp/qiul4c6cTUOtXClMiu2FPT/ZB05Pm5Y/pHh4/OGco638/vPF2eIf3okfv31E0LKX7wlSgSupY5rpm7cvnK1+GNE7KFRLkbQu+qemG4X/0QLY6J6
ZyUgjjY3E6EzMDdr2YSpLzWAeLwTyvH4/41M6pnxiGILyiS3Kd97YE3qJ6aEsjeULOa+d3htfI4yimr8YWHsowQ3d25hQENrNWAO3MlNoQKKX0i+BA9lc9lyZc/nibzwq30Ptoth/xvAAp59UfnaQKNh+HdT5ItKvDxOJUauMsInPi+7lb13fB+on0++s5yTVpAjIozv4ydC6ppr6mXHJYGmT/YT1mDGIzVPMonvMB6xQiliz4rMatHC/vkmhP50yDn8mjiuSmFWwn5wZpaRzKq8FKeXuIRXC4gCTYkiyIbxguf21Skf+3iY3Sh8gNWKiAFsYAQpYn7Azuw6bqH6WMd8weXaADv7mFhS6RO1BR1tAdd6kMgZ2tBII1tlJbXO55tpFbY4vcVEbmGych6bxg4irPBQoWY2z3S8vjT8w46rYQdAYCLCBUqKQBl8ZPGZOGnn8nusJING4/Uq8808V2lKmreYfG+Pk/XbaUOphHW9mxsh0JROC+zbLD/fS9+MXeb+tdMh7w+jH9UI8qsinHiuc3Vyhnp09+otqYuf/4ddRkyIRuoUPjzzZc2XH0BTgZDTlmDSAe6DD9erb/fAg32kCBQWlAbitqadR58JJuT2pfLdxYVvEc2D7Q+BGPe8yat+uoM2TaJcp6Gi1g7i4J3bC/vRRulUGw1IBimEy8s0dVNH2boh5sppehCx+eKIi51sVx+/p48oSszcXpMczbKIxLgIHB+mJanPgjMeFHKQVO0H43zbA44DB4mAUkI7fAtsVin3iiwvuaPvzDu2j+EFj4Yl+YMM/yKuFL2mSHyUCfeLwSZ/ixkAFZ4etELB/Dujj0b111pPaeZ0sHN4NTf3Djan3Gp37XdlZ0bBLRdQ9jbJXhP+fmvgl3Sx7mEJwKWCPujhcAhkeBCwG7FRCsVSX2GdLt4FS+EuiIG+Jk+d+mjXfMJfRsYO6lamLagHFpCZGPdWj3aPJ62KUtIICwa4vRQ0ytDVo9iNjPx2qzJ/pFkU33fFIWp/AcIPvLmpxNBdsQEBTK9/AtCUrN2j0bPWeLfDVD4I/KpJ1J4YEvak0/WF14Gujs0IRbSzbG6qQgTGDx3MK+w5Xptg648sxYjEsc72lyRRvcRZiDqAwGJmxXNhbMqnoCOtyJ1o2EJieYC2b3jQ+/Bl27SgoTdu2s4sHJHIRBTe9qGVsIo3+B4hWU4Gk1VGz5A/vVufnN5BLl3a4JWChWZw5285j/ksBT9+41z9TBwHOqwFliwrpzdoQUPQ5f69MgHXWcSYsEyAdMGSebJUSinQY65wDphOEi7OortshUHDrmJbXXYti6tvoNHu0OrDmH0hzmhGRRDtKSzjXzQ9aLdfPq0fAgjo9rk7WssxxtBmvPaeoPKkxJOZWb11Bnc8QwiMvkTdxDR++HIzYzcYG1WZGap1k35SWvjShTJyytDN4VnW3/33i5LkR3fR00NP3x+YR7afR7sjkPsIevxTYpL2hsI0lXAWOfpaxmx5OKP7wOz1oYYXZ50acjRYAGt5L+yRpqkD18etklPD+VXpyiLb/10fmNd31JaUnkaDAn5W6MGtZsj9Em9FaDnoNYfdEJbH1tulu1KPXD2w4R2w88GtaVhTynvGRZvMBoCzHwiBhctQ+VJNk7aeEnbufTUOxn13HNau0tr0Yx2v3oLdsMgI5lMbuv2hE8L1vcGz4ebShJfC5gL8m+qJ03AmWxYeJYJLaghzKF8iIdUCBOwx+VA/St3O7tvBpv2N6nfBs2XUsQH6VHhAhm5v0GKeDd9ZbVn+WPoiSi3DdLQ2pNdT5S0ejyJewW/bXSqz/Z1GsJ4SW14Vk11BcgCnWwaDF2wgQxL8ve5NuQgaq/AGMxYX4vV8X4RytElGzeGvuPfN16faavXy/GDa6IEvxySDWHOYJrb9RuaQHC6XM1EPWcReP/g+qckhX3UI24XHo0K6H7eYnIr/4uOXd6ZELi2urHNK0ncjzmCFe55nDbqKM92qu08oPoqi
zW8QSXFkEe9jS4MujbAe2Irg+PRLNkCLGSieFrKWus5eLVvaFLvsNSU5yr+td2pH1qH4uB0Htyv49++Snv+4fDk0BZ1J8kQ8zkNLOdW8QI1BzDBCJFdYOKARMg3wnfAUgXPxu+CF3ZFSeghcxXQ2VfVLwXNxkxDpSIMpTFTxNnIq3pT1a2SwtlzcwV3ujNFvN5XcnWh4UxqnxdkALc3rF/I220IKvCnhXH73u+lI68eO7D+uLW0AvyDwGK5skM3+S6srBSD8F8NSpbayIGuQY3k+O9DRmXSRoRCEIQGteMo5PMD8MkIAlD8Jp9pfFD5ykN8tWPpxmzooaOH2NLz4op3n+mudNvu4J1uzznyGQElW5+X2+ccGbkfUgYwnsWBAmwSj4Z+BhePqtkwhE/dh9EutBWWSSwXywihh2Ni8x619svRBafN7tm2D7r+iCt10MLdwOAYpHx8alsww/mLrA7bOkfqQ7tlyNof1B1n31k9Zk2EloQB5QX8mspI+CYRx7VACZccCFoilLKkLZNx7Bw00KACu72PX4pXKprBozxq1NdS7DpQWAeVDnDb1PijKxy2hxMYRWsIo1d5baVGb2moQ4EQi3GwmGVObbtCzlHnecDm29My8XXrXJuGh5gX6DnzeTllV7FQzQQpePnaXBb0IV2LFj8TMGpjW/fzrjdhGLYbgaiSVwV17fTfaaFZ/eW0+KtKTxvzksqrulO2jGsRn44j5KfLkTXATt0kqZ7ikZb+FEQNiMRIf2wuIERWHS091+mEF9CXHo9xaMdHyNWJmDAfjCOaIltczW0eBUgruKWsApWjozIzG80gP8aBb9Lxu0J92HmGcBHvHPLJcYjAw1QegXvuAs/lIcdZ5Fobt5ykdAmcOb2uDnS7j+GLPTXzk9gnHsJ/N0YBKaYhlk1Ddm2wIFQ7cBO7Nx+B9SOxROYtGAe8TTtdPkADNKir3xAd7bBVSaFS3lZW2+BP63kd3+aZHxWc60w3EjBb6jtWjDmfX7/xszd0Tmsi9yVemvcUZ2wor78NPcVkvSLVXYgIA5t2o2xtOI3bYor4RdaHS9AQUZlz1h4tjdst0bAjWgC9m9mOPVhRy+WzKMdRRC6sK1jRu/5xlu91ANDjuoxFYMYRB1JnIMONuX/ZVjY0ZROUtiM0AYwIddRHFfuStpTeFJ+yozMpz1L+Tlan7VqUQoPFMs9v65YvMh1qFhaBFHE3KpIh+UAwGFRwdyRNvB+Y7mYh7PL2GNOpbZdAEmmfzVtI83Dd0wWUuH0m0UUjOUBq5bn0FnOhdPsllZ15y/G5BmWpHr/JaPfzwrXaA2yel6Si8Gg6hg5cS64qBWZGwxizETULT3RvU2G04sPqQqweWlCVKnFu/W6oSyhF2KLU4mRq37vkMoreRyFNMEplE+esgpPU7BcWl3ZoBoXydbpN1QTtett+KWUMin3RZp71PBiLIdoZLiTkA1e8d6S/Bm4mVVQ7PQ8BvOcRORdoKtzmEonT63JheSDpaSUozUeCCbXhImimhsMacYChrry3RSJfzqK/xIkJ10m9YhCENcgX9E5jqVWkEsy2RZsz37WGGgI/PhXxLI0jHKMpjiUsE3AuaZdGm4mbBoeVOf1JDEAorXfbl13Lb3zB+//OpfO/iqxqTEKB3Q+YThMXDG7j/WvGGRdD5qoXxZkXagNIOCDpI3c5cdhj5AFbCkZyIFdyx0EAgLlJxxyB1VnS5GOtfZzA/RTBsPn/FR7iBYKBa10/hLqUK3rZL4mN/RY5+r3FW6gf5UQqQ4SOh3sZSGjGLv7oy18BAokSkCDM8AD9TL0NrKcE2FjjuDWwePVA/Oh50SQfJ8Wn5afIuoVn7kGzLbl/wBn4Vq8URed5T0NImN6uHUKcCXqjvSMdfjYS4J1+0oMODfOAuHP7AI/EqFf7uHQyVaM+KJ28wtVL5aZYy8wBYNFIir4NOXkLgtJnNyHVO5QCnCxKf3vU/3S9bqKSI1xz6CiBBythcYGft
W7cW0YGG2cAohTt/abo1Up18bN5x5PrKNsOmEbvKzasqdeS45NkULEhE4basfyyQSehlnvdBiypaj54nlBo3pot16QsZF7DVLIYiKuTlVpfc006d6jVojSHMwc84wnmw5N/RrHSj2yRhRgnUPUsPtkTRz2EussTgivB0fK0Iod4ghTtpGQeMuhvFMkugo0GCjfa1cjtbbg1/VrI8u2CxxLlBkXDm3+9Bc/vN7V/htqzhspKriwy8GCV01NrOhqWp+q5spdU/POP8i66pz5lsrdUlzpuQjVq74lF8rt0AYgLtF7otwiuc/vODqUSN8wfJ9Io40RT+wFjThFwLEdv068B4gsXEtgmOmU1BQFRWRlQFclVIWv7OK+KuSU7i7cuW6aFp1m2GjHHt/ZuFiDw9F7wVDHoVyPxbTEcI6Be871r41uTG2TAb600ngWc0PDsgFwpJ5aV6aqghgAi2kCLCOKKXaiDBLRA6rXtf1wLkTJ0RoVOlgnuWJqPkJs3XteioWeZR962kyO6pb6Ps3gKE4WE6qIZQ91g0+ckFPaskMyhzMVnH41iEcOLnGqd6M9ZFxTrPRktjR+CRnO8NRYRh5IDtu9nk0lyPX9rdmlUIIFL5NB6k97jy9SN1fGanlZDZ8t2zhThOL/eQWQ9G77qDvTa3vn1WYDJlJ+RZRSsJEkuVDYKjvroZ+TOhntYdedDRDZ+LWs7VxXSuqs4nx23NrJfUwcU8EmLr5Ivsk9pP4/COOu2FTvMdiS4GQ9hFJ2Zo0PUIzUOXdpQz3qBXnqvgftHRvFSYUR5ZDosXcTh2dp99SJHBSt0T2KcOHIk1I5ZBulkGyQusBWLq9hTHd7pxZCmlBkvlW+9VJ1BSkfSshM+U2A9rj+FFiQa5syJL364tAAwZbZMKXNsmpyxQLor2SW3R8Dn1zeepeRjX44RuVFIXY9KzKP7fD3tVQMWIcxnKcoAlxe1eCoNbFYVoFAvnvkVn03Y8JuO78f87SAdfzqdZhy1DOiae0XO/COQhBhwmyZPbQlnpKY5uID6Plt6tOL2upIBvF4OpoVDKkEbsNceeRuV+4II7p/8FHOWG9FXMgAq0aHQ2moVBP1xk9Pdy4xgNFW5U0jCFv043shuo++zIc6pDwPUiWvZaPskbdbuX3digBn5hAlb42jOHJkFGrRNFsZEhi+q/93g6D5Iqjvj/cxke5wmx7cDhLluRqRd5Pfc/dW5KUOOmuI/5g1QnHPWPl8gvRdnCbzXRvw7m5qOqn2fX3MQ89Yh5oJacESH1cIqAMZNKSjA09e8RAFZP79MSPKpw7QQl6Ed1p2LJ6kEkebGV7FrVCZ69RJLEgNDiPABxFCWeN96qUman4PJvyjOTrROlUuE+aRe5uww8sp+sRqsbVh33gWWbbIhm1w9pZYVO7epwYFJaChNefVIxE9cFTHLLCrcfvSGq6ffaknfRr1KzkYbyJcrF9qBv7veMPjdh+ecJAZHKGBgbCanZ3mcs+7bpETH8uvtapeBEep8KbY2jL2S2dBCBr1CWiVbyr8AssFsH1qxOxhg8ET4T7QZT6tFVx1ec7Yqs7dJXYgWFX3fjyCqLwBQzzgSDHLu7i8ocYlIdoJJuJz52f47i695af0Q3wh09y7TLstW4e3jAN6q4LegzX01pNXtuQXxNKDCMALCFu11luURw8DfKu6GiwW64Nra3XIOAxd/ITQLw/L6GqY1uI5rqihpZiIqF/z0teWqGn0y4HuFhxABtfZ8HOILv8FAiYnM+ixlknPMgBFUYm2iLm1G8V5ZVlJHP0+pROu9MH52l2gerf4ods4TkbAnaTkn8ohr8NP2A3a8ia9hgMfPIqzAI1mE6K1+f2zEFF754izWPlNFAXfurXkAENpdjYrmq2jL+jBDxiuJ7xZLfvxGx1Y6fZKIQrFmYaG477MyBen1Uqmhr5NZtu6IdqqTReVRbTtPoUvalFrtKLJ7iIe5gU4LUt5q9vUhPwrAiBAOOyqNmt+S9
kxYQpBxQkqyLuGxc1YaM0iHj0HbzPJBX1UVBb1Kc0KNNdXF5h0+XErFNqiiTypKo/IZDSyodm/ytAbBvmpYN2YXota9zIJpNm1iJDqqESfuQayg2NiGZrqKbG+p54xMD5Em0ihinqJwM8bGsKFsn9zcrIQ7w3PIi/wzl93d6+A+X0p5tlPrEPG29lPlQBpgl4FlwYZ8+FhIwHJ+8/q0VhrDhdn7ZIN2mOqoys2NDBZSaaPE/ZEW7+DIkWHJCaswX1G22qLFltCqxJj6M0N1k7zclpYGnSQHT6e8AMwQmqVp0es6Ll8XUIh+nwcH8OEoYXC6L1tyiDeBt0HQizkAWYCX1RJpZpKji7HDGYfQdENClNlFi1wX/StpeUw6zfyeI4qaNHsOkAvldwNW32ajpMmQCTQeYNundAvXDqTYlHBe+EDAFOBQDVxfDMSbmwW4NjoUX6ao7zopFpOtnorHUeez1JsMUOtNQ5FSINvy4pM5/e6xqdX2M0E5b+uQgGddsfj1tFdyhv2fZYDfFk6/OWhpxzVNwsrqOR1FBGQRB8urONBczMJBcQkhfMTftWSUFGHG7RBklNrpvUqTVomC5Ck4fQjPR3vvq+af2iyRSMNFgBEgHXZF/9jEd+ty20RzTx5OGGWKDSaocmPt6F3Wu66HBpm0UrB9rD4J8CLtPcmiBtj54HLDW3eYSjS5L/xeAlHfcXgTBqmKRs5SsHY/R0sSYWye6XwoERt0hk9Rl2ZjTvLkvH/ptTFVJ4Rzl+DzbSU3J8sQTKE09oBnw6HdM54jcDUibNjPHdOu8/fAaJIdJZoBsbiXHq7mAi5/q/90VmkSVCmS/bZVwJ4PlHo/3I0exKtFpPTd/PPzjbkeU9WWstFXh+9EBj8oMk1KjytUzytgN7I4fCp9r4jzNO/8DW7DHR6qxgraxzVbQwrUTHpipHF1QqS75BilZ5jIBCOH5sZ2ODwxowCKJs4xGDnLCCtO1V6miSboMamiYSKGZFG8nfUuHbDPMmjZhDCxg9prB/niU2Yvf3KrpB+FqzlHCL0r7esZzTjPSGa0DurIQjkmVQSX8mttx3Tf41GaDNpHzTAZPmrYW9NhxB60xKnBrjQPFPnqz0sdFINW7qSBAkYW4nI+7/25tmOxCbJVpBG0esu9GBeS306LDFjSSlMxQwTG4aJDuyGJaPfhUqFjVHu75S0qThOZUvUv/r8QBP17xBmYYfom7gf1OCBZiMY4vuqX2dvYEPzjRd18/avsGelumNhDhGYmtvEsWGArN+AKZOCPgJjibUHtvUSuuWpgR/od1DFegE8xF+PKM0IDbuZCLeBdcAx2EEKwkvvwXZ1cXZFnwn3RV74Du4+T4AB1h2lVseWm/+v/yw8D/2DSwOIdUf/Sz38ETFEZcfeF7fDAqJzqDWDBZJH90li0YtpxIKu+/Li1SFHDo8wyeeV3rFZn38YYEZ+nYSZHH88O/WqnpsLs/HVLbY3uenMrXXjMBgZ0Oy79XJUK3pbRTg4r7TClt0w2iqmY5QPOOU/MA4DzzQeg5n9HTlRricBHZmoOjcif/KDxjuHkOyniYk59A3OJCjLqOzk8Wi4TfhVgwPMcaqQTRYhr7PW/y7182JbfI8yfZkyD5T/tTvd2RW/ZFsHuujX+ncQKkUq2mVEKsk8hTudJ1oeSAPCEkTe19SZwSMTRi7+1xyUScOGcXCr6re24iP9Huj0g1KIgAADNftm2bdu2bdu2bdvm72bbtm1bs4hZyDGDJ0bv4JmKu4Grc649yb1WRYTQVDXa74VTuCQy/TxGxEKE/64kZSkDSnD9Fzmuw2Qr6U07NZxeR7A3loIh/CDYA55A7GIktXX81uxFY9qdP8lxviorDgN0nFtCjHunlo90cH28Yw1oc86UAAsaozU44vsvmUwbl1ONYRK8y6qJx7mUOp+/vnGEhyr6IcQC6kIKaO3MfrVRHkqZ6Lj6NhWpdZNnaoniljpDkKDHv8gkwdoQx7y5YZ2aG
M7VxBoBHb1bms7nmJ1XHip3ozyYz7wU7tt7wjD4UPEINfcRaanxAJFNcvsAWuSr17zgIKk10M1nEs1djiPN60B4I+A047DPLJkbq/7SwaZCgDmvg3B5PZx+1uOFfBxC0ODNIv9t1/7LJAPAXrTtEY3xZzAQz9cIKAXvvehfz4JTjjz4S5HrefyCCKEJfZ/Gq6vwspBXEfZYEmy5jFbnX5tMhl18tQS3CQjWUxZC2ZUXRWxy/G3hj2IoQ3cAUx5cszW6gl1Oh7nnvHc8gPGTwPYbE6Z+7pp7tjgAETFkggpkXoNAXqa+ZSXMb8nfBZL2wXoqDzODBBAz3X2Hf3qkzfnhElf+wOXMpNa8GvoHDinsgET1srj4zbXw+f2ypasbXOGjQ6wonLf1PJ/cc5P6Q6lXxOLPqN20OUJuZ4MuwVLfETCLTSEFiIn/8paB3YMomXy0C9Q6On4PYQXfpwfMuFnM1Otql9lJHarrNw2Ay1edR/IR7smat5ZtkjfchLn/6a7OIJ34VayB2e1S79veYJ9nVO3xSf8GEqNujXW3hBzf5MvKXZcjFhpNoj4Tk0VGX+EgxHt3SWk7x5x2AGKLQMa9qiaY6102RdYV+pNqkVGBgG9IqhEc5dMQJJz04dl/Qa/rZd5lnxRTNodoTWfju6dqWWIaMB3/S3MByyq50dbj1la5j4pEruvcbaq0wJBW6ZgoDoug4zqxt0KZbNLTavpUIoPqJ34K3g3BkvKdntjmqPnORddoRTOglfg4NbXhxovtCBMJ2USphcz+dIGOlxbZqGVMQwXJqg9I+ajwquSdh2MpPrScLkm3kYNgKaQieHx6BpOjMVvdFWPy4L4ZLVErYin2xJBG5YVJy6PQzl1IfyGRn5ZQa/R90NtrmmFhxyObfgCaP7lgt5L9r2BBuSxS3Gj0aF37hukqpram8xUTeU/P2G6tAw7kIu/PACsQYLcoHRjhTJ2R7bYgG3L/q9WBIiija4JnmnRLiKNVP/TpVBYLK+bKNmi3gAa0Jq1qULT5/JAYSFnunIlBKAki7ohneWySgSGBOKouRvnCFFK8gK2cHj1nf6inUXPiKsvhnaUsacfIesm36iLbn5twt9LTuqmi74ZJT9+SV1G6vlxv2G/lCvZxmpzM8K0QF38C0xoY7wii7w5mPxapxO1KZbVFbf+u9mtkxRinf+GuMbFFwC2iUFLiqNPT3G3n/Ctw1ZiE8VCOwEAci4VZb6eqnQflc9+PcRN4HlBjvNQtd6Ah8/7yeAazyHDZ4V3jxNPWIWdtuh5XLMDsNwUPMxilMHShE+27+q24LeKDMRwz1+rI3IBwg84ZBwC1GQyQqXRyEalDoJnmtsH40IGF7CnosIvRJEMMBTa1Q99/koXMY1aWTeSAIzZCyf9ShXuYTdGPIqerQqt5xzS4q1JXYeIw1+Cj8KfvW/sdHpwaXazXnVCiH2RPUUjEM4Epv7diZCisu8ga/4jNxgYI5a3Xad/1b0TCvLn0esVNkmeJPEv/sUebjdv16iApF8jjEb3O8ydk/uFdLg5ptP8YltlZwjlG9vLNDerOgzXA9i4WLd4OkXnqgvyrz1bto+aNuQ9+rLl4Ifl1q+x5F65wTeCNff/Xz61HNfYfMMKB0ZZ5eWro51r5b5tlfW7hsDr3OCoTGVG279slkymUJ6BNDhCp33zR66lNlasKYW4zjfzyr0EmAyI2c7YhVsea4tJOncZYIuIfK/R3xftQDvU3lr41hJiOMzxg2qgO6XvNbT40qKuN12caX6chJNMmn9RmtG72z2XFovfolSoS4gcvH4Yg9CvDTIFRCk95rCIwDWQmlL2WK/lO5619oUBd3rKc8fDBhkrV5TaRBtEK8/nHF0LQJT6l+sGIzEeq211ppGufoTeD/CyyG4jFY3woi4PyUoChy63ccthwX7WuU20/ldJCrMBoa0KIp7c5lA0L04ZLNbZYLVfFSwe9B8cNqeYdwx5ffgnT4tSmngw+G/Jc5XdyW
pnU2OKMEotFwROs0q+v8mJnY7ay9mFQMFpbQFKKLIW04CJIUntFb+YRSRwcZqKJ2pDw9PAsPdXmP8YxSPyHDvqSFT5KeHgxYCQDvRKH0H+LLkt13r8KaFrH0rBFdRkR4mpB+BNefJVWvP2+P2hd4m0pqimgnJSQgcjrlqAsSsPo1keIPWvhCW/e23dnknxFPSTIv5+5MRvh6XLSbCGlF/0ypXD/vP4sPlG6CVW+Sbqc0bhJCqCHQhW2Sq4boIN5hI5jVsFORwVAnWWLjlFaOxHOjHoewyqDYrtaDT7HuDurQymUa6l0NJXo6m7iP5GbgGUzqZOX0KDh1TWZtuP+dcVebWOiXDmBTOj5grwOxeppCiAg2t1kDmVRsLshkT7B/5MPL+dItnx14y0yFun+CQHOcy5Qw42305alumySpsFBy5eKn8wn4zh3XWBFUJw6NDSZNlEqxC3HpfCJ1blS91VSHUceQ6zIh/aD/bXboeozJmTk5/SZznNp86bifW9KK68Zypd9ERv3qGbQ4Edoz7zZJeXi0lYV2GmMc/zwa/5bNVOzs6Ws4tCI46grAPaqVfda2gA3sN94BiZV4JF9N96OJO0XnTYUGVVITjo0rNucKdSZ1cQOI0uCCWD/pC3hU6cGJ3b+MVHANtXGtCrUGN65odz83FkRNVHVmz5tfX/2EJjynzyridV9FiGVLzz9KMqWWP2jQanT4X6+g2qDPbdvtbwY1Zou1DMVAi206yOA7MWNIYM3TIADpNPxZAtSOcWIuoiiVLHXvGsBKBddoDvBmz+/X3jbW4jY00QCHRu3hvVmDX7ZB5BrwKyJJ4Ek5LW9opelysW6wikeJ1xYMvLYwDL3KiDxKRn56nQcweuMbddzoHHJDYOTwHnfPggIK9NC4a44b0cbK995/OvgBoWQh8l6KVDROwKG3QPPZVB5P98WNl1fI4Zb1OuyQDnTxzjlMGorWZ+1zTTjOHwv7tjJAgHFYAuhAOEfsdamD1RRHQlvzIS6zXkdcV049hHml2vmH2iywzvuLXaHx0NZp0JbPG0Cm3Qd1/igkN1jxZzrmbewJMwBENPxp8qcnGe+4UrWhWd1iSN+DVkyEXoo/FYp+IS86rayw0uVaZYsyLd8jWYUSkvrYClTvCC/nAuUy7RZuEr0f+JJEliNHjPAzbU3Uejf7wQ9uru79j8L/W7NPmWgzGDuej38mUMaqcs7Qw/LLzVQY5ZXmAU5dcufBLbzWJoiWSBVHih7ta4tZzoY8DiFgXShCKjlNjUfLQcSytZ7QvlDVZRjpPd6ILDkV5snlWGzUrHdOCcsds5ZTiHNTtOPBTY4sOjC7XVlV9K6ZFPK1nFHsMSQ1Op+j94DIJD3ZypVLk6pf5P2BGX9MJThWeqD+iUq26r1bNHbbRZzMTBK63C73hAzeJLfJCYgdf28+trQ0sfiyvaCk2XLmiLWeu1u7X4WKxB9F5KQfG4yIRb10e8TpV764IMTwA8a1DLeXtZVd3wnf6VizNl5SbNhk3ZwuZxZdvR21fohAYpq3/xc3WRs33scCI65uSrz9S6RNMFQMp6rc5YxEJGRK8qS5uwNpROpUAuIAepunXsU8fRoqXhf3A3ZBmo/4LB/l/9swvjUoVGcKPUZw5MDUKNsJJAM2U03lJyN1H6UNbcTXglDkfNLqyK2tdOkfa+KHwPVqMscendwOZROpsQkEHgzmMN+rOp2e0hfwQsTiHU5fJGusjgapLltr+vBpT6h/6m7YLyts2u1JUkDIOAUJoZKpegA91xmxVWDwz9v7gnxEAeCU9UF1tUL3YF7AYFbHVZ5uGZiiEcKPuHfEcid7FHHxV28hjeGD1gXszOVC7LPhfA+kp/NOGb2GGLkYaO60r/35evIT4qDrjAMfenoyF6D5VtuzhCHdyGgnyJU5SsaRZmnOUYcjRWngyGc1WC7YssQ0jzjo9JoNGHXxftfFuJ92Scm9YxDVPYaLcSeFkYPOp1g+Tt6jxeqS+7Un
4foQlWYE0mMmgrS1xn4WrZtoopkiWG0dQeDze9YPADavS+iXgt/y0UCm30CxoTCEliX0d5GmxWR67NvF3FDjcDoLZCPkl5axLCXTnWFC9/3DHO/bmhrWqFdiqEef0WoM7CLsyO9jtjgSS9FDSNU30u9lA2Vfjq5FnYE+GjBJjYTV6TUuqE9jtl5NwVPafjw7ZB1D8ztVy04dMe+esNd6JpexuDM6NRDJrje2v6ToFW/ksFjNBRcIXzMAb1WCZuMM6tfJhorEA0+zq+1BbGHZJKh1n7x1r4d9484RL0395YwBiILx6LvdffVhGrPVbKj1uouogJ5u0KqUlDURWZtYyfdUW3xF2035XZwCbAaFllkVVXTMOwWYv6GnxKhGu59JWcVHJVWn4G37qIHNV9HF5HBwetUS7T0hG5IcoseOskMJbwOG3QNPKwSMlQpo2Wz+gtw5wDJvkpKMXDcuwyLNk0Gy+BOXGTqeAUzRBNGyAB0leEvclJIkLwXxyszJQuDEboScp4hWz3NvYK/1jR/GiYIkqVB3V9fDc0ZBcUTFcJHHP8E7saHtXdAzsBPbHeMtbQr6jp5fkGISw8iMxJUFS+h6Fc+iJw5h4ZCNDMr5RG3bv2VWj0q5o0usyu0lMGQFBvSV9dC5APZUf5/S9imGzI97tZA/AV5n8jme3EikM4sLBez7/iSc6jJoy9HU8+o6T7iaQLii3Yg5xhMJwnlcSGyr2xWzAJWhg8N93pnozkaS3GK5q0R5p3REIzSUEqOmijtmPtaJ8nwco4v06dX/FuGqT1Y2/FwlaRrJUpblSCBhf5OW45s9ZYotoOyJWdtCozYkO6uYNfqkFpYdELb6nmkm8ZzGnOvYFiLH4hUsKFT18jtN5kdyD8PSh0QvIepC6S9ZiThPG1K5DLMWLQINL9/1dVPugHEnWf2Mi1JRcMZCcAlk+D9iGchnGnx6tFIJPn2p0kY5uSavcT4kMeuBKhR2jbJ+BMQQONv4FpxxM+ZTol81eIh9nbAWbW7iu6WOlQR5HEORfKO9rQIBAmx/vXdAhx0J+erjphHeNotXncKfZCtWw4kesx8eFk2N3xgWtzugfKMfwzWd6H4c4zdZvGHFY0T5xptWbSjtWIdNAqy1DCPo/RaJqqleKz6wUu4aaRhtqGxGrPgYsyPCkQxcxGFKNagl32y/Jubmoi9UdRDTKMhsGPLzPUZzhAIGNO2ZEgg4tOLGFZ4Q9TsIPR5UgvSxNvuLuNBFYjhMKy4QkHw9x+1sZFC8gxBZSE6IFqxLLhd5skCTqISF7Fey9GT9oSiGePRMcDFjld+Zbie2NA760OUXDy+abyvnoWIvo8qmGbWp5CSEJZBHm9JLhcZKfY0LYkldpGr02uG4LSQAbwrn51EKJVjAJkDgvaKvEQPPn3k9OkUjtlUUiKdGCiACDS57pRMxgF+ZC5BdxvRp1zx3wUL0qomVoF6S4V9SgIf53jHQTVkqn9FCipeRuzRwSei95082SfEDU/vyxZwNF4dlNBgih7SHJZfxesoveo/eCtFXJ/Oj2aVUfhc/UN5GnwVWoLFNtOWpW5JOh6e8PJ+o6tb5wKURnyPy6gBYrV6HS4Q9qWR9wM88ttMPZWdhh896ykOaufFJpP1iPM6kKhfueXtUciUcZzK2KE47P1QKXpOZkxKvsgmlqOqQ9C2pmv/SYzITO1D1NP2tpq9Iz3EcB+YKU0AZzGFJyiZkpBF0dQeQu7rHjrgwongNR3OsvSpqQfgwogxBZlvtH6IIv16RXSAUMfn7TP8+gq24eW9JRKcXblvHobAk0fL36Ugc8aY/qR3pkOWq2OOBdXa2hLhwaKq6NSqB3M9NgiLQAjQ0tSz9RQjF0u0d3r/U1/ozsWwqzKOgjiLsZ0v9N1ZkHJzGb5eKPRHIrllNLS/pSewvDcad9ZCSB8O70NEwmVbDDCxWxtDOsATBrbFMmbGlMhgoxqmu7VoQy9Oi0QnByg0i61QPSadQ45LIclDR
Q59T1/G1cJWq8ZWVkTTNf3xU8ZRKSRU4fuHUEzCN93vixeGITE60ZuDFSoBG9CRERDEsAQbblJbsqS9M1PP7Mk+J7X8dPueVGG7UKvpaeGLXkcxSwC5aM3Pl+SxlDVY4TBmTALrdBHObal5RUfrBb3k/bMzY4OhHQBa3q/rcKQPy7ueCmGCA9CdljFg5hEkTD5kaqRkNP3LiwAAZ2JymEJLLL6LixBHvMFqZk54Hm6QnBMBSToJbt9A+taF66BQ0ODSuArmvRopNYxu2FBXG3Vfs5ARTcUN1g6TMGThCWFN1U75KKV1a2eaGJU0TXgzJ7JU6T8BzoxIDWV/7z/uQREXk6RVJanQau5vpVPY1BZZHmW1QIS8a7kl7agzujxVTuSLf/AocoIsMtPxU7vq9dXbdq+YRlWvOpapnHmqpDJ75r+jLklc1VCSnaiEnJaiBzqtwhCEAGDzTK0jlAn3ODxRo/Seo3BB2Vg/JiMeC8/TXIdxQ6VN24jAdzwcDL54o/jCJdVORkb7RFUzz4EMZGfrnP05lvIizW1rv2LMtvGrM5pSd0iKNH5I2CmmjtN1fTJ1kWZmabd1ltwf+B1l7q2wZ9m2O/MiehF3CrpoWQ+kdE3FuSUI0onxnwCZFLvuOJNmgW+l4ZLbYVGaUhByVuvoNru0CbyYcqC6nqVekhtd6D9ig61vUjQ5wsfvb+UsDSLihYEZEMkurZpUtlvm/sL3ELfTPUT6PQg3mNhZ7wbPchWVkZqlZbNBrC+3HmULqIkTwXie0GupFeO+1iPogl15R4jqw66eglrLuj8oFRKe87zSXy6SatB+63+IjYK9yraX2iaxA7hqmQgJrGBve0140609YsTV/PA/Kxb0+/IHwYV6IGszcm/MYBbxz3rSN92STrw9LTU6S3aEildEacHg7iX9I5JEl3rrwAnWgjFej9fncQL02/ayumUy538jtBKdMPTakcYsW2M1fqIa+E6z4hJdoF0LoeM5SXihcKMophZB7+BgFb29jLPpLOtctonODVqOJrKcVkEH0nIjgBMZJrFuNElM+F7D+/pXu5Ti0DAXOSvBtGzKfmVCE/peCo/pX0f6Vx1IpOwh941Qx88X0i7wEjusDuHsrbz8onkQOOKaxbWGhh5R3HG9FP9iT8cis7uuLeq//TTW3B6dR+nKngyUwO1nhs/XDmEKR5JYlTo3qYrm6d1qeUzMgMddNxo/UAG5J+5YizqaofGCuiQHYvDR61N0euvNJ8bT36LjbgVxAYNh3pPCiZ3w4OrKAjanJZ3YiZNPymxcesFWJ3ji4IJji6BObh2GNCtBGZeqhn9/2OsxpG49bIAN4VlBFSqCcFLOkKSR5RrlDeFfFQ3df3QTPDq4FYVTD4Y8a2a1t1GwwvkYhiUM12CK5a59GJOWcXU++GK3SQZN5m5ZLoxRPtpJq5G3EvDHPxjX+Uba0V/Qrdo84vy6fisg8BbCf4C1Xbs5rvpQi4pN9k/2Qhz2gYszy2aYdr3cUWqeh8e114C241zHbVH6NVsk5Vp2IO8i37CyD5AH31Bt8NL96kIf8HN2uO46LZMCOUERgAEx7aZ9/TiGxY2B8ygwY8WHEGqtd9Twpflvp4zSZpyj9zT2S9saUTXvMkAwUFA52juIrBaLpHiHJ7rml/k6pL1B6wiTQeal/qHj7/BLTnVw1dSVOYqc7jUknB9OBNVZayd7ItdSfVFaazXhAW5bq/EyZpOuCKgmp0fwo3sQmkbopJ9Fqd3VeQObYONYsd+ve7iagq5Z2CAvgr5hDwwbZuRtOGyEgVynh5CwUNZsl3XRmJwKvsJ10f0H8jtHCf6OQRk8EV7k1gkvT/Kl2SLyu4jZpmFtTRO/w+KTTnN1wRV4M5X29wtorYQRW/D3jSVA/raYqa0P7Omn7OQfwcnThjncWUCLpBSv2Hhgu38tNKlHLO7+0wrzFZ9I7M49J+5jRMl7UZSyR3tQA/hhiVWFKs7EaMuhExCSS7TtG3z/f
agzY+jQApGjQcN9/KTb8WmURTbI6qvkYtOfcXlXvA8YOcbxydWxPlY4y+uKlc9VY2lZAyW3OK/DR2S6iL+7lVr1dxdldI5NAfWkuQast08F1g1ZkGRiuhPqZg8GljmJrRaSybM3R2BzEViXTg+I4ltejFg9OWXsp5gl2WMSbxcOddqdjb1vveZrlqd9VHRfFDCSTmKvHVvLzBXNOSWPlX5mpCzlYN2i8cSat1X3IwKGGEjT+3zgEv9zU48gg2SpMIaeL/fM3qEqLWxIM+MKxh2+7q02a1x+hSdKzDCcoEUC/nXkXijQeYEMxO68RWka5F02IpwEA7eV+Ov58mfmitNxSkSMT6Q2cqD/J4bfM/zdisPPup4b0O4/mMckxkLWy4eq6s4DMbLcy634Jf7tSL3tcGWf6IlQR9A1aiwUENswfpEkEVWqmJ7ZOmy6PyRufhqgNheTMsBwa8mF8kNBOrQWXcNMyw10HIF7ezQLe7tqFEYtW/9YCT1xBa0STv1oH1Pyr5smWN7PPinV744ualmrJSo/9y+bNTfqNsVWwWolaVn93XVNvIfJgVCvVJmGv5iAC7gNGJtllMh7xJP5lypOskTe4fliqmy/k1B9FMZh1q938lFC+rgjLhhFN3V7jWqZkRbJoT0Ub7boB0WOV9yEjHb+ISrjTdwIu7h1UE5MJ6B8Dn1Z7DiYHyuK5D6GUZg7PYk0XDpjmys0/WYzBmZifPG/lHGV5k1ErquV+DvW7aVr/ikdTROT/Zz796bzfhtNKdfsrb9MvBLc24YoXwPn2mo/FhIu0DKIY/W8f2IqKym36FDrNiv4O85C8xBIH1oKqD/6+BmbSLCY/myt5H65Qi5QqKjE0w0zq5aB5MMeQJWIB2BkyXkLk//xgporo1jeufa/vhVl3w6XW5jQE4o1FTt2EQZhG+bm1p8TYA7RUKfsbmRJfVvilKeKAU9VRTGndQbA5I+WtDu3IxUPmqXptXxSsjBzsKowF703GttPFjodNCzXYi8CARuoc8+QelVx1qxLIPrIWSDQuJPfQWV/zoY+EMbRR28XG0btJU3SyAdUFA55fEqxBegSwBtCQ+UGa8IoD3lfTAIxHo+5N8Pasaz+Rtw1ry0ciWQYSGO70pYyjlo3RMOVxJIM8XfqaB+zC0JapFLD/3XS7ZOhd0fNe+QinMCvEicJv7KanS/JvCMenjRPW0QPezskK3G9s4BUvjk9sEVLl/r8bk2Ihl11caG176TLs7dDWzno6VafsMkPt7ED/XKdfonFYRlX1gPfAmkkw/pMX8DWNyL9dINASpl7xQavgOptQaJhJFm4yWbputYnFdBuW8L3DWzDURkLetnWz/QfSlZ2lNK+EV2MnSTtMD6VV0cISh7wk/bCpUHD8HZUOXaM4jIYn8WxJrtg/zgyFqOQGzG5ki/l7Ct5bnOgd9w9ISa1JftGkmJA4VzBLXgSjEX1PcFxJ5I1ym7qC8RFQyDA61Y68tbhO/z9+5VR7OawI9WYw06Ma9NV8WWbWSV5d6sOLkyq8FEAwswzdWMXRZJDraoNEDd/tooo8Ts0TWwaIXc9oG5AcMt4cJ1yzr+9tljTW8XvK7+COtwdGVP0svid9Uevv4M0PAtdQi3KxSMJjUpCBYulQABZUiEB/ju4yuN47c28rqlELCciGGAg6aMNwXOZEs7/WXNMM8JzVTqgq3iLjmltAMNf48VyX+B9JLELbtlqEZGe8806rvznPbyPlZXfgdPtRJIl24r6kw5WdYmSSo9j6bh/knad6ydWBn8QR7hrwR/hLWBKlbZLxLPozbQSPDpb1UIZWD/m/V1ocZ+5RKW+nNcN5SSk7fzF77EpJdOyZXtfPC0/3c+ssFZSo6Wz6DN3oOgfzD5s1lQ9ZTc+0Rtsaj3Q/RvvpLHVOIb1zrNPhe38BLeani3yX8kVUT6bl97NW7yjSsbx6RuxZcIzH/ojOdKZ5b67J5BBd0unmQXDOqJ7ia8dn6HFB4t6cmGeW
BWwLhTnaEXQXWY1Lsae6ly003qMbmZTi975KzmabIxCVkgdIXuoV4xNUbT2hs08kKlOsNlM48FF2NHBYO7Jghp5csPJglf2plIu1wJLRVzRCq51i4Rwma+XVDpficotSEqUe3u3KpLRmxo9N3TG3R7h8GkgZOyKsgc398cKmg3GT1p/zsxAAUl1kC4HIA2ZCjQ21jV9SCwWSVcxAOrm/EnBdf4hhTJldgmkwOlVw4cVFqoEojrelphprrguOSTYl+etS3FTaSJ4YeTTRI2fz3ygYUuTqISoteHC+lu/EotUKGWa5wiUtYycCWeUZ+dUUq9pbpuga3hMptsUvmhZyaz+W8jjuycJ0e6gP5As8+YuF1FuftKH+VmwZmJhxXkHpD4P3vGlBlGlYCHTm6Co7K5+4LcCfCuxQSqAgvl8rT3UC0yPwi+rNnguXsJXcYBNOnYqjj7E6KdYpYbTal5I0ckSebS8x5WoS3/mGjr07YYwN71xB6E/bCrY/EuuqCVNnY9qwNAaOu4uxcc+yDNv1+mWxmxUaKENu6RigQn0nL75vSWx80U+Gt9k9F66rDxZf8DzYg+8EPydAVS0V+isH0tFcm8b5gq4EZ3Dlfjq26v7DZ3kj+IxfmnMbojxTRj7KRLM1VhVk1jH6x34FtJzWa9KjqocTimV3gFcupbFoOfI4PhdEC254dElookY9QPyQJoV/u7HKYxq7ViWo77EnWDGWjcvEzh4q6UJh1q8G7mvXIBY1QiwwA/9p5VCat0jbRyVxRi4s2sqLQMuKNb2GTunKkONJ0GNpWu9aWd/KpGDz4POjGkT6jkwiJ7re7r1Pq5V3QsV2/Wm5+ne17FK68cj3dIh5zMCA1mILzcHZNpTGqAs++aU9OsXOx+m9iIxUDPvFKOaxzpDiR7UKxr2gB6c3LFwbJHyUZZomMX86DH60F6uhxDyF56304Li1PchggmUIKZxONUJ7kN56r9le5K4ScGc4sh8Cu7pZOzQlhm5N0qFLHT0SCFSY+pXpFe2yW3VXhakVWUMb7kfiGMUveN2XQ/YZC+WZ3LbjqSDpzh5VR7UuqEDV8ifJycO7/wH57fwmYt2uLUQ6nj3nAXhw/0LwCNb6s+s4FMaiJj85JZjl4WAV5wb/SA0/OAeG43BhEISqh7Us1mClMfKefInIFvoZDbnRepEDNXAUiMsEzwtsH/ubPqvMSTkto98MCbqKgVyD0LUDlq7xpJFX9ZqandYDfo/y3e5IjIY3QpK2N/E/Ge8apBGF4l4h6xbUZqpJP4FNhkn4fFV8iXnE/xS0/8cXtE7Q/vd++5TGyqqmJ3JtYv77b9C8B3eI4WUU44CVnlSIZgmHn6rhzX7U9gK1qSrliPiIuxXV7htT9b2ao+1rs7bLcKO5uYL8j+1rcEZQriungMiefCtN+FAtqXcu2D4OHUlgoMJyiCBy1MPvmLCoNrTz94+ClUUAavw8nJyvbYBuJMWH/he/Z/NW/d8HvkUtK45CYnaNq3LfxSH7nLRQKnhdhSRvxle7jo5ysAHxOC1jDiJbT5ncHqNc7BWeByDiL8vEqiCW8iV/M0JAcWaARjg9UHjLPrPyI0DmwMdGd4nnR4ZIL9JYLaf6F/s6Gvk8JpJ4A5bCQTVEO7DKq+jl7scu1CnwP0mKn8ZSqni47pH2ZIGSJ8+m/BjQjENkpiZz4kk8oEzmzsGth5nRJIZ9M7T/StoIDSqf2UiH06Sog74HvOQigUyUk0BZQSZqcckTtHdq2BruYyBzP2cDGbLwPw9fw5LYKH/8ppGH8EFuBPR/hvf559OAGLxn9fqClwaEmNJkSa+lYXMd4CCoGb43r+2c25MwBA40yiLfZf6Jza6xF6NVKbXRGOOFeUpL9M5SWEhddnnC7TxwlsFOgU4TPlvDLzkkpMq4DVJ+2vJ5tQetm841CSyRUfDtXUFFWikQEnqEfuIUFt3NPK7VqubaWKHij9SnpKuwFrKUHDKr3DD/yRQLLaKmYINt
zwGPvWJ5t8B3D/S9q8vB1suBIO1K/0lyM28vffGU8mPwdgANsi4ZqvwVyL+7qLeq43qbVk1+CjcSIKAVfgk6Rgbr9PxdjkX9vYyuWIpL3prqrATB2gk492X00rXEzD0Th2OqbRcH4pFP8M7qsLEvK5pbpgfDNoF8eXeUg/tvWc9AK3WfL/GSbZs56J4gjspESSeQ9mYegcKuDSHfYVdZDfdf/5X5xFXAelDituy/e0z0BluxxS7CWyeEX+PkXngu8uDilq0YqGmhVH4a/BjTWtTWbckpmPTRZCy3uSuUVxpu6Upk82/c/Yw60EhiEGTo1pvyJas3fpHRp8dlDBK8aoge+PhJbZXSyqgP7zOm6BhhtveH3sMUdyQFEeG7brw5BYd88hmeKOFTf6VBlZ/M5LSu0Us7vYgBhiML/nL8kYqgtlxg7qRadkPkGeUgWM5CkyEnDgyH9PZKuTeFDBiTXe4PT9EbEWPAUAS0gNZakQ04xp6kMIOHzkdKc1LSMv2nUMOYp/I1BiCZSa8Ejk3ME4oxVrTdS44W399eBBcvJXEWkUAJkabl6ejWwVooLAMR/pBjXSUrioC39EU/Zkpy/dbAXMfMz5B/3ahhmAWlKNOmYQw62MjYWwWMozRFy2zKzlDutbxk8OZfHQQQYLKLL0ZMvxdFWSI5nTaMC+T37+hy42/PoferUFZ7ZCVIdQ4OVSTiTrj1GnciMdKTAvNhgHFq5Drd8qG9K9PeyCu8U4OpPG9vVi5FDixv0qD1Lc8P/a8F6gxOT3KS17rkRSc2lPjIcaTuDGitwzv/xCDwDh0GL3EebxkVsrpmb9MqqDI6FpjacYIX9qeOF8fwNQqhM7WZHj2ufTQu3SPMUZLHiCx9YGZw9YhpiOcoe4OWw6u4YmiNGoWBH9fbiaPwbXKhugv3WbEe1CN/yjJeWbn9qancET0Meg1edIESPEz5Hyudj4G3A8whuNpFNzjyfwIDDtxWePduttKxC3QdOzLMum4xL5cudEQR506HEqkpDX2WRMRwbX3I4gmDcpObEZ64u4uiqqpGZVG+GscSFLvu6cE2E2II90pVEQxtylyHh3kXZA0z1h4V0QiwlJ7r+veZO7ZGvhQyi2qwKQfjow37ycbKtxlKm6Wk3SUi6yl19z0jp0oe/bGkO8cdB+uSmUXcwiLaNSpmDCfjKh0QTyuFweCiZl92m3OE4/q4tFEY6bLxGUq7qimQK5Nt/QyQRdfknWtEP4Uo7lm/3uddPLIb5E1redVeRC0SK70YWJQlKDT8qPidO5KTkcPvQ0QZLYxyr4pGol3NjGnfCiH2ZkyUqU0xj6MoYkXy2QS8mFm8xx8WCHH2rKc/RIOw9g/KD3Wc66GPgnH5AYs8rOlSbbOqOfAZkUxia5kv4TLx/Q238WiSyjtzYg0q445zQ9PZQ53iCR6sWU6c66VgYYKKKIQhcmY/Jdmizr0l7UoNUP65RSFppD2n4/i5CcZZ8XAzpqEcNcuBEqH2xwQv/RfGXMEjsjUnFaYwNI+vRtEGeCtvdKyxXhielEBoz7hJmZT0PWtfUBUVwXIYg3b5Qn5Qh2L6pjm4YSXW3ALVjPM5vUcSelWlPt9oTayfe4ULjoaiPVVypUk26pXVp6EWkuSubMrK2N4tsE7fhJib8aG7AFROvQcS/W/iJxpNVyVxFIEuqvL9hb4R/deTKRYzAAiFSgop7nFqqkefi8CZH2EQiYjybqJ/R5C1zltR6yzV6O18wPo+CcujeHd8c+cePPR+TCWStgnAgsTQdY1wlz2AYTuFIIFWz6o6CuNPHepfkFX2gGV2mi7ImxZonJTEhiGFdZKE8zW28hPFQ1mZxvpckhEgKGZWUxK2hb1XakkSyzu5Puc7TjuSn39Z7xSb5KA0Phtp0t6ntnoXG4lI7c6e+tfj2YFSTGpWTwxvQ5me5JGefKina6MdSCv/RNvVJMos3oDBfiwX8JMH3JlfFyf+OoWUnN7LY+QvmdhpWei1i8zp
BH/8pojv605ayIQk0+LGgXxSU6vclH8ZotpcLxNxFVwNMkjmhExGZLgi8Zd0XG9D8dny4Nph7n+p8NfPrLSdfnI0UOMUF62/HJXJXbb1cm51nMR6D1OA3rvjA+jmDNOYYX7v03g7LER8dxHYsZCZDqDk6nWcHXxFOl7TyKELlXPybsv7tIfGNyozAKHsSck8QwFXfohsxGyNL3YniGOUi6bUNx3DJsZxbtNQzmPNcYGDQVcRmZ42chj8TumaUtPKy4vrO8RWt1v+wtJon39uJZhIVT3trtjEJXJPi4VuDPDO6ojCCGy3Oq11ylwyk5PJbt80hya5b/juQWL/eOTyapZ2/82CGs2M9Sw7CsEke3IH8QFaGxfPgshPZxSMLhKIYLYqf8w654G6ZG1wKv+Oh9SH/5wwB9cy339TzDC9d148OzqDDwwSFmkDUSgIxj+Uy/2KcswMbjWKOl/e+IGrDrY55W1BibtJ8B8XQqe5JaEX5CsrugJGCnrttwx1vQg5khD/4o9LzvZ8sypGXYeGafZd388rryEScdLm3PQCvH9vOF/pXVuDEHX5Hg1zERzU/oi4lVjIzRs2D8zMf2JlC88DF3GFnJ591MOLx6p6cdqPD+4W1p33/Aiznz54r2JfJ0wIPYOg7TKqf0hQucqkYLaxZqsdhKFFH4bhOMwiDVQ9U1zudHf+7Dfny9qB+8bwTrizOHXhby8T1locQSR9tj+dUu1Hsj26U6XpXAhOLMkAm2Wo7xO52oeH4U04qB2571Y8Wf3zbSviedp0MM5uHuRuU2gCtNpJKzGXdGrl8jU9il6NPEZ6FA4YmWi7TaDkZEEkrrsAIfL906ANFH3nr0wnfqNmUuyCs0B/htBDk4ljvC1G4yuZaAS/svXQ+qZBZJWKC8yrPeWoHRpzG+OYk7ncvNITzhh45dh7qHWSnK5f1skLVLViWX7Vwsxs29OZQxApyfjoTTaqvb/0OFfTvZbQD7ulgkZ8FprJ1vG5TCZ/pg73UsNbIPoTIyspDzNxKpvBIu9XUG4UxQO0B6iPUINnaxUS4srT2wUFAnrKzoZUsKpcYDCRrYYAp9uzNO80IYtlz1l/WacJCfRnCUhbmzEYxGX4Y+0khpc91LjY7DiMnbIcz+XXd8IMYCo+zEUX+yNth/EMnTMZ9kOlAWg3MV6FgCxbxj8Z/Gk6qWl8YCNPtgskBicc8eJGK+K4ZOD8wcWTywNX8VNWaqRrxdFQRTPVEaXRLEolUVCQAmObZolq2jD9lb2muNi5K78HVgnzUS1vO1HRO1cVNpLKASqK/hFWBPv2YtE3MoIZ9u9+8rsTORgH2+mCxprPQN71pXyReqe0YM7dRTuT50QhLTdfFYamwZjMiRhTfhaZBOcDDf9BYoOPTkw6RhhtI9543qWEtVrw32A5n2vwcSxLGnxaX1PQjqbrvRYmcIGEFsNmr57a9uQYLQUP4veNbmb3mrfow9SKxLdaK2ItE37QDGxzhgYKjGaHp5AT4LoUT5cYy1v/IS/KpNk9Zsdy2pegMFzVKUG533PElm6ZuccuMwzO9RiWlCE7xWLdSLY1PQYSw6SZxzTg7/3UxwdtLbezzWMGkHf5segxKlVQiKWGdNoX2HcZ0NcN8RNn14uZ+TIYRv2UjEi+n6UMTMknjePw7vtu7ljIh6tO+CGxZpQJ2BmwywMPjT918xjS+riKV+xNhRdgk3G6p/LSiG8fjBm0LQH7ydW+iKUhKDhJxTgmFT3PJZ1jnEZgiDmdAyQkSRS/uru5pWT+poTF/+1IwuGoBpKjBAgqvO4ubTm6I0H5Lsq88h6thWfzbO/+Dt40Ojn/aALt0+2y864K6GbSfDXnauhzNj+42e73Zgl0AHOvgjPgaX1LxrobsqHrLZqgG+/xRFulv2q010dIA4gRJSZHHh456SG6fCr5pfSdFAJKRsuPwHQX/dMMTpOyB3zQjMrrH2wvjDG0Pfu4xUkZN8h7w+gp3+VpUDHlfGFyXNiO
6OLhq7d2bIYjaQxm3ddQ8h32O1a7vTlFNjdjwbJSAMBVGRzbCIzR3hhPCjJbHeAuyQIbF5rA7LjKuWX2GoV5IbZ9Hf3bbqUST9mrKy81EkciVcHCW8zqVLswuviz1ngEk/7U0nW3+T1WXNulHAA2XLJk/gGasNyGefKFnHt2QyNwP2pfaq8VTalsOKu/JcmL13oFJ2R5EPbvpisKCT/cbuuLC8VG3y5rtuNwTks2g7cT/cIvlmfnfBD3oEL86KviRLQ2oC+zDoE92jSubQShX+CfVvvyp80w/7o1WJmCZzYy3muzyvwSkscfsNgwKjfrxWhVuTB5sdi2/mK/UZ2xDoULgl/hsgRipttRiaI4YXnJuXOk/qcudXyjhQaC3shqpTeqgZs8jMWQPKalhnyKoWuKg00MrLWaOPMuadjkRHsjHtv++gHn9bgKE/BKxdRoGIcZ3/QXDeBRsYrD1le9tg2W8V82uBfpJ5vTUIgwbineTl1C1k/6fwDYfjdhJaQDjmo2nGbT5ouIzVlNSDnLvAZEEdf+Wn4L1xp74CRyo1aUSHFJbm4xWKCrNSE/gwQwjS5MbQPLkns01AfTrjuF4XBJEI9Sgj88G974PzLr1bXd76C8DHIfupErS6I/wwKkVqZktkkf1tPtOqxduw/+hRe8SemEogFfi/ae24WSbjizCOBTXnhmxJ7Q8uuXVHQSZkvtG0AEgAfVVkmgqQD7QsamQ8dbOBpi3HGTDoDXsncYru/4R72v6E97lDc7v+qxftXcr6QyVVRRrYL09QbCkoTOkb2YnpvuaRh/ho2Vj4MmUZRfaaWvUnhM54kMovEZHzQxFiJhQOwSv3eSFTouZLbWPPCSUKdMA9krCLQmvg2m7q14gxeFd5C8WDdpJL7T4cqvGauyWCuRrK2MUqfh17btZbxG+1N9Q6h/rFGVf12slm/OK0u9ym7diNcK8ozdez+lTwzfD8zrwUcq5ai2b9bQ2AtzzuIdJMvaVGSr5QIvXqMpTtH/4NFoRu1oqFQ5zgJXRIp7IK6PhrmPU6hrbmaoIEvko/jxBnJxlOwDL/Zl7NfbvHzraFFFgknALk/NYo4BhDZpboel6uCu+sFL3iszfqfwRZG9K1HY5RMQT38vslz97ubSM7O+AXEF78PTtszkGytetrlub1JT9SvHF73/UKNoThFKAkWz3z0mAjEDR/MZWrinxqsJJBuKYx8hUvL0XdfdMuS31jld9w/+hR5RrZUr9DQAi0AK8YOmiurjEKEdZkwmyBIJFo1hO7j3/py2x45KSwg4sqXAZG3Ql85EA2YxiN73buUp9tbvBP4xNZtbV8J8Ryt1QBiASO87Z+pFhs5Ly6rpLbJdQF7L1UV5R0k7+GClLpHtfFKA3FGFkStWoQaXdySW0xI3Nz7KOi5EwnC8ISstGeO/ScaA4+rZ1rz2gxhecDB0tmcMnNgxUWcXpo/K2llA6AdYmL2+/TSgSFjhRaTxIPIqM8hpp+8v8eFEBBeld9B6fktKVdWukn4lu0AnchLGy7ZF8kgsK7JnoHGE56X9Bky/kKRE2G/l1rrvMFHvQdC5pajX95gGdAj8J8ZMJpsktWFLEEiKXGIRnKPv8jFly0imD56+DQHJpZtMgvu9dansFjcZwDV8FqnGguEWpKzkMApQdJ/jgkyv+2/XXEs41u6VnrlEQ9PDfpvV5rPOuhB3HLLd3bHkmw/+JrO+fCxI9290pU41WdNuac8BJhB5QyEuk5vDtqcBo7/pngXkCPNtD7sqzSBNpYVM1jr+cChOVvlVwyoWECsXc9+thRxwbmSmtEpRsjVfSJmUPsfA4t5QjjGT+W02jubzEedZmSG1redqRdfKb2QT4z3OPYC97Ac2uauviGudnTQT17WuaNsRtAAPu1QxJ1tcuX6aZuDjBtsQ5QE8ZyPUJIYP0nydy4Q5Duk+0zZBZ1DiQv/OTLkJnaCLGjrw2O9REDXv7e1hhP6gqufbmeSRXwIqtEUK1ALO
c2KcgWn/WQhJbwUNReUZ+jrz5vRdM/ElqAp0xv0if1sFuVDdAAaDoPp+Qobq5lcn4uZ59J/2hclKZoDN6N09cuAql/VQOGFIWk59blCNT1VJ/6DxGMdN/7kdTHKxStlUQLtHDsgroKXRnFwT4LWf8we+wrq0WqnIG55yndF/LXn75OOp+HM5++UHuHfOnEqLNfv6rVqPwe3/IxYP3UNuSqvfbzgUJApuXwI8X/eu0mP9iOk+ZuT5SIYqUDWWaoo2kFlG2vldQdCWXAnuzrphZr8vsmUB9B75JSM4L+uwPdeng8rrNttLDff9TrIkkYiRSzCPA1XPaP0NxBq2LRX+3H/RwGG54KZzpVlu44az6KlfB+YwkkWc6d5VccUDlh5PFY+qRrRoNAR8at/LBmhzfCIqTzQhCllwgbYVNJWDhY/0wcZ5HyNHCxYbo60zpVKLJypoE+ndW24eh4AStGJjw2jKxkuzvxEzqSo/xEhiW9EfAAe1XqXuReojsYCdh8Hoe4f59cfO53rvz0CBoDhBc4p2VG5uC4iMORNV15RWHesgAKS65PT2GUfOi+HFiT/qi8oNqs6ehv05xfAIk0lpQZuOamT53L4it7kpyPfxeU1+Witp18JZQ2QBnY08puo4Vlzw/RMzTTGAh9JNvupGRWjanTlu388udqDF9KEBinUv+avMdeAOZpzGCii6ckJZ2ePFL3LXng4PE5Q43lIeEzfYVbDRkoXKjUufxzrBy4flAsoJsdWaVgqnFZDHhltICqN+fTO65vNSNU2cvBwf7+81hRWZ+MT4vvMtQ0UMlc8D9fJeHMQNXYR9UNpKO1RsVnv40PyG1fQ7beKA6RQd8RWCWKfjSKM4OocWFgQqLSTIW9f7sI8A5JzQmIzTSclKJhf56GQWZtVXU3WZ/cEJwhVW2QV/sJXIsoEim2KvYjyVbnV2VgGKiUOm8hthVAq6en3vsu3NWMf9Yr+cLwjYOJsOLE5xgBpSmxCm+5n7lLjkuC80efZG/0nhYvelytxPaGjZ2OUYVZLnFGLomsgveivTsJfyuMOXlHiOh8Bi7vjGA0LggSe3Ki3BOkVl6nd5KeFKoTE+7S/YUNMhn+6FTaH3OOk19X7BtcDtxAj4taq869VVvw/OXWCA8yVndRTTZP8XlLVs9C/TlXs5lgeiUsNJrkSsIHv+T/YxCfP8MYZqSh05SZ5tJXkNZoUfZEEkz9L7j4LNw/MoX8K/zqaVMzlr6rymWd6d1iz9pfOxEOoiBzyzJ/TRplUuTzcZch7Nhs3RG8A+SuJxhaDEWac1tstqFoK13N5ub7g903hrfX6EyCt+hlBklq9sHBBxF8b4zKk8ZWpaCwTv8GxRGCfICyo/4lccWD32px4SkapTShzXkeHm/o8PU3R0fSsNDIObaIbkCCKj2Kk7D+lxfO3BAbjUNTjFlRsDixS8wDQVDsIiaHuUM2gJv1Jg59gMuQTFglv0cAHB9xMapi1oEQA6x0gK5KoXqZb+iVd6PYbThdoSHIOUiClYh2xiTXRSn386etbnqw87pa8Sq7y0f+FxHju024zX8x2XVyy8H2JM4mrLFSpNg2bvUX7Q0WPm1oMT8rY5PX5E6SUkRaFjydgJZ59yCvRQO46dqjkkE+SjWcFexAk454f+0JXyUu6eRmOrSWEQBOKS4iVVROvTttilWSOaG7AhGOWgJEpm/DHsgAFkgdlri/lJpWkd0w2j4O7xdFeNEwfot1P6jLBu3TAW6l8GSPltv/tP3dyopYMqw6WeIu3Dr076d6da9J/eFhEZaHEXnp5hyxnUOV30SKfOhJNvKcRsl3iiAtcepVV0RL3D5BJiyLqV/RB9p/JZEcAnRYNL+jWESWZ4eQvznhGfhqsyfFg6plwFzKaQetvp+Q0BAgcXDY61syL6tNeQBEBg9UAFvgW9kTH6f+iZOf211DuZAPsvd8WTeLLGYwiKxXSikCKR8QCre+9ZsVoHB0BfrWfT1nNPZ00ocjuh
HAaEuQFfZJAOvRHc0pbqR8SfLATvMDvIJBIiFjOrO3AkQCBcqMgifodMAQNJthdlCR9nfIAzhqNg+oniKXK2zm6WWqeTOZXH2Oztk+jPrBSSWgU29U7keMmCB+NQrTKYOm84vuPCFv0fNbP46dY78DvH5S72aZNvq9g7sztMPGDOhdHNHOJj5jEckT616rYKAz/nPClSxSy6Pi7mcBJpbiUbM0BbrsHAMDNu2m9+1N2wi2KINHJAVtCR2LBWVTHcMGjYxthRMGs2AumLemtjgQNFNKQ4oTqQNVbW1t/kd+mIdXeOuqc3ZGwDChJ3MNeB53Oz1bhGb++qkKZzMPEeYTSrzmGZ8DTodo6owz1rgoxn0IO18OC1ieZQT2+hhxo71Y2lt2MHOWLwg08LvyYeWJwHWJ7VASvP2QGGPUvLMf/K6p5SBUnWYxKlsxl7By9NiaQr/RcvJCIBKL3NqPqPtYEEoxcYrXmpKy2whBbxT+IpmnpQj5WE40IhJnKNWLB2KyS8nHKu/TEQaYeKgXAh1LgOb+6YWqM5CZtH3OkF90MuQezecF4E/+ZPaiGVoQWYQSR+VUPEiM/Qa/rrKJTj5+Qy4+17poRfut50kPKaeIFWhKBBcw0NLZtR82qeZ9YNVYpYdufTq+RLWny0ZyD+a1rt4N4IOrU0QnT1wvPmlQmkTii9RKCCz9a1IpZ8xt98HgAm38Lvf2LsbK3pN1gI0Q8DfMUtTiuhkSyLR6Aj0RpuXYstUMn/GLsgAb7XyXjKcooEdBfFJ7YBJHEYEisIxWM6uJQtwyk9+fRXn73uYMBbGEL+cu6F1n+vzGXwjlcRD9RrwFt3oWIfCz5Tlq5ujBQqSj8d7qpETN2iZrrltlQHN7w8yfHbIG5kO+0hppm9TP+N42ad+pTArfqOcIIiYqrcGv8XD0xRSmSX6j610Y8azeObQLEplrblKuS7NQubXhFw0DIhvAEow7P1RZkXWZA/jTmATYcRzo2mH/SRP+Wl6i0v7BRoORyp+nxgLY2pziDzA+Zwutl779L3REoPFsHBinJRJCkBa1KxjP3kC0Q8zu06KcHsV018PrOh+fhuAcWQVORJcKyDLQyGTSg+SjtanEbh26l0s444MAg+Octp/Rfpwq3mi81CVw9MIYPpk3y7v+c3ISx4IASSyBjj8giKewiEkXG+/9f//+p2P+KR3xy788924Vd6IMcB4RMmE3Z4WHnXunFFHj7YRM6mkyScVXVTrsYQRxUHq1vyZxS5k3EwzkT5yegQIlkki5/a5Touu8JLPgLcgHFfkZCuSgaEr6sNdJLghmgKMlj+8VtEzUnVFLw0TQk8zXBpwmbIrdrG8rJTSkXxZj3/qZIzyvidYQzU0L1wY1b/F/VKIwg2uQUldMtPEvSpj4yr8bLzarv549WBrTY0KozmE/4Ev0b2Wy3t0yufkjY85U276GOHFr7IR2q1c79d4Fbw9oD9dLUJtM7aid+5xa1WQwrsk7yvoFJBLddX59AVgP/YczQOtlv/HqT4xwecwAe8q/nR0i9pnFJ6auxPKtay/OkrkzqiW8BoWsKE2L7Ajih8UMJ52r3m6eNdlFWxZQH3vWh2eptIkDsf6Tbg2EgBgAAwBgf27Zt27Zt27Zt27Zt27ZtdYgOcpGjX3kekvrL+AirgGBI4XDXVn58TFT4xFUMcZRZZTdETojgvIxisZ0nyxdFPrLQWDR6esJ9wV52jVJ0LPLRQ9j7qgHzsuHLrpVDSGJ7Yjzqw13w5BBSmOKw1vKVajR1rwTrTLnQOqB3Kq53MNr2d1V3jOOMQIZ6Uogkc4C6tkcqphhtCtenvNqnoHXpH7ybFvC9zPe7An8hW1O2FuWfAKhlTC6SecLz0jtzWdeEVr9eJVbzDGPW6lXcDX1FtNOj/fN6zBFLgKClRr9avdra02Gn0Awi5wEgoy3azNmZmNe5pM8h30r9oS0CGX5WBHh9Oljw1BGBPEMZuNMlThly/afZ8owlgIaA+
8w2BIh4bwK/spVESZFCYmU4z4K5Kiy5lfirCRPPEPy8p0EO3BkRqmuqyxW+Z52UpNlLmbY9TDdDmZh1T2uzjrspip5AYDOBWt4FQJ6HPU/rjKYRQnLZtpnrgDdSLiHO0kv7qtVJC7KyBNvBfqi5a52nBaAomjRXUj4P319mRJSYBzWsr7IZN28tva1TYQZ1IFIIAASXACyqZUReBTbbS/aooBJh+Yn4GmNyborwwhEaBpY7310G3N7DBjm0vo1tMaXd+6ZLlDt9N9O9wu6alxHSklQRRqLmax7tVT8hYnJI39aWT7jLf8E/ezrgOH8PUNgjSaBbO556e9k+PF6ybwjsNYhMbPEf+Bcer6ViiM6UWS1mQCy23P5/fxH/ck6YmlRI2tRdcJJD77d581LK9l49eqBEJPkBLKPsk3+YSJycKKrH5po6ORp6TCWOuJN6rz5JS0Vi6+atdARVfeYFLxMdJVgE2uyQI8hlPeSqzG/RxdOccD6b72vK567ptJMLNnEM5icdMREaSvqsfMLPBTHJI1tWukoIYZWWqPTAfAcD1F40vTPorT36XQ0MJnApTMvYMxY9miNN9DrhMNdZH2btgCTcCByX33WF5aIVr21s57fYaWulNwuZH9sYFQK5k0oORrLA/6Fadkg/rM5J15R9zMDK7SP1wKPQhisctEi8HE9FwOPGLze+AKRVQhF+7oFbMD6D6GZ5Zp+vdqKF+fMucClUEIfGfusK32+WYusS9Pv8fdSxvUbdnfaeHYiBlFYGc4ev8Q/C358sxaN0hSBY+HqcA2p95xg+0d4unKnMbkwlAWWNwb7bke/ZI7AKN+pgYKGyztYPazRgBGkT08IIpJBor8ZCjdd9HERhvfjLQLoQcXYi0jsX7BMx2UhQXFsx+2y1ENkQwj/tE5FJYIkyGRdZgFEDYmoSQsuMkvJq8haprtO/xez7hDkVxf2DYe+cHg6XiIdQZ17E6tsW3P2EBCn2YL3M5RfY8KWRBIKQf3MWYhWK2spT+HBLZJkUZT3uYj2lcy8tRVFSYC1jZUP6jTrHjmbWr5BqFZTz9t+9kXb15v0LBZP7TYQA0UsrLBCLPNtuyuR7x4d7J5ZQnTapKmZ1kqNw2ooDxHZ1bBsTXnqnGf3eWKfGDuJKwOJ4154IaYqwAKW95YsCUA1rJskJypTgRhHQUHYHj5/C/U04HH3ugoDL4hZIm+N5HMk3k0YiHcKgAZusSLoXykCJqvpMdDdc6m+47EHsYkX4fRkVwFKPipf44VNXmiOr8SR7lOeXPdzo18B0I0z8xFSjjulLnfWR2KDNHX8c/OQo06wwCq3vX0JCQ7n5uL1218y0550oV8Jc1+6tBH6MyLQ/fjaokkszmDAWvfR02fVdMiT20j7vOGsWHBKiy8u/dmbRhbbNXP4Nsb1PbE9adUWrgqA+yZ8FVMwXrLtGKcdqdVXW2cw6sPkCDNLb7ADfebdEGv1VCGPP9BbKAh68/2ytSPs0MRZlRdU2fjcw2/wmhrcHkRcS+1yMql44cAr24wEEXmmVtGlCXDQCZJjiB7xUPN6vziCtPc1z7K0KqdzSJQvadhlEW8J6Bgdg8i4IMpOeV5X++q5rgxzkD0SDyxnHWIHbvDG2RvpHq33vvjPPAnwcr+qg0Sqmgu5ZZdaW8sfTRkSrsZadjh41Q8riJ852DGxjL20t6S9tIkKS7I4hn9NpQuxaTZ0Vhn0A8Ylk6ZRmGpbpeq2/FlZd0CbsafZety6RDEnMOHJtDfHCLkE5l0qT/9ZIDldkrhYBAdnOLuEdRlAzJYl0Zp1Cs/av96zZ+4serMApgd0x3RYxC9EqwZXMRge6IKewCpo5pxTMgo+BRdPAfkYUFIiIell9sCeLKy3bax9K9nlYeC9k5RVngJyTSF856OD9grlpEH6RemnIIhG/f9e5EQZfsdBGRPsYUJyXAYUT1TW7h+KW76qQMQXlOHmUxhC+EYyEBOfYg0N4NUNWwsx+K1R0e8Qx1
x9ATEdLEF531mFBxc/Pjq4a5scgjRDkCp4L78AdRfIPMTE6bYZHNuyY6bEFdFtltOjR/1vtPePg/d7ND5gM9OmKWAV7+5KsOHIXC9shFcZYy2TmW5kdxk+xdaKkKQzmcAB39UbW1PStcfu6NYo47Rm0H1sD6Nvx3hw4+Q07Q2IkYpdUW52TVP+KrOStV0WZUaNrjTC9Kx70Y5fyXIZNXNgLTKRprP+0Ivk02snmDFBpjtSzIf1IG+9dtQRpl384abwbKex1R3FqOLOb06YrJ9TYNbqxUSH8TJR5a6N6SqZMeE6y403ML9/A1drw2cKCCp5OHKEXlvEOWTUm8BzIUfWmxo5b7s8Pmu5FEO7TuHkZxtvxuVPVFvyIDNxwi1PglWIUI470p/B43ncUaXclFvk2+JjgUv2kQ+t2NX1d7LUUpiHkNW3o70eIgkz88APThrjZ4hCm1l64nbmF1OLn0RPRpYgUZgEB32Ok8yokrrVE75Xn617cILo3nrSS+dP3Gr5h14vUnv7ZWGj5eXZXNYd5C11WUKBZUhxlKSnGc3PK2ezwbJA1aMC7H4VrJsq16CpRJ2QEQx6LmrVJzYFYzmxG3f4PRy+GE3XcY9gAwZqlOecDKXq3M5LPouLzfQSaNEBYCRib3w3uQWIWR5Pds6LuZHHJEvsXf5pzt9gDIc9qn3wn90LrqjQgtO7pDJ6FKQJZMPoRKumCX9RypYNI3tyZNEcBxun3ELUE+Khstb7QTHMLB8Obr1ZGKEi7tV7oZVkC8iV+iRUBhDu5UVMa/Yt3UAGV2ZfBsP3+xs55Bq5NYftqOwJB9qSMTlU21yfUqWxW564itqklSM5G7YdGbcc6fq9uciNYdhbk57nKLkP+C1dIejaliMsjYc2Di1B0ngT807RnTjJeZwpiVcXH4O3BessNSztpG/KDn4p9I+JXDqW/LdASYW4Umfn4vKJxgzpXn+k+WgbJECpw5KXjX8FDSjbSw4FBBWjRjCY4BeEbBR//bO5GO4PH5th4CAUtaIShbOjRDkvi67JWQ+HVP8vhlICJLax9pyKRto/a0lrQjVG5cU0CddaHxwyfLMjleWOnEW3RGXow+3UdOysVhsrxTRA++HyRAwlcAnlBbyw/YCqzRvo8sP12UzSp/gbCC7Dm2G+Qot0nQ2N7dPg1ZXFhTM48hGPvzOBN1Y1PZ7HMLIlZmSXWyXEf1ArI3p/HolLphmDK8/d4XYONKyE934Ii2eXjowuYYiPgHRCMC1PRDCw0onbe/mGxHNZXlHU3xs+U8q2vE65+UGu0qHq4yvgxoDPLr4a7tOl0GR541Dk/LBjsD2nSHCf2tumat3qrVDQQ1hqPSBqlvJaUVTg9503/lWt7d/FMPCev8HT+Ys1KImTT+RxH3tVLpPJQpfeIWF9tsM1eNb0MCEF0L/ruJsR030J0HgNmQYiwcJCWsw18xJOY7MuGHznJaLZjmneT+pYEwPz88brl6Z26rAZp58X+l+NxEL7tzTIbIPsgBc7odPnfjbej1g4Hd8c9U+MxAjMGYEng82suD3D2ROVCupxBhbgGyy4Abutw80P2QA2K1DoDU2Z3UqFNSZXi8T/+KFZrHvGvhkzNJrTPlrDooIPAgOU+JIhIjfyW5QYcMlL5FPFtoTftIIWj/Pif0RTHOjwPL2Tu+qFY63Qs4/yxAitBF3a1wBNFGAUl5FX9Rfo1aLPI41ykaLlqIYKSZLaM+Vt0os/4GTRF0hmvbJbWgjuriCl+Jtf9kg4+lNZhq8TLI6RLmpH5DoNCQ+awCIw5XZKLWnYtel4BoK2skD2F3+a9XuX+JPGywjaNd/BVg6dbC8Ff3hPKJgwBnj/uJP/69NqPdYwM+pCWC+ofN1wVPjwwLf/OC/z4v0oknQ94XQvvqiyuTINOiS2wL7LBCZCWPpu72Qqq4cZQh2Rbn+lBIVbUL6AKkM2His47waVSqOrSoH+mj6VBnokLya5JWETUBq/j/G8r61r+eYXw/
aUmAHBiDsFZuKGThc0i1w4rEu9n7jv1eq144/lqL0z3K0ESH9ZHjRQTi4YSQzqjjmC0eN+DGxCgfuehEy4fuIwG2BMbi0kUlX94Bjed3N92dHv0Qmz4WlX02Kla8rhydN/VuwA7iF4kIAdEmnZ49+4IkglgVqjCNxIttCj/4F8A4383JXH/1Kq3D2fjRv+2Gzjhbt/UTeRmLnZxqPpjO1SIzS2NYMoFM/vlD9zutxJwOvO0FinOwCN0GzvJ0EbM6mlqTsT5F3/ecqmK+IFEfblvSfH7q5DnwJJQ1BrhKF6gV49l3SUt3DUJBd06cj+NbC3WvLf11wkA5hzurOfQUCDm4aTRx6qlNaS79+jSxXsxgokKWc2HpyuG0utEwzqV0+6daXo8hWvguUepEw7hf2azwCdDWNW9NluemZTjrRW2/PkYkAjSY7zYLn3GKDmhZB0FyJG7cRGZ0C55sYdvjvQmW5awk6aZoBREWKydr6Qv398mHipyMHTlNz+Mmd7/+e4U0s5wVLCS66pOCFOdS+wAcyndNygGub87dbDYOsabqswHuxj9pk/647uQb7mQCnkmd7ash8moeHlP7v8JyyWP6WuPC2FAO8HNBD2Tk5BytagaN7JPDyc474CVgQqgxbp2b9B4tU8cWUMNzF2OfM0urBPRnPy0BtX01s48GHo9n9uofO1JVtQ8wQZFCx3ToSXPwgaPbd8kAqTHe2HQAjz5X+NL2wjY2gNmAZkbcBDMefoMtURfHWQcjnJyBpcZFFid9B7mlGzOwXGshjZ4rqBz+tDDYf0Aaqsp40FUWGkICS3eBfbhd9nCrjoZZY9YuujRLrs7WXongoEN5xeMJAtaqxfrHFuQ6Y2hNehaLcSCc3ayXPv6BUt9IhLWkSSPrO3AvRDsK9/o0B4EmL3UxFcFmed2u1LspCw39dB92ESgJ64brfKUJa0mFu9/8x0osDeUxrtGKcmrYzHO1FSnBmVLRKrJCKyC81XHIdaZPJUQNpNy9AHEwlE5lrR75nfXwKmWKP+Mx/CBOf32r1odNVWRyQ2BXR2qA2SuX0X5PUK4N8RFuL2QAKZEnbhCvHMatf0V0D8C6jfNcZT5vJBQnaUFb63P1gzXiekf42PL9GRnjNzk098LvOC8ir95IV3jO8pEtPY+SqwlIZPmlVM8BynEThnVngypGnhFnQIwsuixVZEX9vpkEyPHGb/b9xlAtgPQaS42z1HdiVutWLAryPZF6Mr3awGgK+cG1jKWkNIz8ql2LdRtzzvgNkBzzCmdq3E153mPIHJUjwP23Cv+Y15ozB/Ff3xv8Bfp243n3zlIIJiP6siWA3+/ehVRSLvMUL7J3H9m4croh38jOX9ZHWHSv9sr5Xz0GnAyNEpaAwA/Ow/JyEVnbNG+SuTm6ywcSyGlxcuDjKyK+LCI9Jpr+fJa/4MzzfJ0Y4JmuVN6evrqr9vAwzl/ozNhcaMASTDFaWWG9cGuiGnvppN/USPnnkOIFbZzSNTczQJyaakQWwSw1DRYRREsdHBWi+C3bOH/HM/zXjwiR+RAWkFjEpukqgheac/zcGGpeu9yBshV+NkGrobDYDuhwgnnd/IDuMntjkukO7ooXU8TvIUouLk70h9qb43w3lwh7TI32YRkFMy3J79Q7S59uYihs4aucwQqyvd/IxaKKYvVkk/aC2FfTcfMc5/oo3IPZx/XprLkUJNv0hzKwIY7eKjscI+ybj2G+Enc3WWQ2gqNLRrGF6Tn4/gbsjcL5UOH4sdhZ/FbkQV/8QPoJ65OqIpb9r0DHfz0j0XpbDnj4bBdin4lgFYmbSq50RDN2LRu7/C3KFrrIJ28+N2iTSVZBIAP+/XnDb3EZsupDLNQovsBjskW9FegtmehGGLRKh/d/osTfYec4nt+WL3JTFVKDyW2WS2LyZeZI0efOoPUrxeciowwxBdTRpO0ksBzl+zpcmJNdzfEb1WOFknMnMg46FXNk/sdYzxacylvld9b6e/HdqeUT
UtJQuFHq/wxRqA9RDbKPjxgiKWj1b8wDfxxGc1Jw2hNghr3J+d9FiSUD3nylKQQbKt+TOkClm0WSLtm24na4K2iKyQjwnj3jaxX0t8oEEuoLFOYgF81gSe9NiauE7au5SCkGR+56gAfyiPrUt45bUPSfjAZVRp72VImAeyuL8blHnXLLMWQHFiIDIIC3ORrbrEOuJBA37jAoWjpHU4JWlWKxWeG9MnJJfyp88d4ZHHOwb//luUuyfVVVB8BSKXWVDZJQNLLetNbEsULOXi6Z/Iwyk+F6fV9h1j5EJmsft/S2grauTj6rKuye9KmNxPW8Xa4txN1vqp7frpQpgfrtgmuIHsEKrXtRQGbnAZnx9MwXVM1y+XEKldK0IbEmqCScv27B8dOCxMbp0zwl4TbCYAtQN3dr6R1hW9AuQro1MrGmMMYN9fNZ7O7ZzmvSc3fE2J1Jcjg93Z3msSTZJ0GGRNRXIJ7GxGhzx85jfaFCPQhMo2jB+swncKwiASzqS1RbROAcMqBMUZeP99xnxj8M2FfVhGuXpgow3XtpZKFx7wRA4qwoLyS3Tnfzb+Sk86KrftlKiuJaK6mCMM4sS32NDDLiHN6MIem1XZqC4/n4W7bSRAQPjNb5koeNl7USLcL1DvqSz6+0tWKB+SK3xWfEVGptDzq1NwkD3ANuHxpFpdwm28kpxMJ7CsXyPjYTv7ZvdK3gFTDqtY+O+ByCnlK7siT7I5y8cnJ818ukIvNkRaNF/OdLX8jGp8gjHXRliDC3gfhadp7+M5rQ4yD7xMLkZJ7ZIm6PP8UEoBxnk2tbi3l6W9FULOgOV6qZHB0g45UWjmI0N7bMhgo5NAMtarxZ3MiuhSadFOGeCtGxh4lhAdjNdc9aKSTV56DnFkF/zuuUeIpb1VYbyWUXN67zNgkRkcnxM9vDLTswQ3pkfW4DU/JTMcbkVVqt9uBQ8rPbIz1jL/KU8iSfqcqdUYIkGMxwkHW9RJ+KrZcysUlJ+EAqmudgkFT5Y/DMBq2cGgzjAFz2xU38xEZQKIVDPnoP4ZtlEz4YPb/N3pviXs+Yqav06qMQ1RbprIKKPD79ie8D+kEMI+QpHGMqpgUGEo341WY5Ti/VGE0nLvR7mleqBPYNkEwnioJgqT5VCjjbsyGaA7zIj/feywPSnrHcoZWPOXycVQA97MB1KuDQ8W7EMVmeEybEgUkff6eeItUxTLqXA4q5BWURo+/vgCAgK6BsJSPRCD1huUexASjhr+wk2YUsp2mHdqj2FUPLwC/O6nzTFJ9hB0v1YqDfvhFDAY1LZVoGa8/iLaivIrkf6H4QJ7IfRdI2Sg019t3KeOLM6MGh0yhScH2UCFAeyA9y5Sv1+gckVg4pxEj8PzNM6WMA336W6ZktIcg74AFjC0czB/UwACRoo+lQ4ypWI4ZucbeFdqnOsTqX4cRe4OTFuqk3y9NEZE5cWRQjCWP1m9wrYFqkiT5WXGpPH78ylPbhCuReDkNHAOBdb/p1x6tCPVWY+GigRVMeLP1+bE46E2+E7xD70CJkr/bprizexo4zexIW+Ye/GSasuXnA9S1+SrH0MQr60sFbg9RSZj/KJKODJYHvxluopkTa+ERayLKCy/vDRrGKjCB4XoEINwUjmc84Q67PQlxaWNLI7c+1i/j2uta6t0miOBKLN7lP3kTHbR2clEMSvfrrz1bgtMYAbBzyj8i9An3wuKhQ9aHjoIsYEzcfqW2alN5uke51tO5vYmQp34RLy8q9F6YidN9eJfa3hbFBjN9VB3Q4f2SgmlvEJZVDGs7PMIrCWv9tp7zXC+cokeuQ5du5NO02E3l5VkUvtL2NuHW8+Evx4ic0G36AGM8vRPBrVt4fPbve8CgWoyIhsvtwTpFeSLMPtPTzrh/zrNBV9O5l52KjjMxyc55hjQqXUwCcS8aQyO8yroiI8VGra7ZWb54oqxAV+wAT+2TxkrMVlO/5aEPSNP0g/6wVdeq0B3fFrwUx6TDbWTNbSiOZ
rseO6EGTK3LdAWxFGLe8NVFg3gSQC+fvGqfyzdL2kE/ZGgTOAK/7rwtptBf8DNSt2hxN5ver7b8Qkiyo5x2zlEAycSdRuOf+CmkZF7Vh/tQA8P2LqhDVcmY/kIoYRDXTXQnbQoo1VrmnmdFS5RW9CjpgvvB55dYYJqceQltfZDppZCYVRF8Yy7h/NRVsaLvFPF4FDrKQ9DpJfe5aF5NSXuUv2L4amCwUyew+QodgchZcSW6/eK4l+UCczuk3O2AVKwTp8/mW77hwa4ZUa58v5UpY9opIPbdKKtbbWbMpW3qD8mVeA9g9oqnUkB0RBGSvt8TcXdVQkBTAuexIXdEymd9Vii7WebEU9WRmCAzd2UT9pBSikeyGLnvBnLZSOKqdFmjCyZNQ9u8tkfmQoDmlYL+wS+9fm+iz9MlNJxoVaHbREvcozS+URCPnSvTkGgcECrDmMP4nmylZ141om6xPbrCrAaIuYcHscT1kBpcp4bM9Z+khs18/z8iG+cwqfbN9wc85qu02wO6dBTeZ8awFrET3xZSVAd+mvZ4bfs0yg8rRwCqNZ8H4q5pZfRHtNf1sV6Gg4whnwj+HlNbS5tbQNnhDAXQ8cJElkvn4AwtAA5ioYGdUBQOiOmZ4HSM1QhvaLXnzJ6Ze4YU37B/M8yaEFTIiG3xbzbQvKGlI+nO7f8k774kwBl+MoMFQw2kZkWTf/6E8AxpBJAkBxoc8ovxhKGopRCVDpoGZHnfWwNE8AzySwrTGf3OKoXibGxOFdKIKh0FOitBWFTVHEXb2n7dZ98ztgwtiMx3psHttn8XXe9tKVD0qsE2AJTMF6YwHAwzY5RXzEpdJt6XiCWzpqE9v77IKzDz+y9thumJk1MdHPMSGDMKIzG5Hus87qgemGEYwv85DFYuBcq17RF9y7WtSFHA5zysnuFfYXMSA6OQnH5aHc6QR9q4IuWFgHmeJFbaeD2GQyTwlTXVu+H/09qdNjRCmjdGx0aErvFP3OJw6NVkcfBMtdudJDnzI1KnE6uMlgh3RydHp181dFG20TwDCFbbUSzIIHophU1vvvHVEioi5DIcuf9+WyWQcbRzWvR/wTh+01XSv97GSmVsViP3BhatQ5QWr1W9sbtd6xt0gIHCAdafeKcwit+Bmq4NHr1orxYR5ykTigZX/3zDKH9Tfj9HCe7OrocX9dW5jqpSiBi2vo+Vzg4ldNu8S7fjn/USZiwr80q4tmkttS2OOSY2Ac+knSWE69wkRAUMBhW9JkwZLByK2Ufx/Qf4eTGZo+plTHxpJ+Tr4JHeoFMO6HjXV/c0tAQTfhYidI5AfhRZqPdYTXQ57VoOCKz8LEaWnekZYxeiy7gqhY98VWGFLQUUn4XkJVbb1XTV+D2CMlWOg024T0B9GMtvnh/xGjgrrlG7bj4xKnuuRuOR66olSib2BPoiWTdyXsDREl6tEYsiQllzufSIuuxfGxQLncCFmvOg319D0usyLLt2SyzPX43KFmZt8SnruNHmB0ptumvffz+lEZ1vfWGzXC3zHO+FBac0/Oc36gsec+CFXgU6p80pazTbXOY1s5P/O8NRkW0UptZT0EdP4ihuyZcfebj6h3Z63HpJwN/0LSVcFOIylhJ80Gm1kRSGwFEJd0QKOw5wyglCgMhzo2apx7L3+GHe9GZPzYFStKnifDjo37bjP6pdB0/EV1g1vYBh4EmkuLU4pFFVjingfdbm4IMqOxBzfDpA4wFL3M6H4z6M12E+1B379uiFUW+255LE0Lzy+EWgR4u1XNC0Pr5CJjVxwWOC4Jjnc9r9FMUmF9sK+JIWSmMw8QFKBsEOyJkvW29RCO2EQ1rPRmhLB6l8F/rLmbpr6hBRoAOVB+WCK+Ma6jpQe4w/x5sBDG3G2LBc+JN/I17S62xPIHs9h5qdglyuHOeLwFAmFGwBsucz+NlhbIh/4pFjw2dG07lQraMCG3E+/lksviVR32IPv4oIXr4Ppq1Bw5nCKIQAGpwQ6pyA6LIBd
oBlRaR+qScpz6lFbweNFPBBj/ew49g317/09hpJaDN1Cz3T669Vl6c7vkzSHu1m0ylmUAAZ7+DC74elfg+/PFM8SoJwktN2M0f+ImaTof2UIdxoZfwg1KwiJHy0Iv9kwwyIJD1uW4933uYshkMpaOv72SjIvNL0Sd6h7aa1Vx8mwSmvx0Q0b6T5I+Paewn5pHkBZamLzQw7AxHo0We7Mlk9R71tZuB1K8lB30JOJY7CcQo8nAczqzQDMT0NkI5Emb8Y2j2AyXrtty/MCJX04hqyYco2IsPdiUVaXxAMyuXqaMjDiAsqTpWbumxMXvBCfv/BlfwYZ4ILvN50YNy5MoQgE1cSzKqvui6SQSGjk6kauKReIV19HJU43oRbySC2kNN9Jlgb9jhLDkeqjyAaOZmuGQIBByLr4YevXLGNB7Xm7AUpLf/gp1XZcG6qa4nEmXspyHdIKIUeI29PYPBTlEeWbFZXh3J9y8mpWXax2ZCtCN3m2w440CDvZ4vGyazQtMbTEiHOPknbPNPEkwStW5SJ7PP+GPWNI2cJlU6rSd129wVppbdLqrXCLbJBsgDWmT13e5qopieOwibXNihXBVyDK13ASCaHvvjiduQxX6fHrwMIdhmVfpEJGf2jdjzej0YotdD2e/hy8PvanTij2gmq4ouEc2TgWt/MoVqucSHvXpz0SCxz1LOGzeZSocKBeLBqf12WiTEpPihp9W53+DXQLuBCnQd1CH1snV1uZVhftqeEIM7Cd5rPVv8JQvrvMi2UjkkLWGMEU+uO+dc/38WkYgrz73mPrqm5cfY668E0nnvWp7aWCfaoIIueOxqBQD1+tZmuCj4kwgbtBoPftrN5a8eJTK/79RBJpHZxwUk3fHiFaunHhZNRAe/dutmPGBJQ4ZO2+ahJSjuh9/2ed79qNydyIFQBhUlK2yEZ4IpBMHfaPukE6DkADNcVQskvR/Z0W/0xSUNHdlezS1q3mh9SMWrlGWD78sl7nikw7Yyz1cl06g59yvHfIzHAzTeW033NsaMDJze2tzlrr2mG31oLqiu8XYQNOO+JM9KAWAwmGes5jTgFerHaUNxkrmh+kPWfCAkEF255wMOpvWNO4bEKqSti8jOrRB1GBxVxQP1bhdTWg4CtHuOCIaw1OfEIgP+GXJusvPLACd3MHepHO/9invYXzsPefJGpo3zvafkM6XAKecHxOnFYD366ZzFxNBHmUx4Tez8h/sUo9uEbd9f0OmX00OXrR/oBoKU4QS4lW7XLmsJyZ6gkbpkXlcgh1MVBbS496Uhik73hber2oVS2rw+ElsXCySwxErbAnp15BvxA2jeV4ui9zi4IYPxfqkb5eRa3ZY7isdltgGt7YLWy4xJfgbfhZSP7GTWs0agMdSHC6KBuookBq+Z+YtuObEZOzaA5U9yHnFuUjlumL5ozSqEWMWQGT+fO9Vlcu3Ur7VNeJGI1rC2smBMS+dnkoNmKmXMIcAlBxW2Ixe2Vu567WFjBCybau9drVuRmcVYZK5Jv/eyhkKnz7jZKzlGottffbcg6W3H2E4NmbjJ6gVMVtPbuXGPlducaAK9vrGYAJ0vWDog3tgF0Ss25cx2lE2jFRDoQKeyUSdeBAvIqOPMj+dMtsGlI2c8alnqaDHRbvmTLPpssKl18375c5wPpANzXSU68EmIHIblh/B8xD4UJorzGRGFRoHxq6VOiqNJJrB+Ujz8e70OWKgS2rZ94twA6AAk1WIvDZNOfkj0o/5Gqb1iPWAb49mVbFoSCXvvz7kxEKQDw7F7UgqTi5YNO9f2GFFP+F+AQUolqLdk3QjjRSmW2/bU02sDqYgfTEe0gwPLYFytJk39COA16DXWsaCAvKiWTaU90ahtUiEsZUB7cFRvD6z3rLM7hCPbTq/S075wGRBV7Hlet3B7WIi6iiD9VgeZkPd8dtbEC1BrIzV0igUZcAT+TxU3CbZOD9i8JoJEKkrSoriP578fShKoeU5KMCFlsv2q92GG1W
TmZwQX/MpcycSCY6Gw2XYqoWyat2W2JL8oix2AuN3P0FZAhjVsGIYxAiscsdaa6J7LI1gDAVqbDdl3f4j/dyoqfwVWhaG4RVEYyuDb+JhWQvHhZjDx47zg8YUjW62bPiqiJij25ctTzWlwgaALukJjlMWFNy+jFu4gm+Qp6+5USLyGWN2PwnN1loTaDaKB+eTRJxHIPj3rLa7qnBYReKmMuFg/DIEe9vTpIaRKMu1CLy7ueiPJV4XzHkTeE4xLGFCH4CUYjbqJHxOb6ILba8XrWOun77Ne8CUIqMamCHytLcqfewInC5QJlrAAuqpG1DWkELOvl97lmhbnBWzVQNzb19omLLgaMMFP2qDijXkQrd2f1qqyefFb1FipPnuXwB5Er6RhHWoT5tXThuzyyjbTinU6QCkzluUaee34SXJ11NNkFeKxEFh5FhPLANIqu3Gl2STAYviO2BycPt0RJhXF/jCgl+e5aVcYcZ3Hbaw5w4wqGvlsWSckFGhKzVsGW71PjEzkLMgfSZCkkDfpw6WoaTrfYDL3leTjQOgRnDOtQvalULP2+ZYTVFr5ZB8pb8lcxnCSRtUYfkVudrSere+6Tp99+p299WtZ9ABIwH2XD5YM+VF5XCwf4mG5GlQ6Ya2uRTp0j/a/kY9K+4izV+FgBlBUTuDX6bTEHoL2iJqQB2jnNMAyUKgIOe0Z/OfxZZ6tCmNHSn8QU6o1ulNH4LtysQUTEJElr4HeinKYPNPnWg20jGt7sq7lECXRuxjJon74P9omLLOWUq7c+SZzxki+IJStMJO2UZvgLaPxzq+ATNMfOwAgCYvS5as7J0ELuu8PnY+OtijRBXIIXg4s+1/maakxEtgJcjJK20FvXV635JIm9cR7jgkpLkSXSMOI5T/FcbvjhWqRbqJJbjFgyC3cG06bfrLZtDRDV7Lna4v58jOv1h+R3QtYmLvZD0JVQ9JnPfmZWjZBVUKgI2D335ch/51kShJPWzDPiqImGnCTeD4ZsRvsVoMEqKOSqzFRdjE88b7Kg5Ddy3nRdPaKS9zWJSAb6ZpsGyT5I8Vc6tg/QAbijL4ZQ0oeKg/PTTRwmYMDJh+s8tq/BfImGHllrHqAHCT6jxIWz34yIrc0PSsT2Yen5IJ4Qgl9/I2ArXzjFeveTI1JwY+JZs5WRxeDPjcxKD+vgLkl9O+nFF2ih0HT+cVxaZvHCctcGjlA7UUvbiB2q1wwaUD4VGl4ttfCpwy3IS4vMVnwEGjyXDmMi3UqEW1iDcpRf4dOQBK/MbK90h0c2ZevEQ68t/bLhIKWNpFP1PhBr59jkaX24lxIld7za7xRx9JhfVEjP1HiUDRv6z86xeUCKFtgY8mZqrfpQ2R3udklgS/+If9RYXLdEPRw9zqiJmXkXTkhJTh442idcr5MXJH2w2w06LShSh512peMeqHvfPh+9yHPsdMQrAl4wmI0w34R/RwrjQ8T7uwPwB/uKmQuH9JQkW2iYMdHoM8xFGKwlJy1BpmdqdwOYbVEFgv2bNdeOggjPS5vo9/3lupJ2k22zAIvETpZP+wtaerHkvpbb3J0Cczl0hMkQKRml6eZ74xx4cOTrixXRK3v4EHXMa2I/HMoZzBUz9iCXYsIb+QFc6rTKH70tScw7HAg1g/EPNvfXeUsBhI2IFgwv6niTQucuL0r4fa+tpIYnlC4mYkUGjfio76OA3lgGtbcv4McyInvVKOtuYEugikZPs1mRI+OChIQhJe4BCeKdTBfFOJT+HR7HFD4Do9qwQWxm2Ni9xAqtSBSlf/di6mjYyyRDWGOCatlF6iDt+X9yUaxlF+lthzvAcqwzff0mzZT4wXTiAbubYOvsnrsZjn4aufwbKok/+udL5TqEizyZ6tuIR0Obt8bo5O6PW8r09G30wT2147aOFykFFNNY4U+0NNChZK9F7qW114AAoyXSdBS4WdoJbeKV6317RFFCFcaN9r24I88Af1wiyjM21q+IdFKvcAUaZrbgVh58u
Co3nKZcjcq5/hblaVqvMj5C+Zo4EKHvouctuvYMDteLg05l9j0qIBwVn4X66q8ABcqE0hKVGduxH+09JtpxVPKdhQM6N2+1YhJ2iV4+QMa7xtEzO8v/Xk1Bjm+qtv0rMOLads82LIOpqTnIeUgDMEGEPeG/M/kD/taVaSjgsuPy4NkJVqNbUsQhhN0Badh/ALWARyomIxZzSehSusjjn+9p+ItilOk/9u0D9ZBlmMhG93XTJVsSn24ZUk8Os+ILCOpODYJ7S9+9oMXhyByJT2bkSPjrjQjXggAG2fPAqjBTKsUHkklOM9qUODPu5nRO8xRSLKWylBSzYcgruAmVC1zQt1HnaejeX+Y620Id/4ok31I6Za7JKuPtexi6Om0C07cWasc7f9l/Sw5AWBS2eCaSYYnKJ1Jqm2HDK2WdcNArX+jddxW+yR/bFBvUeHb7GYjosW88pk6jxJPuv38OFHzPw+oTaUlUhIeCvHkctDm9yXpq+CxLhhAtDR0cRL6lgFgBn/yIlKP609yA3nO3bbziMIpKFhMvBU/QDuaeBky5AGKUxRach1GO2E7sRDOLVbR9hkqjrxTE2vUKtRKa+QaZz6OLyMIi4OuIlKR05i2pnzeMIskhrEg/nIxi6y4BFLcX8RVMuKEV/RpRQjij4XyBhl9N5UU5KIyCFJ4OAIn5CYjZrMqjaa1bJHwnTnc7lFDoA+D2LDEmtWUdik6vNXJCuTn5shWfYuJa2RdBekWCfqQOdPEv+SOB3HZnp+9+Rj5bIqq6qSdVxmj9FbN3C+7RCspMFLtAtvASHAqgT6VaTTYNtF24fDyvJDFuxjUxDenJiGczQRY2zhU164W6xzKwGpbLH/AptyEmNLy/rPL5LvNCYJz3hFAjI1Q15SmdDPdkdiahjsP8Ejeu0yOcgtPSEoMwKLb8hSG1zyjCPDKA+V/PG7fPsVImPnueyvgHXGNeujO9nldtPljC7us3ZysCzlcd+RnbL2gpQXuqtM37LRw4cKj/WqYk6/HvJZdYm7F6+9qSr5h0OawpTDMMlFgPHWwnjcmr3PEhNaEFtXnNcxCw+XVryZ8E32lkajCYxN4CiGR2hKXRqnqS50PK6Sdk0wkNHqXMt7O3IKtpbjQXw5wajXgryEAggvJt6pvJIqqjcaDtLJH0He4lsA9+wqMBoKp4pp7ze+/1O4mwHmWQwwKj4E+K/DsZI8+SRTHuILynuMcJtCe9wB5yQBuUpySrPLNab9YqrElNJq5pGccbUh2mn80dAY87G4XMRGQSrV7QBELCkv0K2dk3Kt1K3g1L19MaR/wd9G0y0KleN9iGU0vJMRRjEsrWSu+R+/n9aG9BHiVaImAEkRa9oj14Ck7oYHPuztqIjaL1RTdosSjzVcJmJT8VAPdXIcxGhVJInW41Iu+ReM8QorO1FuC3stT1+q1drgt/gZjks6QyFrbv+yw9sCG9fHVL0Fx95rynwHyJP6IQZCzRyIPnpxLEQs8sk2MgN0FlwmrqsNaVn/wT96T4JB3sQUuQOdoP9lsTi/S41Ibjfi1s2hF1D+ky/G1UxI/4rP7wGPpcthSznF82SB8FBbaVjpl8c9JHZ7hKLOsoYCjQ/rWvFEJtyVbgqriI+wHoTK8q7N3kvwz3ErOSY/NbEWpsrpIkR4iEOYgk0IGN+bld9I3P+RhuPQs8MOyoVKGfP4pBaCIHezX6nZUtkuokQKwU4ROdgI2HOhS1Hl8ewrQ75gbWXeQVCEhBUgnIhEEL0NZeXCrxlEwCTwAPdWDA0j6VjhWKxsc2gb60+s3mCBA92hSkypHGCGf1OnlXG18gtXi+2vJexEC2jjUQY8RzpPKL1+NZ4LdO5hqxKrB/6obB++L+QCXwEg1ml4K7OUwOWTyg8SDzoiUi33wBBx9A61EU7D+uLkRuPMvW6dEbHOz9RtgoV2o6suZpqDuzfXhcj5kDtlhOhV8B4VuQPZ9hvxrJq1rYmhZr+KrBs3YyfOsb9XVvGKKtu
NruQVdBLajJFTpukwVl/KEff7WcldbccqFT2VB8rjZo7F3hoYgnyKUodid2mR6TjVHJK2TiWPEjCrTduAT4fVty+XsxwSTJUoRws8/UfX+FOfWLUtEcOqUJMvJ6/KswI//GYGPf5hv59qz1KxvQ4gxfRKfthxxhRHvcJ0XJh4IAGLvLtY+zP8f11NtrwtvvnAhjp7yhg0bYjqgI/kHBmLsErUq6VAkqJHU6F11zvBl5D3NUSyf30abSRylkPuuSsvZE8AyYP+e1xKuaZfw2tp+HmJZzz3tnIsSIAL4jRdCtX7Cpe/YUOGyQYKrsiPbozCJRfZzmQa2+pyqGBHK8yRdVXD0609tVyFSYTCipxHbJ6OvIcXPOzAhjz/xCcoBvid1y0iPvgMU4SoXRH1uqOKWtPH/sowG+PAZvFWpyQS4VQgiArCqXfdfcS4KN13j3p8/zwhe8MZ03qLcl/6nPtT5/PR2/EqCBQidj/RRRGJd5Tuthp2bCtiYPkHkjZKuyt5umh9zxRvZag8D3dA5xysg3CybUSfA/MJdB6P/5P/JqELfcvMmzLFKKCs0tME4q93ddn/PaKdkZfGvR97dRs0P6smNJ7jWwBl74UgZV7q8nVlCJlYfEWUv3XopsrpNUxAS2VnT+K1OLQmmuAtmSplElssbTImSYf3zw/JQL5Fy56C9UBb22pRieep9ilkqxtrgSdTjkB543a5t5sCdyZzwiew8wEmm/PNKrg0MZthZ4Tl5OgLvueGn8psRrlwhnL56ispmjd6MmGuUxippooU6qGE0VO5jhR7piDIroyfiTM8c+r3WfOjcs+rgiuYipZXv2hMO5hcDKKoPC894RF5vu2WldHMjFQTgTEQ0ypKOBGB78rlicP7jjXgMohUjRJZim/DN1V9FF2HjOL73QE9du9/KM6egHASZbDa6lyrIxqGFGUoDZ5Dl0fxVNVDTdIJvayy2dpUHh3DbVRxSHx/ftLnU0iXVbblGsfePiPKiEtVupiJ57TxJvZe6soWcnjDx78nx/QXR+kxaK9ekJnFm6N5xPOdcfmJYiIRYR16up+EoflsFGYQsvhMZyH5qpODJ4UxJczep+OHZ7HVb3f69v7VAL76Wzaxnyy6WFpvWxUTTsXW89iC4nC8w2XbOP+vEIzxnttKK8CDS6Gk+qa5puP5TQ0pTUeO9h1K3BgJxAoO/4ubkUoXb8WApizhN0ISJNd3y9DQC7qNQlZstWsuMrw8UJ2bC0OYn0h7u4bGyXzjDPRtslDZSIbPjA9uUrxoo+9Wc/JOTd2sQ3EdBn4WoIR8lUlbQqHa+HaT7boJSOco74fhC4/t3j5GnUyAn/0sORBT5IPRu7SHDIRKNOb3SfzhmwuihzRUi4qlotGpjfbczuzaCfxRlsfJ6Hwp9ThAdX/XOcpkp3WpMAlqIqrCBEi/qBY+pRwY9Bk6CDZgyMncegBS3yLIspPvXp+rPtBdYFb65+Xn7iDQ8Jh7ft/8kFOIUqzrQHG+j8V797eryB0mkcmfuWPduGQYdCfIbF06ZxjwU6fqBK1lzN4hG6cy/w4RomDnQEpdEtCy9meVA7HkEArzyJypJQ6+A1kozP4UOaDSrm6KxId8b7VaeCArdPeFv2WLHg4jpSfH62fuhTxihtCdVEt737yGZLYkPcJa9M1MOp0ILco3VrZ4FoLdoeDqy6JUV5li20Ta5LCh9FbFosZfYzRTpNbxHlTCATP33nbuESpCzWh3pjYTjjsUH8LwURdehZ89QAfUAcg62HU2nKZcRbj6A6gX4ArTGeTRtaAiOJFWHL0jCDqUk4t9RHwjqdfq9/NlYxBIjWYCkSVeXFs2KdAMBQy1nB6O16tAG89xh96MfLO0qUEq3bOcc1XKIKt4T56PhPpVnjzU4HFs8cVF5nt7M5MWjxZOK4d9g8xd5BrxkwPn6tu+SgufBd4bRel7gDCJCq9CtVYiqsX1oxCu51See91Vs5QLE2PAS4vDnLUyHykuAGI
2+3G3y1c1utJePXi8gS6rvYp4cHOPqppW2v9FjoK2jlFlBSi8oTxh2IekgZp5mGgkgVTkInbWH8IITMXrB3xNm2MlWgOAydVpsTgej+PcP4CGcaW22G1I55npfH0asUdhvEFASB2wr6oCoUCE1W8u2X5Yq2G4Y8jG4/osi79BBxd8Dr35mpjygj+lR+92yAcosS5oyUay1UiS4w62PD+DbTw+IqmbBWKfdTXiG3ne8/1Q0A5gDYt2DzZS4eHgzO7DT3JWS2yBeN6jtk9E24ZHJ+cWs4UkKkiuE6EuXV/khh57CWdxag5obhJZW8tXNPmX1+tBaeCIcZSj7D+JqHsGi7gE8P03Otb3OCUjqT8g/CqeOn2/1LVGpExmKoS/PERV90zEkOJpjX8vfpeZ/rGiMjqTn1mVKK4OvsbEydfjn+k9HQRBy3a1IcN24bGCpbeW6Zq/u4EZ/54iWLglC7lhRJi6VFAJiXEaFKOutEVXpt6LaCpHtMOgjAnG4K0LG8pWTQiuK5TCf9jgPp9Ddt1vusgtTfa78Ktf7aLHEaVL4POGlplyeADLf1uHd01B3ScBv3KaVN6LYofYjGVdjlnfi7EQ05/xZ59kvKtJ0Fk0Y4ImDTBEVqUcB8hJUStuDMKzx5jKmVvg7AjQPfSEMCbdx0L9rOWVLy36LfyGV4uVe1uPRicZ6yV0fiE+B5glaN3l57sxMBBtuVkCsER7K78kVPN6Oa8OnFcLYxYumifE6UGC52rJOZ3Wz7gViNUBsR8hw9+L/fRQsAx4H7c3RhjAu7m3nfjjpmLdlSWic5oM2QTB/p86wpuUfM8BLmm3ypbfLVVxddBme5IB4iPcegk8XJ89KELkPIiaMhCc1tvAlH6Ihh1jmQtX9+LBTQGvQ2x2GAWRd7qlN0Ck1qpmhxyYubcCxAwJKXZzqgyZvmWUT9ABo7EqSj1qG8yGzyS+WXG+0d3xWFUMpzVAHgXJIWruuyYdRmd3FLCousj703bi6Y+sVevDRNI6entvsQWzRtDda0gLOoiLvLF7CzO3s2VZAhzratsIDCLWy97VpiLlmP61bSn2HXVXxiz3wE+QjCfrJKW5n4vlI8TKc/dE0QF/lAC90cEO0jwZwHQROTyktzRB3uDP1mLBYAo1NANFH4G3fR1GDqPePDew9KjYkW6T7oX52tNc2zsmbwJSxT5p2g4BiQ54VYQRQOpMIOvA7SgTHfg7kRrF8zpcyXG5zgh4i7oDrQtVEcZhOwS5pXuHwXkJVS1wS0WQ8SJS41DBEp8FMM9Z10feW/mLcDq22saaKf3pvPxjUaPhrB9AS8gG8ctSRhBjrp0oXoFdZiU1oxbNPzYzESj7e95L2QaeYcCLojxEMHi6fpiBwcp+nNHFzRTzsPAy4t8YuTVq0DvffVLOrD2GO6ugRSrDokZEok/KCeD8iOA3dYhmL1Q/TcDjg+V/blwoMBdkavKuFbFYlpiuJYOoyiEWJOXGQ6LzYyyTnzRm/QKHE3jcPl2OM1d/2DzNCd2W18b05Fqzfw3PXjRdaVD/5lVXKSD9o9OEGUpYHxthRYSGP6qi9/UQPHc5MusgRiQaqBhk2utsZ/mulYmseNC8aZWWyQl/VHm10WBcak4P+sAyVNBq+2JVmpN93Ea6oDewJSWcMTife0/g4AL6sB6rTApkOMTDIZhM9Vl/SycI90rXfkwHuHwRMl8OUSJRq8ZouSpz+RmfnY1PareqnAz6v5UQMZxFxQs2wXmErcTfpmutk2vvNudLA/bVvtK0lW7Tll1IEJGKHKcWuKKKJBX0b6D5Wua/w02dB4bafttnRZ9TP4JB52hg7iinoGVE3hcb1egqzW3yehbVK+QLch4tif64d9w8nmuPaCq9+eGDLMfq0d6uMqkoAJ6GUumUEXXzydCr7KR7xrxDwZPUwHL100dP0D0Pkmr8hQGbseN5/cdC9v5THN2U3A9oqwmwRPa04xo8BZCtXtD/ZEb1MvfWLPeqVQFHGFq
oLw+uQ/UbmuCA2WBPAV098EK8ZeJSAIDp5x2LPmSIFfYQ6P07YBlWqO4AId42R/68nEC8mb2xLjzMLRxB13ivcTe2f5iY7ptPaRsobzJmbFo5bPeTrm7gIsQhiXwRThMDO1aTPBfalSjGz1guwpFqK1ZYc7PRQEReqJ2m6fdiJYJS5sJU7eKv4JDiGQ8CePOVdIRBJL8Sy9EuhdChtOcRt1SPyl/EGIhX7qj9hhzrhAzqYkkcvPXwt5D8C4fEDKmqXwxdM9wQctTG1jLxhZhlBVYZQAmefzY5TsWseF6hROBEfvjiUTqf7Yd2yA+Ic5LjrDMdHn2lj6n8XqeX6an8Dv+t49/3453Yo/l03wktyrOE2kePJ3mThNGo7KNqSXXz9E8KTaUrQDuK5YDWBtSnQsWOkr+RRk48Z9qqArY/7rrKfyL9E3GXOLHocWGfn6rEMXHrR11Hj1I6eF0mResNtrq/Bptb0lk+ZT9VQ7iGUFB2odKJUdkFOXs0Swi0tiulwSBi7tUBjazpuEGlHCBhpdqy5BmOsng1ITB5ntsx+8XcF3HgDMfMxgJfTPyJlfcf+moNEURWyRVJXd8eV3QrhRnjdxNnKKAuZmRMYAbU3qVPJkNUdZQRFC1gRGLBj/hLYoxMsE4PMIjjUgzfI0+v6rS1Uqt3n3y1CTKw6dEyrsCgrNgjQvCNVppVuM4mkoKzvm3XoFLuTDQHuSB2i6PBXOkRfCDO94mv/qFHEJC/sx6DyscdreaDKOvZvy5WNIdTYK3ANynpvV9fQ/vJzJzoRnj1leFRUSrY9HRF6EYNDPyjWhCcoSZmrMnZAYIlnCu+7EogFXkWYxaYT0M7UDjMnK6K+OCgEVFYdkfPJH1oa761BapNBDgLgQlt3fsc56XDSkSgYvo1K3a2tYWlNyZpaBS+oq+/zrIONsyKYMDiEnuLmYftwDAYr+6SFeaJVMaLSs5d6iTkIktE+kVd6/4Zf0XxXOYC4WABJGWr/8v/4fdxzujhW6Z0MZFa6mioXKKrGFxbU87lJCDI0rRwigD6P27oJNtYUZho5TQr2sqk8XUPTMzzAl38sMext7AHz2R+vskgXjXmp06rYSWPg7vis22PD1aj1eFabWX0dA7uOY8Yl26JX9FFTMLwLORLPtFzZjlJ4AowiSn77eXWy/p6t0O/yfysLjOdfcTpUZvAnD8B+UfDSIYocU2lS34m/Bh+9DubLPkNkQcGhQAWp1tzyoV8SWr/nbKRc28rHXkMiF+n0Ggm2trAkE0ndo+p6f9goAuXFeCEfC0ZeynsyWV7svOABl3S8CJozhnyuykb+pvO8xGA2aEFDPtZM6rRaN7a9VBd+loEgG1TZhtntxQGe0tfS5c6gq2/gyNB/Y6kST7wX6Gr9SumNrl0CTiACX6ob/Qx/o4EU9NLV7XIV4zxcRfsvkW0uW94lgMDnQmzQzktS3hFYuFXyUAa8btkfI8FheMs941IUQ+hhBO17WZNKGHTwB9sZVzH+k2wNCLQoCANBs27Zt62fbtm3b5su2bdu2brZds4hZyFF4ftiXpbMTQ189epD4zuxvIJoY2oHaW+w0DbQXo5MxFWFEcoH7UFIqbWdgTjJYuQ5IPIT2TCT96CZS6PTbGPiQpIGcYXy/fmz4AHQ2ZSS2sfYMKGwYTiMG88fhyA4WDgrzYUDMZMfTSZ3TPoTy7NkJTzd0Qvwc37MqzoAMOs2d2RKZj+zzw7eEDpw7q0eKeW+MH0BT1lH3caE1KQvfmZnyeyJ1Ulura1JohQQOGpKMFEQcY0OoOOsvl+FkEioQLtGnO+ds4AYuqm8bB1rAJF0n1ILNEfNrPac2a8zjZaPIahcwUm+ufa5swfOmbmtQkim+JAHbWTKNv3Q0FRYT6E5YVA78sOdBcfyuNY7jpLjgqyubnhUNeAy5igonifyiu3HqtE6k7lgjxh1Vnzb8/cGEemHhAJbrSFFdV1NgBEegZzrW6TMcxg9Q9P0nuFn31ft7P
vxFVCvyttqMX9Yz2ey8fmFhp3gC/x0sv94cqU5zRXr8N/XijMNJIM/LcB8wxb8VmwiTmue4SWMtt5m8PD2OOPduKETaucGIF5/C7Qwfvs0V9oB1lKIW7rHf5SvtD+eOuBN0ZPkB/5uozWM6Mm4LpHKA2DpD5VxY0/yApUd02M86l7Fbz5+AbU+qXz+d0xKgnIZ8ZolEpVYRUobT3ZRjDeYWSZUAp21+O01JWLAeSYgjIYBWiJcLoE8BsU12k9h0lx9i/zxjH6xd6mTL9bwizK9IOJTc6jdFDaezpw9t2GiOvfp5CmNicXZzxD7z71x3HlcPIkA1QrP2iAWzgGNMaoB5toT/jJljS3Ty0+yKzN3WouIwF+lrxjq4ZlUFid41z6T8WGmKgKnKwGf0119caMqQ5A+5W4i94MX0CTkKcPXeOyKEsFe3YIYakw+itSxdg8OswZmzAmhapmyAUxzpTF5jMalBn9CL+XV9XgKtbEem+Tw9g2XbLZkkpmjrsckqVPWH9+XhokMS/hqcarwWXfK23AkDmx84Jv5mF4wQLs9PdsJI9orXsuawZMP7nbvgg3Rbfwzm3VsQdPeW+7VhfIbtM9QKoleqiupc2EWLTTmU5kJFf5wlLLRbAsVr+ld8VJg8bldq9PluLXbP55oVwTmfsK7x7vrk8gJzWTjkQ4+o++p6hthRhCZCKjP9i8a1NphfKalnt9B+sQ5bGJvGOK644uM7kfN4jmEIyfiw6wWJ19W4WK5MVZo54sAxbh/ogNHZ34nxzFuppezbDx+7dTWboOVScAT5lG+ryPa766DoumAu70ISI6TRLrMTGAYo7oA/76JkcAeeSAQsNUrpfZpIaD/0bO4ib96/cYapFrPHSCdUDKP2LjUhG1uLwGOrm9SAGa2vEzHQ9vBH0jBhJ4gliDMOUFEAtEAc58cY4iz+vUWEk0Vw76/6kgfxOVeKkwDOoVYRRVrUmSQNyvvB2C/npuL7uNJe52R62u+lukR9KjvCwuHBa0L6pRL8GyDTIGtWv0Yj3jTrK9sKb5VkwwLiDKN9CI+9/tm+qZ9+IdY8k4hfjr0034E2I7ktvFOZcxqk605nPlrhiSnD32IK/6OCZGU1i0Pv+eQxcwTTxyyjFHOkcnmC0aEPcsEcCnfGTwzJcZdSvj6OJBMzVu2ySMZJEnfB2rEd6/6qoBQIl3FJmMBmjAUbBvdllQyh1i8hCrxR2PpxBHzicl2lVXkXqoEfCOXEJRoZ/yVCff5EFh3qHJ30mAhGFeOPoW6P9gyyR7xT5trnrTPEJ3vVstmcNDuqGSO7iO+75PbP5JOvBaP4lidlWjQcSjF4z9w7TFUbXhjIrCx1LzsupJmACSesZg3E20lrvvH7w0rOwBRB6xrlXxkIrUvc7VZKRNdVnVv9LA9U4xUxYbysOONyBUDOrIkEUDTD2tw5nWc1cgMxRNDGFU7JCrzbZpsvgTtfSc9DdcvhKIZj6csdJgELFgmFG1CCMCuaez453O+wKJSLqaQAEZA629w1KTw7Lb2kEvhJTlC7MGvv53lTCmPogLdeFHMG0VV4TAicx1owqBg0T+tzYGJFYDZLMUEo+k4FTF39fngjyO9BK9ueigeoTiJsBFWr+YJ74SSIhWno5DlZ+WASUdt6OUYnKam8b0mzfYbsFs0rCTji8CiPxhDRhYBzoRSRTtU6b2rI389Fa/mTnVjA6sLOos3ffmfy+U9TYCM3cah57enWQHdmMVERYo/zrtUWqOd4iuNQwPjcIQHF1mnVlY1rS4L7oyvFunOmZUyCI5lt+Ciqk2qZnbaTnZNgoZ3eCN4opY74U9BhcdWANnuLLMyclxDoJmW1NsuxshFfeAv9t5fkbSyaKlcphV3JesdiC9G4843RhI1mUWjh2/zH+eiN9slWP9PVYehc+HgjZ8rsJoRAcgV/9lDb9DJXz/Kg8qyNvjbexoedAbT097Pi39AwnDdjbgVbfhGxETlBlrgMtVIgpyqxchgNb
S7Cr7sIsiPototgQLGjsn/OHC1i/WGrgDGt0nAz6K97fBb1g8SSLVkBp/mKVYWGekIuh4TJgMD+jD/VBzG/Fi0bGqTGipyHbbJSxVOjmTtFjpjwvZYyM49GLNKNDnobgUilQKANDLWeM8J+M39iSAy+p0MyHPylnZJYDx4aid7lUkDkQHkk49AMC79zYnDZJJ1XsouSM5ISxbzbf//HOhrUb3RKLaIRDlsm4RcgdYR5hZ8HAl/pcjaxNPmmqPSVTmScKrzR/af+uMDQP/26I04M7qAhWrDQekR4WGf8O712++dvTWRKO1dV4fQiLiXVMxSUc2DsoiDWws/CEMwnmXlJ2YnHtYADgmizu0wAHQ2S6AbfJ8CxMSkobXCgsa9HDjVuhD+EL0zJ/Svg+3FhAYWyP90AoiYnNk0y85+Tr+Gupsi5QJtYtkhr25qcnpFEdGQK5p47UQAX5bKXxXk9iNDMpDDd75ig00iJJoJG1Bd+hfSY8EUT/2D7wg4dfaGMAjvTwu8w8jVigxogdDwgKxH7DOdko5QvCbwUn0ZD0PHirGRWYwYVImXvLYq2Yn5a91+iSntqpPeWyfq5IQX7gmF52RxmGiJycU99t8JWNhO0v/cpYtRm8C0NPo/bmmp8DxLIqqWIamymW7/B1GqlStvPMonAnZp4mlZyVzvCbS00fh01OZuMQc/Xjnap2c+71gf49o2Ah+lA/Ch2CUEpF2sZLMhTENXBqKCdMmG3sSej8kZuZCkEd4K1AIfGrYDELbgDpzHqmxcJBNTC/qIuHIJFHJHC6w2ma876vxDGrH0Q+ALMvrJevL6KZyqGZjpFHrOCOiaMHKFfy4zOEff7hRwVwJrCEonOykirr1b7VwO2sjHz2Sf/39A0Opv3GB+SdS9kvLYOPDguZ4RN32hZzZ+guVzCQWwSoSYF5n/dkO16tXQ9biVvZXGjjyeLIMvO/Sk1g8kKG9xJ9hAqELWdFmNK3DblV2Ks0xo46gHz/L1fa0fVymmnCMUwzHs9PoxKtoXX2d3TdcvOlbDLR2PKjYEZt6GZ7FsEb17c31a2YBCGKQbPdFGZRIUihfxe3avS5xbUuSkRXJTMqqIHc5RX77yayd33h7Nr6R6tkgHP4Ug5i42uiWuwGktRhiT2KuUC9/FydYe1l3WHQWd/uoI7Vt8gXmy/XZJMuiJ83yKzdjosC87cIjEjKTK61HOMk9kFe347sk+8z5hZuaNTKI2nNgH9qCfA+Zsv0xUjG5e7iigKCj9MXlK0PHkyLbY9V6OgdekNtGuFr3uPKo+WXfeGjAweGpL/yZYryKcd0SD9W0ZffS2Z+Z3PNFbif3zcVyzsmlAZsZxvBC5zTUK9s8wqj7sPzQxaUc44NUxmEJblwuMINHXz9u0a99RDrrqqT3CRvI+Rm1E1V39jZaUyQrQweHqy0JlygjeTNSGRa9EEIr/bpbiPqY2I26HGeZQytl4gfO4fIlbnfarOP9effNmEgEgK2wc6YPfjFnzg/8uX9El3Fnyi+y/LtvzX9cToFUNkSrQpP6CSFEfKPiw2lJWWefkIgAvzo6AitDhgbWdqfDPLZUAVXGPiJ17lfmPYzlFlv3QBOkXjtFRFDdwcSPVBrj4qOmU/oZJeTS1iE+3GRyN8uHlZ1LYuIZ3fPpi9I4HmuLeOzfFPKXaZlWjbrQuzbhUTZMIkObVidC3ReGf25Po1+Fycwe96qZEoy/9qy1LZKLAHJ5DQgZOK6qLNZKWJzIdeDN8SBozTUJx1HnhHcrJ3Og/pgCzL8MO8aQW/+D+rZ/1s2k6ome48bO/k5rPMyf4lbgPKLIhAkWI8JoVclXqWwmiHj93K2Ke9sTmRyRKqNIOgZb54HTYhGOjcxvQQn6hl6TaJ77EDyS1jgyg8Xf5LXOhKmRXKREt/YB2nsabhJCrRtfcFoAep85EUaglQ5CBWKIsStiFayuPAimxztrZwVibtc5oUo6igB7lxIapuHvNjluAEEPRpy6K13
tz3DPHSzKR/6FcU5rYO4+W2N0LTX3GAXSc1RuB2fqVA9SRApA8TcdtGKfTAE/HAF4s3whtIVqTxsdD6GzXdeCDj3s0R5AxYLl1KecAIv5UmzKyTHhLI6oaGmDMn6VpaOtQ1wOGMVopLPe8Ow1Olz4zd5PAEDI2zQiYZS/5vI+am8jOJrMQRfbWvdowtJVe6ZQf+RhtQzQw8UJY2FHlMxbam+xIJnF6YajoBcgLlx+9AB0WZrQI6fMmYZi1iez4bFPsZSS9Ci3iVF49w9SeyQmaqZ2i8XYOKu+ONLMo8R0YqZryKdiEyonZNBDS2fpN/eNEHISvBBDL/XTs6/PDTT+CeGtLyuxrUKZDBNXfTl14D2P38V8dbHnDaBcKJZVdqE+uDq50jSUJbAkNdBRTqd16vCfSlNn2XoJ1KMSEi2rS86e+VG/PxrpeYktEZG6KHVO44WPcXLh4YS+9KAW2+HLfdYuLeNalvXRVFqSfEs9wKiEDuixu57d3rUAzhY1AmptSVaJPpxUAkYpeoQ0yTnXavzKDRKQT6VrkbGaJzojr+7UshD6hiHYLXpd/+b+5Y73kE9pSFgcq70uaMI5tyQTlyuGaLPXki0jAKuzE+j4O4jPOowEfHCabDUJgHmW9XhJ5UAImJErJJuQIW/5kqzewjoq90o650IOI0QxaQiQl6tBDC25crjnKnAUjzadgjxHDswaaplYyYbBZIDSqgbtih1MIBAp7i5xhNBogIIYlcUK4/sgqgtLosHrHzKpt94szBEqCM9NEyDiZFs9b2r7i81h7BccAlzQ2SZH4AgUE+YAMQ5K7v9gC7Jy6Q2VRuXB7+MNpkptDOGPs5Q3Q8Wulc/m6hT6R+C0aNrlU9PJC9aIQhjxQpjV+5aPn5Ic94ydrR9u6N3B2S49rWQlhSoQSuyBx/etTEX9ap2Sw9+fvRpKIdt10/HdbRvfQ9ScPMwuV5TNHbN/cic9FdvB9SaY7dX3Q6NDyXZnAcMlhF1+FfcCecCdztw00Gtv5oYb0gGtSxV8HbTSH8txv+/Bz4Ho6KWg0UfC1MOYWIelhnIVIYL5Q+hpURiFVDZk8cks4R6vJtmA8ezx17JUZo90O73OjGww+pHI1lAHwIV/avc8o981Y1NaFRvvKCmfEIUw0+bYrzkkrokCygTcM4QX2VmPq5D4cyNWU47GSlLo0bBaHWM4GZgB59uR2wEhci0QwimHBMmdmw8vOdyUwiv1dK8SaR7mmDRYeOjbIDGEkeJm+Ew2gp7bfhb8BoKCAYy8B/102WCMegmwOOV9pstPAxAVkZkkHTiFjrs0QMHqQo07GVtECHcKTAY//G4+Q9M+5aM+6/COtGx/DkY9QdyEAJOrUmkBI0gu6jyuVPu06cN4bHEaHQAJ/gLJPsIdA3TeM2YV32OFTNuHEG2U7Es0IHDJs2XqM1+twgu+exKZgxyYbXQgfkkaLo3EeOpJHIqE5+k3/4U0r2UWqYs/+BeVMlpze748BjqVpt2MBunMbnnGf9zqASS3j/ezq0/80V2eyBbE01uSNgYYS6EmqOZPN7oGiUCbQSyW+3MpltlGPHJSwBXu1Fxg0eV5GEX9mUrQQpgk6FXo62gU6PnWN1cFmweQiyvoRa3alKP29CT1N4CGU/x+D149HXllkPgY6+FHACEfy+MPuvHINSI2D/R6zfueVx/UJdOkKFFfRwcIS43CIDw4l8ZxFrJwlDieP6CWo0Gc2Rury8z+8mPTov2Ozxnnf5JJqrRcmzGA3+3WZP7ljO+XFrzR3jGRe3MmuBFSYpQ/qR+E/hsRbnxJrduK8hOaF412uMwdprsZtRF+On5UpNmctPRRF0ynQRwA8ykHiDjKdBw/s3xBYKPbxf+focuq5N+6z08VDyZC1o44P5y/XT58HTP2LlqrS/34hN02VfAV8Yw3orSVqZcaI85OfTxJ/TOb5DPt9CmnIcfXe2RDNcYrDKj1ipZJbS/+w4nWbk7sGvxljuh+NYICuO/
Y8bl6lLiTzXpRf5nt9p4P6U4kMprHPHzaTO3V82W/XAb8q0U3lpkiAqLbkfnevqxv7eRHzKutirBlI+uiACsXsAaAp26dgsD14uUdxUC7IRyD824GhNdpKhjG7epsDW6lWqJRk++Kg0PbPwluS4H24P/PCWUYOQvB0FacZ01d8KN3ASaNiO8Ryxoy82kEHBH8eyEh9KMmkTBDv37Mb/1/ttFUtUW5DZxdy5gD5aJeDdAYwTqqVcRrOY5QYYZjKZUsJh/k14ySIkpapf5qkhAoOR6rjCDTP/WNN0s4o3MwVfGhdyWWX6bttQ9jDVzahes3M+LplkQVKed4UVi/lmf4x5PpKs1XvQP3RoX9YcjE7QngTLDZ9ALmMpCgH56IBFN1FuwwwvvqNI2p4jrKr7NTbbAhNqYDgKkTfgYPskSHO/0JnoopskLCzKFbILo0JOD403569P1nNhaU6bCiZG7a1kfGV3nnu7upQ4yHnYWkUxkkWce2Z3Q+JaRPOMftZJCAwtyZudtFU7fbOob9lRvT1OlKBOsx5KZsYbf0nmE7e612mFTg5iAPRDTKKWkdJgU9CGTYE+NVlNcVORtPa23CIF8+b5BO14JCtG8whA8zSNHbRSuf7DHd4bZm2QvmCd5A9CpJGStcu6nkLkUysX1QBMPxyiEchQjB4rSzTo9G9rQeW/dThw0DQo28KH0pJ+sPd0udCSCzobWrpvvu7mnq6JfPNZisFvdJrfqV8ZHE42sEun5Y9xMzhhloUo6vdcWj+o/2QFqRE0recff07G73iKGkxcmrmT6lieQjv2tZO5CEPId+btN+QVU8FlOGz//bRFI8EwH3FoH0hTywqudb9kSOgz7FAsSrHiyKuYWW4Co8EB7CsWpG3N9AUp2Ounqw6v1sapaZX+u+Xv+MjL+93XqxD6KidIQxR01gtS8NJModRw8uP1dveT3VG25poMpzleqRZA1PCpWrjCjoHLjhPyJYZq1+m4PG6OfqLbQ15n+UlKt3/EaA3Ey+PvLSVrA+GxFJ9OicDcckFc0My7KUZNYqgFkCj7wclW7eqO4cYr96UXpY2GZMjLMVqfSPSGolxrUtWdK9/kDW3vl8ypKMEHVrF0fmvNydA6CBeCvLUvKXdsEOeWWtXpNZJZ0RysGjx1QRWch2sA3Ylyzb5E3PA+Ap0Ih01U3UTGGAZwoP/42uiov3RJhORX1ip2ZttEdajs1SrbROA7hkUrFdrsKfHgu2F1XuQiZwP6RGA6W33Mw0smdXG0aTP5ZJzNmLgcrawKEwwjjGnkzTvfIaJGIqqqGfA60RCTa4JxTwPFVOL6UzBzQ5RKU+NMMluJbJQFjYE5fLmrDhLnoNhxc7qYmCPt/+4urQ15gGko4PzmcHjUyp3iTCpCq+T1I3Wd9HM6N+xB1/CjKGjJS9hIAcWJiSV3fusQ/UBZhVB1bTFl4biPSkTH8Qm6s7RPNSOhdcQhSeFG2tQZkneH6asBWkWcgi5X5+W+Y3C6I7LWERDP6jIuINZ/l052vFSsRzcOYzLBdzBKDKKb2D4Gfqx7UXn0YQlt0XsA5wlPo5gCfCYuLvFBdNl3UpJwAFDpnxcGzicW0xcKBytdm7HDtlFj9iGputBjXAs6r0wstzlykLVdkBcbLHErj97Ab1uN+sgl8D/N/CvG9esc0FFcU/uQL1uPX46s0F2Wj+PUEkT+qJfKiLoVIOA1GDlpjDZjss8jlGrBam49ImkqCj10I1jm+cd7us1ku6rUocW5q3iCGn7iDwW1iTUAn+Z6Dpy9a/T7EaZ+dvsvw0CSUQd0Z40B+p636O3LMqf7Xz8sDcrpQM/f3k/wLxve5GwCcYHlW8ycEvVAYmifMiDOMubZ9TSx3RnU8Nh4f07/2RA/gsu/ccIbUDqp1ttZzb1h5E7kyI9yZkZxW2cHz6VOAwzGuLaa/qOCf3XcWEc2uHPVt4vXRXgMhOwfUuTwxTZ27qonOLMH5VtmNDFNNbQJEILQzJiG7
EL6G3HFkB4s86vM6iEjFyuNeLMBiNOqxkKNIxTPpX9ZgECq6yfNd+3L2uyRpesST000JklAQY95Uuxm9/uVSROheJ7L/qeRc2VtRw/swH+HiXM3xWe6tIgXD4s7iesg7BJmET0axyWRJdTKbK7rkigVMmWQ9s43xZQusTttEjMHiXX5/rd+FLpn4nHkV5f/jDGL8BLJcNxD8A0MiP1nDwear9TewfxfJr9bS1nR0yRPgS9NoEWtbiNVX0XD2d/oZ+8hxaCFcsauJy+NlXGy4arvYTcIwrRbQuOTqBTp6F8eRCsipwKtoS26psX+a5J37vnY5aIJ2Z0/ySqxsNGF5HGrsXyCzvOTaAPjk+I/3biokM4T7sW9XXlITLGtZo9g89HKyfOyMOWICpp4veZq8+8+lM3CjxL1N4ZAgp1UONUzMVGbfRwHK+oQbhWFF2dPcQzipKOrHxakkeyCbgUFbnEcfAh6u7xs51kZQ6kTr3z8up8Kuakj+cEZPfh4FEV2OSQH0Te3knWdqtYcqT3uGGgUXB7zZMfRcyqZYXPNCVSXROltPe3b42iNACNfRP8we22gcKO/rTdivZ3c6mWCm+Cl7C/H1wxM86CnbMSAN2V6ez1Uz2jXvWkBblCnJLGiUGdgoqF7C8tOsokeJV3Lx3GoUoGfpK2AS/FGqCAV5R/958kRbqgB0uWMqf9S3FwPhRr/avg/33HbmiClP+eXfhtdb91LYWdVh6I9qi2v25VpSOjXRKEQjy21CtpKsU8alZw5rNG264xz7mwXBKOuIsv7r7MX7rt5Bq5Z9pvU+NHwyBhsdrjOjXuoMee/zctwKo4P6GanFH/E/ZQU7oxp8mTse7MOd0+BnTtLnmD3rjNC9A45cn+7+tNabXHkRXMm4fmHFY3C30qmlSyvNvqxJpCmowA63Xc3SujVXh4vBGue8OJ+D21zB8dlcGJvJKQ4GVZxtbQrMisHJ79DMIR5AvwodchU9bLTUeSPXv6j7tWW/X+2kSNBonw7D4rW7M/fT0U+/FcVIfqdqeQDSzGNJpUwFGKQvwYj3TTpEqBGZYW6jOJ4mP77WKmkoivF7OdeqayGC965GZetazLrkxWh/kgnBhL6K7+XGnSzNQeLia3+MMw3xkR7vcp1L3OuKUDB8dpzoDudS6abvynYKJ3vqYc90VtsTWg/34we14N2uu7u/sz2mYq/U93LRLdbMyjByz/E0EMONrMSRPqD8OkdEDvFeoMyVcH8L1jCKCMFcaLiWVw27oKVWUgaZVlX4qxdZuaUvk5kRMwpY4aLVchrUjB0MimlIYJXy5AQvzNpg9PuO9szPq8XCU9NiQhR4QO4DxmoFTzX5wW2638Gzhp6asXejPaNHiyC3tC+g9l2lt1X+0WBWHUAyCNxM9OIVtD617H9rmw3CIctXpFI1nunQ5jeX3hezc5meeSmLN/N97LDDG/ybdPGeT9duWoYPoXDCW083W+5wKQ4btw14rSkc2JE+NHieKoCTQ2mWVqeobF0fW6xMNzcmLD5CCsgfIw4SktqzWkVWX0Goz1+wJWLyd03LJ6/wdk9TQJGSkvmWBHIpKJxJwxfPAPKjuAIM3PAcTJJLWfFmR5G3SgHiuBdcLsvb8PiOHBSt+oFBwtPwH+FA9Cpw35DU2pk4NHcYNXBzX+YwdRwJlaKp4SpFXoCZEfn8Jk2EBntZ263d7Wx3XUEU5/KBEeftV6TCyA7RYduydf3INZ31JLFAUtHY3c7VhkXyhAmwWBuVeEOQwgz88wGzUZMj7h5561Q/5ZgYFD98GXl3xS0l9ykr7wx7/v8b/Xnuyh2YYWltmh8lrz5ZQivp7DzFTR37nG4/314FM/zuO8M0yaEHYvbzWeLysBn2jAftzZ3oiTpNlaZsUQ2Y2X6ZMh70nXfwjT1ec4nSMmiAAl9paTH2sn4r0HiGQbBO1GEVsXTm1TwIsHgJ+Ya8w2HbJ7SnCfVKKEyWhxn4to00NtuptP+kYCy5+nor
Lt7Ge0veUh4jdcglpw1Ni1Y/tCRA3N3H3LnxCAPkFnGmtlSk456ySzQZlYKpOAova4sZuuNUwyZEMG4h6CmGtIv6xinjOfYPx6LeOatsqIBcPjgY1vPfeJ1wEII9C+X28gNwBxc9aXc8adiUc25kkMxIZ+LBj2OeaFe9Ue1xkH6lMBHv6PyYzsFJKXRh0+7WdKL8g8WMLUm3g9X76HNrmoyF1J7eAJNOUUTsVxKDTOk7GJngBywrK+rEi/n0jNy7DuJ3wB/6enW60TrCidos1i/Nv64QQO6rVqwVOO7vVmS8H/7XMqn7gS9+iY4AJz703qhfg6yktrD3/FUzKbmmhVCvm6NSkO2eIg2dm3s7tHIGlcGINudtjJIZxEZNIj2xo3gKPyDNueWuEj9tjPU8+G0z3nJuj63/W/uzFl2PjuNRFAyhvRLz6jaGbAkNL4eDhqGS/d8GKEQXHav5RJhxklCYgDRzLwc2s7STwrRQFAsMjbyekGL3m4+RCsB4m943r0VTS+Qmt/5HS8N5uqOm8/9x6ECm6GVjbki9Bcckjikjsib5cE7WDCBv+l+A1YvUM6w8pNlVAdIiRlKpGKhZgS56jQPfy1VgZQD0ZJaxDP1JVXEECYM5hYbxXTxzxGCEeRL8hiQJ2mmPoZZxlb0HtLZIAysvVrmRG1yughL1Y4zMWToC9A1Hf2H7BJrUQQmR9ZrOk1qht7Dxjeed2cd046ERZsE95fLMcZtdrcfr596aQBb0Sp7UpcHcu7ht99QpujPS2IXpbGHkBP8CS1rvpGnnmF82I9cRPNiSs41pCfYg+DGSxQOblRtSrFtsCUap7C0BzPj2tgosKZ5mYi/cDUnnscgjNJzYaaEGBTlw3O9cth+r7HyukFUqefjjtCyPu9OyqlIIXOVKsyD56UXNwznFQIa+3SUuhUfWXDgIAgtq/4n90RTuVsgW/TCO0mfZu5WtUme2MnN6NaUjzspJVEZ/VHFN8/5q5wml1mHuX3agYQYE3t/WGAwnxpQg2PSILRuAmEhNfxPd+B3bTw00mHz2QsAtSwon/Qx9T8KvSZlPpy8STDie7Co6yAa/tOvCHfp9Z+jAgU0z6cdilUD8teeIFr68d4YMY/l0SGrq/t3HjxRxNt0fgXIMeLaNVJx0UsTiIFgUQkenzeoyrz/vok6jEcw3BqFcAos4qQrJjQ5HsIDk+gs2/WILNTBsucT1ZsE8MZag2bo1d9/vV5z6U2l8akvgaigFZ55tL8sDcqvpEb4gUEZ5/qTYBBOEsl/a0L80MNwCcRenC0T1xX3SSbj9RnC/xXaZZp2QSuUTaNcIHg1GdpXPHWwe/JsGu1cusR/WFT0u7iO3szHGafKZ/4wc0SaKGorMSBVnKVkt6quwACjFLaTTVWJLwCqR9zTL+7kVJ1qAh654hk+f0Ju+blMJPLlX0l88CbfJJq5OhW/lVITr4VYmXIj4x9NElwWmPyAU+bJ66lFdIKvUOi1JTll5QY2A42ycBxf6Bxqq472A67saaZGvADpATCS4HUc1SIRfI87HQ00YIWOvvmlb26XyFPLZLp0t1wQ/XmkQWiq3jdu3YO1GnWveF5iny/FHUOayO9fBPNKnQUGQatJ+/rj1QEHAqm/lM9VXfD3e9SUHh1ZS8GwjlWAHwnD4P9g1EI0vXG9z1H/cSKLJl7MDx96evgIZ8zq36YWYl4z/qIsutyTWFrWTDBkLYsh8nwcPfrIVTiqRzaiO8FKFNofm7495UesMVYCLExnzm0k1wJt/y1NQ4hIR9s3MarUZ8snA1xDtNHb+VkPl0BIC6xiWFqrri3He484cVX9lU03W+ZBwc/7Vzjrj/60lJfCC/37+QshSarteEF+VlDG7TNEqDjX+/+DUiQv73C4rQG5sqr4jOe7TKhej99AGI4woLt2f/CH++/sxGN4G+VRFM/pnQwXzbs3jw+v+HMVzeaEeetscmA1XIE+ZsPbVKNqBwurfBnUjgTXQlBRovds1
Y5wRSHIWhQQNRQwZ0xq/d632b+WDWS7t0LtAFuGWL5pO1BA9ElbuFXBsK189cj7MrHLErBhKQR32V4t3Y24jSbnkXUXolAOzrnTYU+zmzH13wvMJ03uHlte1sV88IeVYOGNvgwbFJyKeLryqyJGA0u5GEqFi2/qssl/gkcFyu+O+K2WGvUQh4eP6ZDANZWMwDTc4X8tWhtbbGpRSbUskRrVbUpLCcCcBnm1xzfaPMmfBSFB1trJLfZ+DDu+s6HZCU9ylQO41CQKDBQhuLetOK1Fe0rmd5rTKhzBVw+J8FJXYH23E3ZiGErAeNVUHspwygJ1CzqugpDB2rjbYTPcH/ko7X8qIZdae4riYhtf7LLZGLFrfLVPvRyZEL2tlM6ykIuBJ2tbbkzTtVQu23YYL8ZahpEe3RfqlZBcj5xDlbH5Gp01FA+muWwHgiYXzwL/JeeQZzzndSrLcj8PpMPeKY67QIYFzJfm0zvcyQnMhvYwadcsEni3Up0Wn37nzNdkSXD7ItWdELbUa2NxKn+lhf6h1HXYKYpqHoMRx6MQ04r5fHU4tG4S8Fwu9mZ7vPhty/uFeTMX0k+LojW67cmXczFvQobZfm6esixLJ2L5HoGOkSPbCkONIxmwRzd0cNNoBZD9/N2y2/5at99uXRuPnXQbblihnjl5HBC0C9lNoO8meIxC7SvNUNLLjbjQIPqjEqe1BJVRETzVGdAvu/knriniMws3W6StmmhD/QZVFKj+a1VLdNa9zptqwHiYdCn3y+lSWMCql3Tmnww/AWtl9dCE6wG5bbyxeB/xPfAFC/juZE0iE0S0tQ8RLj8OW9H261HHmhlx93egpcQc0nb24VMbv1ztzKw3H867QMG7h7dijFNHkMLJhELcNLGMkdF+i0PlddC64e3z/Tk0y2qIgkuS97u7sET8tshWzIOtRtCSKVxQXiIyoWLk0Ffn6O0LdfZKHA9oPIIXzXmqnD9MFc83RLwvYg2OxcSNsYZesjiyJVnYYFeQb4vK144NYZTdcswsGYGPNtp/UTvLDcUgrT63Z/Qm+Cs25cj7sEIPYvme+IaflcdDEZUAww7OZucsIn0Zj8zkzy/jKS7YhflgXv8Ds7DE1E8c7NoiMUkEkcKxPnplql5sfhnq5S2EmqSwWUO6i05lvraYvBxv4qXbcUlr9nR7+ZHWvZzdfQzQyec2pTjKGv0DTcSz3nyIhh/FpmORJFbGXm+XeFWmdFuF412+VctDeo6pPFUvLIlcv3T02DJ+qSa8rzeEnwRsUgU3ucUNAJr0TGfjmT9Y6DvLaMN+9BJXKn3x8RQOo5EgAhMkXi/FSEaEIg2HyG3MjaLfDwJBo/hqTIk5YczMQmUTaabN51PglhiAe/TISkoVF/ye5b602uIB3PyfjDbU/yxXZbmnm1CFJn7WDQwpM7Um7LzBu2dY2RENZjVKMu1G3be4xAKs34IX87Ltg6rcuFEv+Wi8DeQuR8F4gzYiQxpKN7IQdVanIPwrxTJDroFAGNKNZgvT728PQmdvlzKhcj1c+DdOdy4XCGU9QIq+qg1ZD0ZvCABn8BKwIQRQWBpFQbOJFAmlFJ0mF53XbLSEapsTgZ/YjWlVqNrFQghTP0n167AkrBr6dtBxrmK/m4vgvcZXLs9Ilx6pwD2W+iwaiFH+A/evxKV/VP6TJbvAIsh+fW9XFo/OP+SyQ88PNwbW2JlJnyzf8eF3zlLR1F2vU4Q98/BqvZymYplTKGUzLQbrFkwfeEK031Mwtfdkftg+6wX9JP3SnmuKfDb4a4U59OSAE3a58/6NtV6my/6QCtjkmBPDexE/JwDlM8VVguhNPngJLdEAcawLQ5ISpmUiwTan4n3yVnp61XiYGPzlFXIUh1L/BbNOtV2E89YcCF5o9Ah0ga0XiGkl+FSbzwZzE04ehlA3F8QSkKVzOPFfYPWrmgm7sSuZgbEBwGKrYZYCjK4F+cXNFAwp05kg+c8PTcXWRe8k4VXrKEsbpskna
SjiDd+zZfX5nP+VUdW0H7WipyTaUpuXP5A9R40p25iPnJ4vVPCk9588YnSlppd84NKP8KyKj+RrYh76ovH4L3v7Jugw+QXXicBLeURIHMPAKxZuspfEHfqeoHMEOvddfWIpcDULw5+DtsiCCqTCbQXx5GTlA+EpWdjM+wYFz0hZWzVxyETPXT5jy9lozsHffmE2pVxzEUSm/l3zaMIeOMhy8QrcY94gGSEGvXRdFC9OFkgwWIw046MLIktOkZneQZ7sB0+6/BeKezoX1cvuR6KI2FKei29Scsi8bvWgNTCxtiJQe/CnhCWgiiHlIjd10TGNgpsJcTIN+PmxbJRej2foYwDLXReqA1VA0CdxICATscIF56vtcHIRSrxn2OtL+q23NoRlc7M91vftR8r9XwpdhDZYuBlzBxW3URYgaYcjqFCXWDIVek0e85/G9o/mw6lv9bNrVH5eetZ8Htddl5qI06DOKwq8Sz40xS+EYFyCT6Ixe1yyC1F28aSDsjJ5SUAoxSmhwG/L3yhGJiflQ0ZZ5W5ZNxowTHpO+p69RGj52mlLrXOFtaaoP7VEiHIUKljlehs1FQkykXftRNongN0upmDHQC2etp8CF0bhbElijLj3hAyXfAtIUrLPmIEhCePUgCmSdDxS4pC9lJsPwyzB4TTy+9B3ZVMpWwURBglxSnXx4zPi5iazkELV/V9aHzoB63A/XS+Srw7V5TPDECL8rSU3gXsGAMmAYuunPbb/E/nZA2pvR7566aHA4NCBzl6HlrEiQiUjzqRtEzw6kHugtoCiJaFyucxsp1IndjR97sH9gy48IcSDOIwjt7Tw4lOZ28oAzs+n7/73w4hrIJRIQx5fdRPjsMq+clHiNezML8SNC2XdPX72ahla3w5XEt/AtJiGti39o/tPuZJ7wZ3glr+L/t1FdaQHdVpfYxUk1kxD7iZv+VlyHKqSr/wZQM0GJXYBDI0dwgmGMQ5qbjkoWYoYAJBnn6Z8kkPFqB7G3yV8yy3I/40R/DOQkK6dMM/bCglAKazqqCXFVY+BwBKu8L61cjk2Dd8VayOHYN1Ti/h86g1v4J1wIEZ8RvCt9wV26UMVWieUaYjvSPcMrK+nWdrfNyO4pZKidWiuthfYiiFvQMd6cdmqfiU6OL2MuNdwblUmLPryLX/WwlREofRm2dGp9B7Bw7W66O68i3BsYjSxGNWHYeXKL9ZL3xf1eSiEA3e5P17NrpMZF7GJnxOpXzYtytcGJm7WqAOhFGU7Z+ESbHYNpqN58woLZi6Z0yz4ZX7uqIga/KFGRxIAmQ9dEX6PqiEFeiEha7AR4MFCDhsmlEQDoyhNRMb0hPHC/0kGaauPlOSkNrpqMsIPAvowyaPhm+jMvdRAV8V313tTnNul2NH4+vDBcGRaiu2OvLphTFdVzZO0qBJ2jLH3Xm4ACAbUGC+z2h/7XI0PI3ScOnL18BeD+XUUzk12sVhQ4+Fs2UCQy4DZEt2zfNOrjkC9bH8tjEgoWCzjgf/o775emYCcJRxmaZufeQSgA1U/7+dwZcZtYzkaAi/7VnSk+s0LFwOvsRWFA09pUkqQIroZLKaMiH2RoMzFLUyO+f6zkyDCSNoz2h+5LymlXn1byIUzX1/8T1HLOospU74aSwnRdd6BlG0Bie4J61/8ejpxw1XcOw3fGwedbgt+0vEpPTtd/PNq8B9nnhxeWTO+xeB0P3k2hUfBWqqmnPeSm8UwWYapssaxNgkkeFVWiA/noNiXZlayTQiNyB1Q1TBbCgTmv0GAdACATdZ1EVt9t35TMwOGaqpGauX0WB38iKjczSzemguBMSDIleQrcAAsah/tF5DuLqAPHc0BTNEaL8kczKLbs0cPQm4dyJ2hJybYlyTTkII2rQM69jpI4XJa1wMQtFX127X/evOSsOJWCqaon35uGzmfn+zhr15RjTZzai12G5fTYUefVeGq5DXV/Cc0h3q0mQ1EIvzDhPzUexPgUgVnPjkqeFBb4xSVK
hDciuhHUt4WRN2eOIH/sqWdQqC75XIP+/KLyD57AB7FhB5a9PzSeMHtAcXwL2cV3pfOgvYOfCYf0Kbi1570r5tYd1mXI4VGoDuNH2w12623vrdgBpNqNseo1fUSpSfa4js12jyukLgtc6A83ZWgR705k7PypGJoH+WkS/MSEBItLm+704nebmRFCBv+qkSq8c3CzkzhlTGSmMh+lUvMBqWuCLyo/x8BCr+E8EELTWAzdkJALhL4UTDN5KQfhZN2MOShYCqbNbge/Lja8rPzmoBOPMA6FgbZ72u4BTlRczJ0kM1QbOoDSF4N9+DgOIa0X+KslTpOn1SDKsGjYz35X6Xt7yvXXjHn1YDT+SVEcwZCAXLlbzvNGitjypSFGIP6AqYGVnkTEK0UWW47HeAQHAY9VmwanqaAacoXZsYoiVYtbYis8Kca0S5uhp6LM8MpuNCQyO0CLeM/VUTTH7LBcZNHKflgkUXJ98uv78Xumy05/lpgioDO22I+YqpWM5JeTgeBNyC8yt06aGu/Ny+QYLOwxhTKoRurnk/GZ7OFOfTp7Lwk8esWz0PWtpw+jMeKzZGJhNzL7hBL1p+vjwTx6ttzej+QapyVntMsB+hU1zW4MNW/1+EuKlTlZVqvefYxPxmpyOb4GmMzhk9vmgSIUAc9l56/3+TCbmKvC3j+MCyXaNNnouvhuQo+sULpQHUn+OIy/lg4gs1xByzy2cs1rP563IAtXoXBCe2WBQwXXCyG3uMQsZM7k7Y/aH+/1aQk3MxnIjlPVjY1vihtU5CAVFWUCYl58TG1p492iD+/k1cBa+y/6h8fsY0aPWysr9GzHGyIwR3VXgAccQvkxPpzmgtYK/ykwrkSUzUTBSeF6YtDy8PbRGQgsHGX47BVb32EJZUJrEzTRw1yY6mZ44QdBTgKKag2FzYTFeKTTCpVXKSOJcdyq/zsii5jabcU+kAV0UZGYzcWKOUrLsinTTnclHZBDf8nxClH7HYW73SdJ3sSJi+ElSw3v/tJmY8Ge3sOPzWzylx/g7Wna1dZF6k2/lvwVceRcxWTbQFvEfsa/q4tEBbtDC97JtLpn663S9KFYFgh2Iile4J35IqRan1R9r2qDrdJmdqj4hkF2iPlscr3qTxhwGSXiMqjMqziCEg1XRgS8YCTYwQEtGDmGYDVyFDKPebFq8gHhW9qMnXSFZ+QqwECtN0bKFWnoUgwjFQc6GYNqGplhHXDeP1qViMiwfa/v1M4nRSmN4ZApEOKRisLWGipeksSONg+wPaY1c6M9Eq9iVaSD2mNard/Zn53TLF/hTTHJehxiqONXFnTxL0QySUY7jHpnWuWdsjHd/1r+pFcmnTOLqQ+ZnOvhrA8xs861ECc170JecpxSAeMt98ISHJDkjeyKfrTIiT5lo2tu13Zl056TvmYAGvL44JxiYTCF1HPXtGye1M2UHpOnXuv6Fyv1s+V/0So1CtJdpfoplUE+kmu/5zqCL6IJTGz9NzbS4NVHLGR9QwfTQO1q7dQnAVn/UZAEe3KVB2KJb2wMyzDduWq1AhZPwM+c6nkqw0AhDbgb0h5UtEkReni0oc3bhBiB790WSDecuhE8r0UmHNBR+6621J9DqfImfcDK0JLpWfeFTG4jpWH8cFZdvlLrts54Tz6sycpXhilpYOmey97pkAn22+7+3LhT1pN1zD23ybrWu5hZlkaMRHmpEGiZ5D2xGX5goiHTReQu9Y2VKG3mCSfuqCjr1X89/xgaiT8arqRf4CePbDOOezrLGc6ZkfzbFSbkcUi2197TsmywGFEbnz9sxpPsnSHT9Ga7gbrOg+0+eX5ozr1QwAW+VR2Op5bTKe048R9629jaB81b7dzPa3WkMoIZVdnBvMRPAzOQfmAWP78kv+4ix+Y3j4sdM3NFM2T9PPYAYrOcE77CjpGD+/2nNF1YvV2fN82SxtzqB8fit+X2AaGDL9Lvfr8uZ/okBhbtN4z/rkXfqnNSQqOp2hcA1lk+BgUg
sFOOuJ+iAb/xrr3oaYPtS0GF+ZguKfhHOgoFixrPk3DXv5oecJ0WjKId5EbZBo4XhTLUEQQlTnX16N7gjLSmqFOZCCHDrm9QEFQ7UJrkdv1a53MDvfHSeNnKQI9+6uFtN71MjD3IB9CwKtmjDYl/+yYiKV9A3VFivZPxa3DGVZ0fsLcayOZnzS6HeKbFN0BjdF88IRzUim1gr64d77pZKtkAfIqcR9+3a/seHgR0Z+d70aB4NgNO3UfHA7A2yC5tzKSSq5wqRMjVIohqUO0D0GmKgeS8H6v6DXhlphkekmAYwiPpvba9ZlPfy69sBT6SPKi102a0HqOnLmiU2yuTIxpxEXDhr4LGA96MUCiJYZT+h3yK6njwZbJK/BzRHeHdGQEJFVxlV5djzx0hifakJ9HOW0Er8lQbKvrLE7JndW2onLPVjsPKpEvXD+KyLkjNu43VesJxvQlKXsuqSVjO7fPeK44EUmRerMTQrsJBmZtFBN1Ba7Lp6wX/gph8M25WviL9LLW3XbXeJ0rM45p3A0q6T+00Lnl7tnMGccTi6uv8Y2lWQltta4NA/qQN0TVnkiRVj5BewBoryeq8y+1ROdB6nmc+PNh0yqBxPI9yiBosWmYtX/0vygHvrdbdnz/XXIIWISZ3gZKG+nC+UZb8KEofpqMCQQO8WVetEACOiB3/oeoqSYyrJctjNPK/1yJi4lo+0OidjgUatzuk+l0Npl+t7Egwcxjj8l0ZQlRkdIkavztGnk7SgrrBJKazErT5S/dj+FcR5x7+XeVrJB3vgQI6QMe042OVKW5Lpk0+WwUA9BEZnF7Nw4U8sR6NeWjHbupKGrdLxd0NRyxOJRjGY1JdMTc25MoyuePDe6O4NxfYeRU0ab9qDALpcATHqFwq3nFaMIRTST3oa5CEcLQ6mtTil0QuBzDAjpmK0RM0Wck2ZRu5nDnm4vDoT0vymyF87nFQ2O3LkkPe7ED78CWIPRDVO1gEccFpUdF1SiXhefev+KdIofCrOK68NK2/uNjFf8jBfteEbFzn27hgIbgHqbSY79R51QQ+7kngxVgTRMvKMA+ZBFLLqZ+K683Wc74hvAhFNe+xWW5oAtBXwH9LBurHLK2eQ0b5gIhNVqjipBlBiEa1hD08USNOVsuzkXxJNH54KS8ueNe5dpCFdyhg36faP6LYZxKur//pcAwnwBSbdPF7uPGg+qy4D2ECth6cbNi72rf7SGkYwlA2L8Tf6o9ImsRAQHzsKqU1LzReo9xIfooDmNSejBzL/tPN9/2Hhay6JRzZSDP1zbqPczCxMo+Q2GtGUYf4xQnk9dkPWQSyp6+Z+XZ5za5vWIlcpiOTl1HaBmEoGwSR0oSjL7EwyEUQPH2ku1aWN/FeLH6iftfxfG+AikOcbMOwDx6lHa6SIMNhf9oMHzEO0bK+XyxACAXSe9y9rVkaVQ2BV8+et2R9mZc7N4agWyg7z5uFkLEC5zl+eHso/jfzXOZIC8JOTkWtmfQk+ulkOZfMut1zJXZrT5T30mrIKdEGJAimbZ+4UiF2PxDIkY17c+ZPPmm5OB18/qxyAUoZzZ7/x1Q9FaBrlOv/R+JMxMQjapfdncgaN+UrwqvWC3aMhJTo9ZbMU5lUPSE5hGV+wZvOVAFJ26rNqNJuRDBMwEaJVioOUb1xjZxuAGJ+rA6JmCI6Yuz8p85Zt6Vi7qvDqcQo7IYntfTjQbNkNZagOiSPhWX1tRmMA+Jyk3osWqKkMr8LN8PHFxdG14adF8NsV/z7vf9Gf/mJvF7yricm/r5Ff6u6zi9e4XAWJnPqHn7bLAIClwRKcpK9K/cbmIS9T6OpZ5W2pW4bRfkMcQluIqpNcTmKA51QHy4SmV7GnLodWYRJEoKUrZh/hXVUEKh6tJ2uCg72H/nKlz/WTYW5DHt3WGKIo1IUqHTz7rlIZaCZU2xELB/nNi7wF9bmcSCPMeUnq/Yxtzzt6Psfvv7xdUntiQeMmFvwc55XSYU35BXaCXnN
urBZGN2YNR9U1wa7POuRLzur3uff1LmWQTxcKO4vhiZouJmnKZfJDnv5puGbB0cMI4viAHIKKTuMsn9HiGkGtA4ExMYtOHmrLOnaOBAr8x2S3grurO+61hE95yT/A1zYdWfvfQ+hJvRj8gq42o20fIfoTrL+880C59021jt4wkz0dL9RPfXizcob3i42x8/prhAVL2xQdM/KbPJxwPLklXzGG72082X6uNiNFC58yGUq6++p51iP6z4boMeKdNcAeT5PGJP1BOyNsQUQtMz5RSr5LgcTDsNcWxFJlo2EL0jdHE4KxqcF6iyx9r6iibDpVgh83byRNscduz+tw9xsnaA7FHOSYbQ/yhErYJMuc9PPNv2MtJN2NuQI1wk3lNT+ssgjTu7tQDvsYwe8ozxIi88QHu4jBGTkoWZBDx9V59LM0loNxbbCgj6qO465QPj89A7cqNXNAN23qaLfgI0d3NPyenYNe+rHMGqbZ3+Wk9I4CnzpaJG0oP4nyIEx/ZhL5DtVvE0Z1PPWRXtoo0OJKhqjXajiStiNlakY0XFJmY3wwyVwZF+l/MU4GW8wRzHIu+MrqCDrF8oL702+XPzIesZ1Oagi3vRHymoKeZZG5x1f6aYxtM2tfW0XVQSppoAfPVaCbZ8zJJCqN0r0o9dnf5E2PncEW2SCfYQpC5JyCcSN/qwMQ4PsduflUyyaAhYdOEXGAzk05AtYsxynIAD5DYHIWuJvLtBlQgJoyb8YqoucPaGB88d5p546Eej149Rrp05P6kDo4yiTpv1w90YzBI6Gs/9XvWu5p06bTSYqpROxv/X/1+WST0SKvV2nfVJH8uenhB7a/7MBfz3URXAq/lJTHTU3MfidaPT3cAzlh34nulVpQ1c1mn2IOivteKtAt6lvcnPs53b3NeZcTtkLFAKROJ1mcfqo+towN+isj04QzjZUMEpT29XdybvxOchd+ALbfBatTnxEQFiPmNU5+V2r9O7JeNcegHGKN3/HME83KH43xF1VTveKI115kvID48gAyS8Vh6+PL714bwhpU+zHSIfUkOL2e5Saiw/QKaC4cJuPRAlT6VGJP5ftz9Smr766LfV78+ferqAbz24fB5cYl34h7D+dFr0bTZ10xYxejsKlmBTEcsPRvMBbjDDzyxB1IRe5XfKtxn3xFFYWX9f6+GPkcCLVLqmEnnhxVzAhjSIFt4/ujPuHZUITIHQQW+cJLOTqchYYYN5ZxUvmN07q76hnSon4pXjLx1P65YsK+l6cFHKjykwgGESqkgEUMG7Fsr1WOK9x9ZCGvQoOIVNetHN1e8hW8UcuG1vI9o0hfSxluekELAvjkM+jLWf9yBRDogHdFFMHep0DwMykD0CfXqMFvcmiSJcTjgRakdhf2mrK91RKF2baOLk/0i3B4NQEAAAoNm2bdu2bdu2bds2f7Zt27btuiFukDf+++MmEHvi9GWAIpIy/poioR163frvAFJtTI3MFylSHDGJSHjlXLV9M7pl1fI18FQsL/rgJ5HpVFuFU3XQj59KulYBNOFJTroHP6Qr1+8GgOTaPLn0Dfp9nxE4Yt75Vve0KJyZ1cmamenWyRacejJvmlR80N+14+Z+sHuWVth2qEhVhKIY5XyFjvdxXn28Kx8o3mKfkG8T3W3wkY6PBG/6KndD07MhnXpq269hVGDc7nL1tJEbxMpCGjtRJC0AU9yxMYhmaVUXcg8eeT8SUvR73qYC1Zsz8WLMSULEArGGPFNns1Rq5q4yrnaF8MFNzkeB9ei9vrzJaIvCz0EPbbZmpj/K3bKXwtxcOhNVmZ3gg6VTAhyWvcdzw12PLKDBHWUWO8CK5G8xafuI3FXSEIGLyPYbyqXlHl89OZh4WJvd6keYS/9XHVKnbOcgsVRx59q3ySkKl1aDbOmiKcZ4ICKOvvSgn/viJdtam/rLAyjIdWq/XH0VR+lALlqdo+sMVf76tTaNkmwKBSF4GDg9r0Mv2a8kSP+PNUcHAycFsC8QQ
/72BpuFuJnvilJd/1HY55YX+htPbB7WsDG1tW+yRflLoO5JIGw55EtRvxqQ7ibY3ksLba0vvRktqMJendX62MKxNUb1Z2K0NZM2+9OheGhB5OKqGZFhld47oRrR/zKCFbPBmVBpYHSo6riEVnhkDi03OXHP9vqZ7kSwdf1+opBdAI/YmHaTZVFeNj/pDG1hjJXsO3P6K2pJ5jzsdq9i/fiBy9+RcbczBWXxUiihMdu6IOcSiAzDDFYK2jd83ngXle7wCBjGNJoLqH8frycorO7WlZpBDbKYk8SdPIzeW0fueNavsEREdtMwCjiMTHT1LeWcfOWm8THShdXYR959SGCVF7wqmNFsCUp9H5+/IJ844I3vQ/7swBQPDwh1g28CYBdUUW3VGBMvBY6Dy7MLDlHZwiObNfbWrtVWBmIaRdI30V4IjcuceJU+++2oFHpBjiRyOkNIzfgl2MSvCOIHNd+a1WHi92bI+l4rkZubBJk2a/bqDabkYmKAjPYK+HPZ10CQrJgeMzRddFnByCN7RehXnoXpyKzOx/xnLAo3cgmlYOdL2Uai5Lzz+NK7kYVqNbd1iBITgD2ua+bdWelty3GmnivHCU/OOyTIgO4M/ytgcA1CrXB9O181VTGO8MyLDjfFQljQ5QhkMMn3UPngRGH64F/urh357AIVGkHERdvebH0WzLBroepmwBTAI85GUPfDhBcb7tt3YhEtskMyZeGmsVozxrFlQUHCcPTnwo0Y1fI4vL+TCdkuCAXWPvDM3qvGenhENOuz3VZDnsLyn5XFn71kU/u6Lvuj/PB6DaePUu2lnCzyt1MJXtuUbDqeMloRZA9z/JPdxpw8bAXik9GMVGpwXLtsEfNGCFPUsU/IX5A99wz07nx1MdtEZuAc1MVUXKky+jA/huMGBmGcn53PB7NgX20XwuKDxypUvRSWELq3Hsg4X2LRJXYcz4MYSN/p52Yoxcy7lax7PMOPUJfZnYs7RDvVG4/zAgylzljKY3ZOGA1mkYtWE+FHGHI305VMD2kSAJfW3kHA4AfzHYwTwpbGjGUcK67UPwgF9hNNFOexCJkAgc1Mr3TO8pmV6iAq+IfKjDsZClBv21mjcFDE+pJ4DG4MH6iqLRRAkjzmDoycyS/KSX90V4Hyf6IYi6ddMJvK01cdGF7tovg4yEY4iBTzY1mWz7sPxRahxyMwTxVQHh59UT0b4V6nzir2249IDubBjumYYAkg0t4r1Z5QXlWu7WmyQpvVSZIIbDFD8ZUg1MHcgXLaJBd2BfCMSSYLw9xXrdxxxedHD+3ZOdw7tlQ+MzYZX0fK0eZTsvtURUpJ4YJ/6oiKdoMTH7t4WuTWEhfGvRaEyeP3rkFEQZrauu1+8gTsGhFo/LB7lqrORvWpUty1IF8gP6NfQJgcTEUhn7iqoSY8aShqJSUHUGbUSOpsAxiTOQm+gJWdBqsIK1lEJKOqNwR6ODVtypSecsGOgFMKQu44zVWjMq4PYrLj8rZNGzI4z24/JopfIXMNsXz3Ne9qw3eyoMojRm//ZKkY+OmK+eKNus5EZxsbwlE9Wn+9dLCHRKY/qZqTFgHO+edrE8KQaSAR92Y+Xsa/W3EKw1kgIgv4L0lnoDrtQOdh99ItiQtwup6PYM7MJ9HPJYMvC/ksanhD4kYa8pbFz1AwWd0RpAEkUr8jXpJoUO2km2Bb0o0K7/ZFbwl2KBaCk5OLeaV7HMifC4jsFZ3cocgaKIzIALurs90xxIvBIePEFRPXcTZKJRxe6241eoxymm6IwBjwVmoHlfcAZUeSdwM17EAghiX7eP9SKOUT+qVdCslHUHNeWzCJu6ZZyWZH8sotkJsDvhveeGjRvo/a98VdUSUZ5sXAZxJs0hmyfBf6TZipfIU952UeWAT9j4kmd0Xp9M3x/3QPcK5Zc/+9LxAbAAEGl3XhGE2kp7MnmW9ksvjls8o5eZokm3gO7NimLzxLQlgTT5fIv9O/OQjU7ZN6UggU1s5cszGW2
joLCQZM/cLbPpOa3bsW6eDNqiDK+PW0spAyN/eUT6+/cW29nR7iCmJEYuokqBuU3uY62TSkGwvBkg/927o/ODDfW5aPew/rU/zJ7yPYFj63Q2unmMCIv7MNXXWfVj6roFCbruFy331l1R20gKL/1QV2F3+lcXwgA4RDK0ccD5J4kyNWX/n3NuSkDfWwJVtVEiq6f0h26t8rNToBAvXXtEw+iv861D5EEIvS0sf+V9ZtVTZDSd5A/2PYJonGvtbr51SUF6+z7nQ44wr4aHw/uyVykYAPO+rdGoLoxPn+DQMB8a5RPZQ/1uvtimqWun6sArbYow72WVoxXcbwPMpNsSf0Clx4meXuBnvG+5BOR+/916I63uPx17IVqY/+9EPKv1BDEEtirVpqUN5jmFGbvWOUHwJPEGx76YQ8Kp7cbUdMA8dMaKIQDSNDOmszCjXdcvtNXMlhrPRdt/KG/KQtDL4zHpnHKTCBW8ZEAazB/xfbMR/wppFY0svKji3hojcwL9UQVa7rhpFr6JOyt8BJsTGn748jCQwBQxWJ0rd82vy5N27eylKLTQwrwnK1s1+ELHdS6PHnqDIW1jklTRvHkNrIe+UTE6HUUzUdTUg7nFh1OXltaPCsIJHstngDbBqKNS/gmuMChcIKXko3a1pvQqg9G1RsFV7dY2bCGkyK4SvvSkvi6yWVpPQVrSh4yJxKpkWrT0dejKUWnP52DYfh8+WYwm7PyYeKfshGRx4QG+er5v2MiGSTCLSP2mei8jNBA1DMItnuepa+QRoItlf56VHNH/OtAQ5nHebuZPTRlC+hTszXvWwDC8icHaekqX6NUeIuIve3Oa9dyGfUsXjZivTapDvNn6lv2am72ivXyCeYxnKgVlwkKdkdF8tpXwcpDEhs7s1JrROCUMd9CWMBSDySmyRN+PbkPPXLDpYMRl+IKcbAs4nV3WX/hiV600pP5XoY9PBU4OXW2ySn0JnkgjgZ60j9AtO5OHtj8/4bnVkPLJPaZ5hK1FhIf3v2nIEzk+tEPq2T463rtEwwPFng1guZlQorqf1eNm4htHGKtHQJIbvXNozh8nS2+a7P6AyxMi+i7JDb2biWT/uP0lWzPwc6dCz/455si2nExDL3RBXlQVnWFGn8rEUTVdcfkCGqfRo2eHj0ilsvazNowxSONyvocWkSBZjTZUZKHdVw8aVFAkfhO+NajpvQ68AgvjnUv7fAd0h3Rra3UfZMcsBsd4VCmXVOK+qd+3iiyxGavdB7FZYkdaN4dAZQ59sHNQqwlV1rjIiQiSsLenQayfzc+2Ox8vuoQrzfpR0T4IhUKHnUti7CDaI03jprT2mhB0CPeQGbcoyDhjy3/6ebo9USoZGIm3dw+RUTUBJMuGQtrmG3RXr5n63IfqvORa5DMCEB/fLKueXIBumkXrwsg18ReQ2oqYKqijmlbAAtQl9Q2yFAOZ0JnBHKA+dCqit3Hnz0VzZPYoHMtKLIT1KL2bBlLXnmDp+cIUXXN78OuzO4UkYFzXeMFpm3q1M+XbhmodnR0y1vb5NOzp/FCQh6c2Tk2Anidm71CriTuinT3drWv4PoISRxP9zl6MupplBUqoqLPvywvV93uhlvszQ+ef+CppZAR9kf44bNAK3y8qAqQDzEaOMmBh8MjMG6+tYlVWKdAcQ7N4rDU3WWX8/6zNvdweAOJGclDwHyNmYu5MOGohlVodiKxjkXWKI67OBPd+e1zuaGIMzg7RZ/9nnpIOtL2TlTOCAQceyCORbUdBGcMM11t1OslQC3C0GW3MI9muDalZOh3+2XO4EFgf6G4Z0aN5MFTfpdcMAimmkfShwNWsp74eWpmV21X/86MLcwk/IWxqjvzPcOo0cVpkFTyVpG6IyhGiSQlnl4+21iMl3Z7KnLv0RJw37/iiJ0zqaa2DA91UyiL8ue4iGxkmMAAqhqrXrQrYqJEIZSOVHYtMQsNJmaz2txzzlpMkR6SZkc/5EmvY15qAwxRCbQFZ+K8
DgA4fX3yF61gjBIdrZ4gXLmYG50Gt1h+Om9hoWokh8c5/3F/MpY/BS/q1mJd2LXdQXJC2xJ5GLKwiyqA3xWZc4X4K/khW1tqX9ipt2oGwfZ0Uny7bA1AM3/etxK0o0bM8M6SrBdE3flKNQm0CckJzRlGJjek1RhzWh1sLsOkXmZGTQncJxi29PWmWadts8ogZq1uxpzdpTKPLfPKGFORgMiKZCJ0iRUA1iA71gxnYKic4nxbgl8GgXCuh3tAgvs87a6AIQOV7afxgo/cfE7Vn4trU1pejD/vZLlLDakLZAYTuh7AYHRQf1YynWJmsO5UDJ726y6mhF9y89paGsFbogPweel3bKhI6eOLmcfVLZZENqwnrNjwpFRmWd2/mg9HBEbqNTzFYmIv+fe1mjdGKnNESmN7Woi9eqtLjS9POlXNOHw0RkKlDjOdyO4EbPOynLfFTD1u5L1ix+V2PYT7EJwKg39DlB6N0Uln/SdbUYALwgiBrNDBYLRAG5d6JxzJXl1o71gRaawqTMtX4N+BaeogJI1Hkgn2IF3dZYf1JkN9CkECv/7MmJKks2XJFGa6xOBvIQQsV9E29F4J0zIJiOx7X96m+n4BWYIsl9hL5JidFm9tdzqjcDum5Dmt5JRFkNWetGCCb9pOv9JS8PMmLzg2Dy4Pl8tGm7JLTI6rLotk+/6Ybe+3AOInZhieMz4xuJ0378Pd7La58QVK4cz5FGaUz/YRWHTpR1pBgFVXFW7WB0rs0zU7bB/wDZggg1StBvwKRRk3wza1DIir11WgTccKqxZe8lauL0+v6ZRcKl9/mfaWcXgxsIxsK9nu5uVzzECSSn80Wl7UiMGoNsN5yRyW7QG4db1pho7kEkdjBuA4LkMJi67AwqbX68sQMVQcfh76anZtl6Gcrtxm+fCz+jdpQbzt2HJ15iE7Aoz4u1fPeuyzGddnUqoHWx/AnMBfxeDEYxofCWFfwWwkhUsxUllVrT5k0AhsFAnHi04vdOwTwH3SgVaXj5dMBEEK30ci1wOlKKEc0AatUV1ugH4QJ6bQeNBh0nUo9zRtr1fivYRoANpKQCeS9YqVcKLkSYh/6GDyJsvi3D/UFEfGyJIQz7OtuhEWBWC9P3NoqnjYXC7kp3uIJWfZ9OTewWVSuxKnnSZfA2zZuc/N6MoIu7VAwwk3bjtnOJEB5jNqkgAVEX69sWs9e0Fmoh+Ejid3ZL/nFziKq3aDlroynMNYutPl6hAIPX9AFuxnZCtBJDm5a2JGlwUv35889lNc33Q54zcUWwbDRU1vkdELCm91RgxeAHz/mWSxn4Bj2m6JtdSD8xSn7YYpFcx+pA8IpkIccir15d3QZj5MqYbHwtKA7cPGvtxr27hZjBqeyH/Of5WlpykSKnNJHUhQzFJ71fDJIkIljKU0TASO5rGZHDC7tqpvCL4Z7Fqmm4GyGs/+niojsq9fXihMGiHhVMIaZp//ibd1uPdJ+tlFxpjKXra56YCrPq2wKXN7LZeSZOfByyQQdBfyT5bqKTEmimeSHRAxmZpNORmXj42gURSmr7jqoyFS6SGq2pTgzQkSDHZsRcLfrZAOOlR3mja21pAUiL104mfRb6+HZvC8SKq6wpxu1x8Su8PrW7X1irDipORXue6yJOcQbLGZLjvLH9rQ0VTBr2E1XpnH99x2M5kDcYbBSAvkWwmkW/LOftaANnF8ruoKJT+rMkTg7Ox4s3uye6319atOM+Ml0R5F5HCqWmqeOgLc13AfvVn+qP0MYIPi3upLo/JIcpnCLNx0tYYELXpRkxTLJTiSf/CcBLvnOy2+Kdpaf054SIOZHY4jAdZUmONUTNHdx8JZOdUq9sKxcRpINJJo1vRcLOybz361ST/u4610/2u/IHvodrc2o2HKcIBHv2cHjKUlNukXnZML1Hh8fi8wl5hNbgfVxkpWXW0lRTv5Ehm4PkFwS/Tx3/e019V5jkoSiMN7Wn85pMqWlId5G/AsLeUF6hyqIBDvNie1
/yyYqhscm3AHCU+sq4NieCOvs6TWJJ0FBaiIp1Ghh0d+U3PcNAn1tvA8HcArIVSoK4vsQP2m8rXEF258Br/IZ32sfGWbYKiwhASJBCDD4DcSi3QR/vqXKSlBleqmnT5beplWVsv2iU3VyIkXmuoHYB3IW848PmTTAfMu81tV8t68mzRz0SJDheC+2OvFV/5/F4SV4aiZf8e2eEUN4IKyDtjlyynCJ/MmGvZRJ2xcRlapuhi9WzF5OIDo4ZTHUgMmqF0b97LitA8AIFKDK5ile5OGzaepRI5uw1hnGMyQ8phENv4Y+ryRwCkWszPLTFLCf66QMPoEiTIt/QU2Ack3z7GS4vqsBvmX/MSgWuzV5H5yEy/b3JAjKlvyx4Rqj2V/rg7ql1bfOSGHsWmgRwTfsBsvFFk9Bfr/1D3CDWxKyK/5tADG5TWZZEBUcHywZE1+vkoGUG5pkOOJGKUK1rrZqG/WExs5cwOSjqSMrwpND75NrT6yjXnS7RVvRWF6Au02zONm5D+wAF8Ui6FccgP413OZeJzmFZGICydxKMtZpU566asxEQ7lX7s8Acs6XErKZtYJ5QQ5D1AKtBn8x5p/nxdcu6BUAZCEOW17wNXj9JfR6lHhcJ/8qQ9StSevQuLZSJo1QcYCBEjfxTyLRTt9zprEJZW/4HJCkKp9EYvzDsnRLDbL9Hu9ZP0cOt6LxF2uYF2604ytFjwRSoBPZNRDF0g+5puJoYQBfIOtUDtLodguv5TcrBj3/XKpHJlAmH3EsGOuOmRuW1ViSJCGbkA/nlP4eb6gS2kgyYT41aTfRQG0+EqOOc8kxVYlzlliNEAKqMExiiFQmEVBVXUGGe4qhQDUo1YtjVO9PX9DMcl7zy3nlbL92mPL4v+D/tQ2R3fw7HVgiywn2cJQlUPN7X/2tFYHYSbJiA7Pvvry3V/7TIQgG4mVNaq6bipYfwr0/kx93sj13HYrOKBK/Mkdox+CsaoJGkxFBzECHdP4uGZ+TQrA2UYfbldinlwfAYe+iK1yjpxBo9/3671yFcc4XHDKDQTfkT5ByfwkXx8sB7TUv/xEyTnQomYT+m3yQ2P4gS8liqHBH9BopHSrSvSQ92JsNfQXbeZn/BSAV/i6wEtNjuTvHTcZthKPUmZslfxh+x7xsPVGqMpzSEoTD1tmLbAFjVsoEKlJnqChUO/gR3T1Y34B5zzl87CrPF1VidWqNDTRiwtYCyXIiZE3EtSzia+q8M0gj+2Jt4O17o22cLDoeFbkpd9xtmhlJgLNBhwuTkI5yL0S+OzysMcTIJUsolVXJZu6LTaAnwGIhlW6zSFo2JepSeNejPp8VOGrNhfrzi8mkFEbO+VTpMF4xWto1St2rhDz4abqDpFAFJePUqk7PW+WnX3N1HhrKX1mUtSrpc6+qXtUB00k4OXBpFM2eZ3GThYwaGihZA8ydOlIVoeV/GLamWYUHtU//oji+SiVIPIsJ1LOfCkUcH9t7ss46sV1GZIaPB3yWvrfiYTPEGYa1r+zTxh5IGzvrp6JHv99lgefMpecb5PIpr2Yw6FoQKwxYsps6259TUHpOhY2zaLcrCNjicP1IUEPW239+3YhsgKiJghW06Y3rx8fNcuK78tRrijtBYHs+x79jiQKXAoF7o+VSt3d3lpRf+zyKjEvzbvBb/Pxm37e3DnHJ3uhml3GBjXLIKJjSVQRUu4dn8F9t+RUcnnVgaRzqJbwYdz7wOs6GCR8FA/ylvm14e8GY9STQl7XipH/zZAnoSkpoYE+qpqqlXpvTCg8JlNDGmFd3lM8f0D8ov3jfEQOWkOGbvUXvgYt40MRxi5Whrl8iaolJCKirAKbugMeiqe5kHb0sTaJRCKZMg9jSzC9j2S8si+NZAmpUBZMGd7Z+XKKOndzqbnp9HX4sbtphN5iVrZQ8AskQbFb7Q/rKlG88+JRcrl8d/hsEl0pxSsqbAMXHFOVPOtUd1dLhD8jfell/KqRYyJB5mC3B8WleZMI+Xl0
xbgxa2yGR6inYY8qR9JhRcwO5ApO6xtv+kf9M4L+kNM2skdAWE9bSXExoo6KpQweH6KAs67gAxHSwMHWFgkzkaNTuzyuTxkrmX8yhv7FwnsPyuWKggLBEK6HnkGg6mA39r1Yxd7oAjIRsCTSu47LZMt6cXykhdK/kl0giHWuKk8schDMxCCLwYcc9rcfKPgrUrRfjX/NqsSHkOzCLvEvJiD7+xW4QYl8MwDLKoMWA8IPNiX3ZinT2OPVEXAv57wEn4iElgf2navXBHn47F/ULJoJ5h39hJ68LAO1IT89juaqGyp2iZ/uR1zZRhLFklgmdj/RhCfnzqMa2fP20hJRjRuVYRgpRKp6E9EhZMNBr9cKE4J4iTAsTLWkHdlFJuyoij4RBGEFoIuVlNQQYIW7Y+l9Kfc/k5Is4MVLlwBlextlsLX7TGEossGXJ1/0MiquEfbnxFeCcQMJ7oUOLqSQzaR7aOcC/42xBWlt7ZxiXryCklNc99GgFOs4ezpsgp/976Y+IAbYm3415ccJ04hCWewWZG8CR2GLi/NI7qCVaNhCsuRRlIWvIedFrI36GxAdf8d9ceqJs8htDKiIAfVgnCsLYIPwaBvJUB4wXYK1MjXTmRa6FNga89MUpQHI9g3PRFyXw1u8A9KpeGdB+hK9WYnFhlHiSNHP/0Bc226NC9IYSynyz2Q/XQUjiTV1PJyFsxpHyhVCRNEnIQGy/HPSIo5S08U3/pH4MUoGb42m+CJKTJmRXYrrR0AdLgwd2xDZFwm+p4Vz+HpRmS2gnQANwxhZN+6wGAT6RlnNEay8SunNAK921Jh9FOnbAiiYLBylYr7yC8yWGZ2SfWdXZiDHLE4qyCZEdaxQrLHQkAJ9lS67pEx4OfHoJx+TxCwn5krdlfZG3wE1IDVaiBgwj91eniOqjuphvgSu0+1lbjCJFs3+28VZpt2+3LLpCOv+SZunqPNvOjlkqw6vOmV6b6yQ5tHfTsfFVxmQpKdB+S5rFWll5Ju0DY0nZma4QviqbHW6xeGQo/VvoO/5RKljMlO46NDkgMsFpJWWUYPR0IWQMQfsZ/uRhlWhvP3cXemYeX3dLQUqHHXZLfrYTCBgLOqOwkcu0qPTG8dQjxResKXXm3brBRuzxKbTfuz244RzsP+RsNXoKQ0ikPuVWNJnRaISVuwuXr8nSgHmoeAYOMkPwc8lR5j4n+3GKqzdmgnf8qz4NHwCmvBc90OLjpSzDBA3TSjPvuigN1DJ2sHWbmlJ1FjaZ1RLcNro7h7dvkXBz76cUXfn45cGcli8IIQ0TuwStnR4ZZziLUa884njmwl+6tH9lho69owswkwuv72Wtef/ng5SRgPe2juA+GuOPefUQK0oYeYSc4T71qLRGrdweF0BhqW8+yZqGkPbGFpMtcyIkP+tIxrRTiG4iXkYCDByW51Z2b2JRDi0I64kyl1oikndHtrpp7VpeRBbEebxSWTx3TjF3hin2j3Pleg3bmiAPacUrUv9pVI6ZMpfhRRzhixzPnz/YViaBwljyT7zB9SvqoDZEpo0Z+skMLVf1fPgy8DCK++W+suVHQGbZ+49YvgwitJshDraF2pzCQ3WTDm+3Fw/YAfoT6409m9OflL3iWJjyVCWwKvDu58gswSeLwqMSEMqAuwU/ovqu65OzrFatfmy2GKvrSrfLNkiMU+BrqS8y+M4QyMYLbp30/DFfNVVF6hLJtW07DYESIe9H8D+qiTekIsw2PepfSvfCR4cTviRNE38i4Ct3ydGr+Cy5MZIr8I71afpNeJ0p8G2Q8g9uWkqZVm6oOv4lHI5D7P/8GzfjeUDoA1dMGJCEE7xVwdnWnEL7mc0yti9KP2iDBQUwMsCeKhIoehFkWE/IZeuR/YMWhR9TR+xY5aaaUUOWpeDixxmsMB7CuxkeEYJmXMldyd5nEjddY/835yZiZgOHnJOiUvnoosZBbSP0rdyntTUXD3p9TrY358w/8676vWucfqALwmchqevSl8L
fXrgOyyaDWQv4ykc3blvzZcItyo9PET2Dq26OASLmK+31H9wMCmnctOEF5abdJKbzGgMAoJhEMBzBCQaeQ5WLMrqVyXXABmfH2jAeQo+DE0ek9aszJNN+pf8l2N7wgk8SeOPqUangt/5eORJcrlvO0M4X0KyRhEwXNoThv6WbCUPJmAYqocx4+i2kJqZbbOTkQ7Mm5ToU9eHR0+vdLyY2HRGL/OErAUmU+AemsL3S2zOt6JMmQSgqpXT0lVe0fem39dl8nEznO6XNSWHn2ercB2Ew/mVTLuDI9fjkrRpCp8lWchLGdTeevMsqnnhsKqs01DZnYfz6Xw/NN8lXcI92jK9TWMxkPaZVOyScjV6qDzTVBjbWLyyF6glW3eOjwGduKQ9UKARP4Fc4asNB3KITQ+WF4WzBTZUjLaLIGaE9Yeq+f4sdzxrR9fWwoas4uKlyYFVVB4hbN+MCO0EEBUcwg/jdqyDUq95UN09tT1VMWHFYj+ED+AGtsF5xq91+NXABqUZWlICEwqGaY47m2z2Bm0A96Na5isAaLI8o0GXunq1RsahTN7KYxdYo9EnoWrMNCo64CUxOoC0ik+qEy86wYZz0MeUhv0rmTp2REk2guk6IL40Lx0hsu/igD/sCSytMg9d51vpN86iIj4IFdTvbivEXxNII3szultXzIW01ooyGF72seHqpE5e+nPPeM1AjQkbeaurtUfqrp2rSebFa62y8dCcDGT5gVMt8lG7QvjmHgx1OOHpWgGyFPLj3Erxy3dZPwKEUx8CgD9RpK7Z874Qr7IfMlyAHLCP7i+nzcoLhrN6c0Em9CLWzH0SBZCDKfHpEXbGg6Tdu1iF9VIEozVw7LTkj8BKOQcs3yvZ5eRkuBQuNTdMkRnQPYzLQ1pud2ocEQKyZ16W9BBUq8nehb4PQ3cH2TSp56ziqw2dcEkcTKxlXUdgUtpaKz9oojIBVAyGw4YSfNYVFbivb5BLJbq6XA3lor9t4M/BHGHCzAJuo8589XmZLta94OF8sRpOBlstpA5bOZnuEBD4Kf0fjCIyp4WRbpBAfIrzPuXAdbAe6yvRWiLpJi17XYUSIiSxyD5pP3khCBAMzqY/MtN09xJFZohoFe7rbAfVVLpkwgWxEdxqhUOIvOKx1zD4FdPDwPUgTqf9wl2pNjy556SyrdC3voC/uOQvBOF4+dQFfrG2fppdWaIRqk+0YaOIrtiB3Rey8n/FOHuujnDJ5CwpD8RcW+/ArtuDiAOKSqP+NDBxgrqHKvUmohRUg5w7ICm1bP9BVvdAZ0yVibuiKpL1Q9fWC9n4daO1LWCfssM66kpvs7XK9QSJa20+JuTIixF8kELUQr7Vpnk8KE/5c5GzX3kR0ZT5E2E/tJcTaaacly3ISAlxug4F3dpI2CKTs59af8C3nq6JWOuW/loxEg70v7u9nT6xd6/YYk81KO/a0XwkI9aqSbn3Q6MiA9oscOexyBX/t6xcLt93Z4IHmDz/aJTu6S0jyq9FmBMQevoZawvvuD85KTOw9yYGpGtpHplkNRyeo21EYIZFiYYSIWbkFVLy6ShiBleuPJyxuiSL8aSMTWU7p4cGmv6vBscDoZxLfQXZi7y28OXkHi2PpJIZ5Vr5KjXHS6TuwxyGFl/zWRcYHfZxIkp/flu5sROwcvROJwsX71cOr3YFyfiGhxzfT3NrJQCkISOY5cABnya7xLQO34IrIBNh+TbC/EmSX0LJT3Ojjtt39l2QHt3ftNgnz+9g20cYNLyckeahnv4NivVkP3ERqKWRXtUsFg1HrbgAdo+vPmw2QY4BsavOxHfVuYQFskVJt/SyIlUWkmAz+QdIIZPIsyI1n4xHHkFhmWZrnZKD5Ima5SCzNgVugLWKHBLLILfoHXwZQAv1XEGfypNiBB7L/ApkkJmEtyF+PvnnV4j1VpzNd/D3cqer7+IvLtxjY8qR6/HVQYneXl5638rNTTU4PvtxMx+2Odj+mADuYTERnz6aCbTBtYEc
M6uKrzg1wwth/UZu41pWBcPUmgm5292pkgQBZyHXuoFT/To7glsk4JsSkm01pYrSG718TwTvOJrvrN9+xOLueGfE84imtFOfoTl1kWlSWxqP1SlBHyMPajNq3tCvFWmyhnsuq5siMgjtH3eumZ6gNhpYJLekucQBgwbUwn0zTel0EzUJbtIHW9bZfslRYsd2ovui/3rBS1ArP3JtUeau4xbQkeill8Lesu1juDA5oqbTJ5lEwBKWz9PSH9ibQ8aoyMtMju1RAXKNAA27a8fqBsYvJ0ALVAAqTtVYh6kkpvQXzovjY1FnsQ4UwKW2V4t6B9fgWFgmpwhux8Oj8RTGzdzsBXrh7aKMU07M+fdnbT2cmUZHPAmIqh2wwZaUEQ6yOs1cHNmXiTFFHJsEFKYaizJpzaa1v4gxpfpKA+IHpp+jMf/lyc1KptTGgUiNonRN+AAT4OW7PcxThetmjO1pkgFA0Q0W6KLm398JssQcL5n3AQwj2MEDM9bnSCHNufmXPIz0WUZ9E4pvgzGRiw5rjo+uOd7F5MSTZ1Edag0Twi+nikfTEwCnVxwqCLkumzfw331BuX3lykLGAQx1r86wN9sdD9n6PKU1wgqiJ7tQL3sgRlIHglOpc9mH4CxJcuLDsmlBgHLzDcCR/WwmIy6SM6DUujaEvuQm1iX53h5CV7IwGlsL9fWvUBwjwhZv79GAyuZ/ANvBpyJBgTrEDps0zx/C+vMidVVofBMC+itEA0ZnkUuZMQ0B2/BMW8ZU/dQE1v87HIUCsvyLIBiwuQvbO2ZYXcvRbzm31ZnYYQztKT9MCJwXsoUwRtjiJhrKX/lAn//0Rk6gzhrX+snUq4F7JxXgE3m9mWVAwssTi54FwMnUdbRX+YJXcHKHulQ0zrQTo/DhK4KMgf6E7AZgPZB5wAA4IifLF2E5524vbYrknYhkLmUqdW00jM6v+rVRiuNeI3POuNGC3Eia3l3gH9MtNTHjnrDRk/YiKvwQcnjjtNg0cfjO2Q/wAo0yxjVTYngnD9kowuOspn8w19jZ38P/wdjpCZBzcNg8tCdOOP0S3HrvFrdCaPbQLwaGkimANu3bsEeTc1CBuA3nyeVO8ECSOlDFLqbcChjYmfedx7aZxs+jC3uF4M3+FoDC+3H5hLSmZAr1dedJO//KTiwSLh5tjCtLyp279HJihZik8yJP+RFC+YLM1Ew9QgK4kTW523hMsa1DPRDs7iBnZjtK55AtxfY9ZL+pV986FtsMApupWCODoIDnaFFDIGZEHJ5+wxWa4wpK1E9IG83HibmNAfzWrLD+aNopGQixn393G5/Kh8/xPazivO6QMgUVDYtcFRC1hK+/Cf4Vtrr4ar+3HVLSOmigWwXi1sgcDC3MEtgnSF68PmcIa3l5WKaPC7uweFRyFLNuAyPakP91qUyqCkDSpVkY283W1fRDmTCF+NtXM6KBsIP1z9wZwxkZ8TWo/WhSBF4Yh0SJQp+x8eiAVP7HgnQ8DIk3CvjiHR7IxZYbsTVCkaBejDCGFXvXik+XThSsiNk90NZu5ZDC+p5qu8+1oGIZTO6nX0eqNGlos2Sq2P3ecSO7wNubwW9zHwEXf7ThPNeIBNSrvvuX1lFJ1QgivRqNl8uyuqMSBRnzFm5eUJwTpgboQ2sEk/pI5XMt9J2xpd/C2SZmwcQdPdEdX/BwjeqVbUZDMRmxoc8/gREbyU8UWThxJO5B3Idx5JtKJ5AWQW8m8JpDAN6yPVLyJzfJElb4vTD+XEmc7cYY1QNUorgqsA+Dq5QMIfsO7LmOhSQX/dO5x+K+DAaqdkbPnD0feQSCfYv23wyrCABtMGJxc3e5vik73rvZS6xezlcBnfPb7QKgJi2wiOyW9mUjH6CUmTAIwcC/UG20JwlHiYZXFm0LMLn2DlBLmyTdGszjTUpzRXdVkGnGBPeKKpnCW6FN+S4YDssIDF2KTh2sbvhUWYLdekaZMUcg+h8NIyXZjyBAPBSGZ2U9GHwAnC3J
kyMk+w7mhOXnhKnL1z9SlBBhPReYsgk+E76LLjcZplHbRGb0hQ7AgED7xJF7G9swP0j1SwssaUPkYauO2yrMIc8BvZSHEpuK2osknJWsTw1Cb9Bzi9yoEKIizQaG1N62xCawDZtBQsktHTOONV5CiABBXuqUPH21wbaKOFcv+KnEZYn8xcPKdl74XbM2Fjdnp/+N0OycfHJu5rzlTNB7js6ML+WuqM/BglXpGo+5PX+OOA1f4HlAhKx26pQrTvvbMYxtE3chhQ6NSnLsIzlxE7575odZLgIvx/oDflbCoL4tqpV0SuHusCe5g+GTmJA/cY/YeoNBio3kGM9SUt18i9sb1jZuqlKRWv02+i3JeORLnCC2vN1Hp01PDXP+N+kOZoQ0yrlcurOOkqYalX5Xh4Jy5RnaH54wgkDpaOK8LtGrQPF/vEoQHvrQwJCej7/V8PRdhFh7wg67pY9HxHJrI7NMKj+wU1smwimwFoc+wsYd7Eapqz9OUqgTfkke3AKQKTthrLAjAKJG7rlS0PkUWwAGC9Ri3S+/ar288NYqXdy0kuiT8CuI6P3VGCGYtwhaWEv5XoOVrmgJU5FO3d9EMR8MqG0RXdt76AhuoY9jTS5wMy6o3OjqseG8mlipELvp6pB4AmgLXN5/ZgToKXtgpRoaA7o/opeaMFXND4/LquMsCWZs20rPXBaR4oTcx98fknl2tPRW8k+eyboLMzqCdMRG5OUiNaLbQYLOIZgp1MDdNgGGqZsSB7kjqcb0KiODc+Uz7Cf/X5KRRDo3L31cP3mXElzeFUJ83IkR8DuNMIk6uLT3Ipw0q5k/2OIG8WtAGyBt8Rb0JM58oa2sg0Qu6pV/eq1Ax/hFShxnpJXhfQR5xDebExZdx7E+G/mZCtN0kjoOWEO6wg0uqRi+rWCku8HNWoY19iEn8qsjCQm0bv9qpjgzxLHjUhhymg32fg5Pzjq98DAwaJrDGQTsvS6eLo2V+6UV+jF8Fb4PJG2VpqjClG2c7kRvmlovqjJnUTYM1JkL+FoGxXgL8byEOJlzc8QZq3N4g8yF5l4XV+5uRHse+7GVSbbOnhKgWed4OM5D0x7wiH6q0BLSApAc2kg0REQ07Ut1+JD6NMoEJ1fwIhzOqlUcerrXEkz52Qx8T+tgoykzo2WsLQi86ixfs9stSmx9pnNVSSgbwHg/TTrU/wCfRhuFDlnLTwIRTnmjVIKbS+HP151BL04Tf+/Jl52FPVi49I3pLd+1YeiY9L1CtBHfNVadc/ItsnQ/XTc6va5tvb7iSg71YkPWoEZOtd6rhvE56iKWy5ZC6+VfC7f8/ndLD90C+OfnG3oyq5hguM/u+6cBUBy2gDlYe48w2fhDdCUCM2ndD/a4UyXfmhmCerorgfGze4amT8MKJHvEnc9wuUzuEAhxvQdb3LxS74irAjyHNclpPQwWkiSmx5w3i03/qox95LmmSDS2xaoagbj3vRgSKhlcBDBK96Sr+roV+rHxqChZn7BIwtFxlmfPV/EJvniibgKaVOB3wJ1A/S6kA4ZaWD5fnA/8uQo1/f4fXiMkzLGNW0CDEws+9QQD373VYsrtUonoB0exinRrcDMAUhZ29zJqrIRr9ugYy/pdThHcS3I3rPR7+1ddq8OZIBbHjfDbek+SkOy2tNyvNt4geeh5tqew+xqjJJ9oBejpw4hqEfY2dxxNh0nyMCvw212lWY6WIZRdwdDGvZ9kp2NgeiP/EB7x9+PON/2ilgbX4K3nbW8ykfYl3GDRSpJNzAYWW8oXt/y/FgbmjqnBO1+dSSywzX05eAijvyaxZpXFaz9xD7tfo8pfzNggMNq7ne2GY+DC6HL4RRwIezay03T6GCnv33SB7ejsUwGuLfQ+8ekPBstbstJmTSb/Wtpg8h1pJT5NB4auLcLaiBDqFR2OCGSbJyp6Who9oYPIM90hxD2s7YbZNZ4R4oi6iI4ShrZuSh94u92cTB22EAkd5s6FnZa0RGeHWwqqrgrq
N+1em5+5gNjSs+RqYpjmSoWyPd/zcUFuDJ9I8RfJ0kVa1HvZAV3qu8cgeDlhpxNmBmcknW9wDTMOtuxuEPNrbiUgRkG5RG6AExo71Ou3UyGXpjvMHoHUAu+zcO0i4j2EH7ilW0+xjFfSzMxV9Brk24sLbyVfOnEjERlJmsua9WpO+tlsG7u9dn8aed8CwJzDi85CDs0MN0UqYA+wHQxqnQGbRKoGJD1+op80erKpYwhV/7xhfKqTgSOu6cKc6QCflYnRqfDbpkelokbaHLDhfB+Ll4CK/3pfH4APM0X+Lb35TG+zlgnkxOTMTiJ/CMQYt2KKF6y4DlDal+HfPyKSBkAd2IJrIN6Nafw4ycJ0e6uwjW6U+VZBLYiXwf/+lbaXrk7mtIUK7ggrRBUXvsr+roHvFz7ttK7I4yz1/PllUK5Q4egiPNbDHV8hJO/xrYdFxxBXD8i3RpmMl9QHQIT17w2hnDt/zCLmNWH62qX1s2UvuLb41PEwi/TPeFKeblZxCiBvWeA7eKc465XtWO0QOkC5wyIaD5xnG5RqjXVF4UCo7pRthlbzKJ4mTSqLRC3pJuFewrBswLGEmQgBsU2zz/6N0RqT/wglIFo4eMtPkKrtswO5X0EHoRQ9Dy+uR4uL7b3pA5iwQaZ+fhAhQCEqd7PTF1q/oXX4LSoOBVsAUMz5MMj80rJ7x4093p6i+FfaoTEvEysyXLwNiNtJKiibF3+okXljwS19s5U9Rn7Ul7OT3zQXj4GkULsuddqu/ObEhktcMxIZz6Pln7n6FV4tMDeYghDPuvZIMa7F64JH+BXdHEMdZyAVCUhmk8WMi/n8S9sHJDgoiTwZOICNyH8O8v3GVlAlZj3p9DA+zZwErLotnlrovJMGL8KfUtlRtWLxIDY1VbnlhIATg9L4jqjYMQBlJvN0Q853lgFum6fLEquJMKyzZEY/TxD2JDsw3GzRadoxZFaxra5HPvFEcE7peMLW84qYWNDA1VVZ7OUMbuRTQqRen0cK9oMhZTkGUO+fMoXo5QXSa7t0hb+mL8bZxRiskr8QE65WEmcmnjNJNd4OOfXshxL9eBKMjVfn6PE+mlJGjp1pmhBfAmg0PPmuW+wWGrLh9b+m5X2cViy00lVAJxWJUpehdYM0dGwXqjQIDwzUgSdqT6Sa3h1D2OIbk/x7l800R4YhttLH1twlh982vx3UrSVsewGv9TC53dUd1aVcGA1QWfjOrWkjNzLKZogjGEk4JOjZB69n9hb4IzNcpOOV7iWNIim8Lz6koC4uGt3+WYsOHs4oUGfSRUkK13LoUnf0SQBTSfFPwZUrTRflyIAdrKPLylFOTS45Zv9jVpVzzKZ/1zA+1zwdwKmWNnkMUuaJuMy/Jy6G4VJKVY2wYina4xFCy0yMKUuBnjCH2NNh6xlG9f0eoz8EqdnYOveAfHTYGNuRTXy1HI+zJFU5QefdHC+1iCmsQVS+AryyYVc8XYswAqiDipBdRXYyA8a+V2Cs6JMXi1NG1+MQKoQ0oGLtxjQpPgvXHB6l0kObc4O+Rqcfy5dLQK5zchwjHVS1V209WDVynx0I3YwDbtWWWbeqcuGhjbz5zY4BgITWLGyaZsCWEfL87BhhvsD2D+5IhpiY4YgUM8d398nhQsP8YCPXC+CQpTJXhL17RQOg3k/PaY1JQ0FD+ep5rYNbasDrR5lqnOURjO9NqwvqRFI3prfD+eH+0FAD9yN17wlDj9de7LsfaUlSStSQ0jAkSV0ANF7kTKLt0+mie0GJPp98yHCNAsDIpsxE26UYVdtctPtPuIuDUoQMYcPE76XlG9kHQxKD8KUktz97FHXXavgjBLubg99ubLWos7+lfgc7sbNMCnR34yDlyZQKmVCzj6X4VhFnlEw+wITGAHJpDMVZd0C3mbHLFI3R2tZHwDAjpJZwT5urTfdIYiqsj2QnJ+oTBh7q23fnWIg+zhP/w7b3oLBm/WCF7+jFY3V9IekRPkSFJOri0L0pIPYS
7y5a12q2OTeTasN/F2fA9oDkjoDtIdvqtHy0QVs0571jZQCquwsaBFJ2UlLYyAwqARr3CtIP712Lbe2XdpbeyPafuzZdcvSPn4d0ycyua5pjnK5X77B83j6sJUN52Np9DBqirVRWADFgFNLcvnhrqBUUnzJfcokFByp554L0C0cSaas3FLqb5pi02UYQSVYMsw6s7JAB4UCgeLu9J9R1+ygauFvoM+VHWhkig2o2oTO8xsPfgkkKRgr43AYBWCC2LsInOA/NYmHrwzBkQIzXhc2RkufHRqeekshMXJgnvFXR3gzaR2G/lWB1RmUp41I8K3OWeRC+Cr0+xhLOYFXZVp3wAwgWXaQEgrOUI4vC3/abLth/NRzhm+o2/gWl5sg9vAWBeJOR4V7OWXtRxE6VHRZd/Ct7ZYLeFKAlQqqFnRaXQZti8+IpQ2ofd8qHK7Ol5K3EqXKraiZTv6X99UyudJefMd1UijpcHqRCBHOUizO+aliBtxLR09Me17eXfopHvFdZdtdAR4GFWtHr4YbjahzSv0dqX09P4FADoPmPHHiZbKbggLVy+XggcJF0poYv2zOBaWB67vI9RMSFq2P5lo2Zu7Y388HeashHrqmtYdrHsxa1WgLeuPXf2SuZF/1RDlXYT5ErKQ6yeQaSpDnGIamkD4LirRCDjDySvGInhXHfVVjS8amEFdqOZuy+ROZBynX5cueMjKPRPWGmFfbtcIq0j6OYh6IsmIezmLnfaBoqg6Eh5giYEUbnHkCatZ0Bvp/64Nmvbg3+7h4lm9Mo8w/ZEM9qpL3jghlC49vTBBB/2K93oByF/3mQo3hTXKRmR15Xrsc9oDxnhhm5PtclK9WXMlTj52l1PyJjHtTumOoh0sv6mbswBmZw/aXTEKqN4POFmya+xxN++c3r2UHX0laP9OugZZVbB2A4IZxK36GzcLsXaZoJnkHvV6atH0/7wYT+SNO24Ms7l1gmsqsdC5Jc18y3kyqVAq8JaW0kyzVC0CaGVbx8vSNCcRuPx0vyGldL1/GhJmi0ZroXhTeTGYbgUgLwrqjus66UjZ++pzx75xVrElkM4Tf5I9y6nxN/4UKtXR8nBoQIBRbO42G+Ol03EdrLaumx+N6CrIq/pSGgRWzox+VnmcpTWTZyVYia3F4nmZ5u8ILNgpbsDmrAEZ5BB6lQGcPJ5arrXWn9xMGqw4ECZ8K7yV1X9drFiswP1AMCrwAPyRvnkXsLI1IbpaBsvX8KUDIhhpNr66991gvW8y+vaPt9rAa9IzyXKtaP7C/zOHG48HAhqDGUEH1JXbfmp/JBBu7hAOSGDdx9uOS6JfxSQMVOr3WWDdv5gemWTOf4yqUVD9zjnHOwkt1kAT4zjvOinUad0UAcpM4mYl06Ndyq8q/i2geNyiQ0rM93Tf7HAalyw6UcbCbmJb6dSxLqTdcmhZpW46XPbiIIJg/2VPhJqwypHwqHascS7Q2lqIhOxroVvpybY7F8QhAPNn4DYC2+l+kWZR6wZLaU3Y+BntzoR4fT6aSXMwoM9ffIMOr2/tepNR7LxZGaugtBJDdGwlExuPFzU5sTlqGhjmok/Urvymj1UkrOdgmH1hUJxkAh0HBNZXLbYpOcFRXEIV/7Q0TzYxYJXJmmtd1oxWT7WfsE5hpNuDoWtUg62n539zOWxqazwsLvST3i40cfpVxoP2+6cPuIKakfivcMS6CNOsnChXtFdwqIj91ZFMHYQaROw9INNUPuKr3ucqBX2QG6SJm5DK5KQw3ZbHimkb6BPc2JCMTc5VSXFotKgZBb598V6dm88GReurFZtf/Rd+b9scAdx5eB1Vje9pmrc7pWYO4HvaTK4DI6ytRqgXlehFCnm8mtl4ct1OJ4IGLTqnmDsq0VquE051/t5b9ovvgwvbq4MxtmFxSoELGRq+T0cGEkEIwQOoSDbmObakdLL6VdUgxIg7hq8XTB8LUwYC7DExMEEZNGrUk/I7WlPdAy6JLXYgKWRjXQXWM7
GuqWt3R3hycGVn94X3l3V8YTgwfRHz2aQ5vg2uS2x5vpbrG5OsQL6+0UweVJaalO3tc0fewNWwkf3M+Sg7qRPflkQzjUL1SpB59Ur/H7qc71GYFc3zzds2APB6NvTp4KjcjqvUOk55XiZL0G0f3hV1b+5isOvN1DLFFy43uqbfXxxcZcYkHpne0mee567ayiETJ7YB1MPukr6FK2a0/qMH6d9WFhmyx2AYjVCOWZ7wdiQUbCOp/aIbtQLyQ/JEk9Ha1ufDUJDO73FvDNq58nTZahzJSVC5RPaIc7j7qburIZ1JTeI4GZa3sIsgQmRYegxPi4QkL9rmuECgAV7Lo4IqB6iV97Ub+g/v3iSaloph/Z5IfB18ZMW2Csto2yc/Fv32XQ4+/zyKhzR1j5ypwx2lxgGM+IIJMWeozAnSs17NBEhex97Izz1IPwOG3L/AQzkpWvBSZmUGR0j/iF6wbe8s9wik6yTLGcXXBaj/WwS5Gv5K7GlgbanqbK+/MpqrnquipqiR8o1l/DcG7+EE9qY8x8f4t2TqgY65JaKHMcr/LsHBlW0K/hFD1sDUPmd2KoKOYxohXW4xE/xT3jc6KhOgg71pqrpLN7ZPZweSl2WJMVP6Bfy//f/3/HzXsbPrtbPrARCsIeka77s+XbUTQJgFDvApIkCgZhJ8kL/LvC0knRtTz8LbKvCfbWNuNfQcczRVNKh7g/WAmOGXQVrc+m02+x2Fe13VDFxHvlY7cKUezBvdSZR2j5PtdGbb2RZ0ZEVTHpzLnWEt3LqRuRwrS0U2I2hlZPdpYkcSY6M+juz4xRETbH+k+TUq92SzcXU38Vf5Igsqv52rghyYQ0eNrx8lZlCOnjMM5hnnM1hLrqK/omAGjs7B3F5I4FsUJCNV9/2sz25UmSPqWhgEgvmjAgZtGKEW+IXGACLaqe5Zn2khutY8HU1ak2/0NSNOZttit+b96fQOrEKU60upGz6dKV1Bo8tI8vVans8cmEY9zgsnhQD2Q2RvWGbhi3VsE7j70dNj85l4YYPC4ZZ90S9A8oKdWcUhGZh8G79kf536lhpTR0R2xmspVgQysa08DSPRS06sz/xUWkyxCVCNwi4Fx9AgRIKjOerK31Q2QV0A1ulwhfJCcau+yTptN/kBUaM1vCwryz8tyIfMbxMv8wkrjMw4aHTp6fuyJWz7M8SiLn74KWjHH3qZhTzUjN+LG7e/NQAOeyy0Yzax/YDDRwRfK+gwGuKQw/T38BCh0O3ptGbrBJHR3cJxGeAzlsNMO7mD6m2PtCkcPfj/jKlWTi+wd4f+Rbg8GoSAAAECzbdv1s23btm3btm3btm3btnlD3CAPWaa3z+sq7zV8V/sd+d88xL/TlOROpkrVsWvU69s5iC6WKAOH3s/343zwZs1bbViDT9jnGgH14Gie1/Fku9SxbL/zWaP7+Hjz9qCmT1CRIAVYqwQiM0SQzrHnTsmTr84aR3p64LtpaW6Mg3JtEpck/hcrr4LAdDC5059KleKMB5gTwSnEr1ukSmq51pVfGVip4mQTXqY0bn7ZRewhuySuub8GSNVfL7LB5TKxJ6cCzq7VAZnvD4Kcv59YZy1fWTtd7nHEmFlfznMjB00Z0R2TFCwz2wCTXyRrHZ1bmLZgJIpbxJ92axbsACs6twni1Ek4/SqgoEO/OJ7v1RY465Ev3umrurTadqdza32D8a5+0zn1MLAubFQAynqiy7juuIdt1235JFaib9MdpiSNXOJc5TdgYdOM5I6QpS9LBbDLwUCY6KKuiqfWB+hPudWMBhpHmBmvNF+tbUMyB4xqeooeIy2y71PxOSbt6q7RCbinF8T82g2xnL6ZVjyayozYxnMiXgOmxrvyWKx1juQj6hJnMneFsIkXNt6hnocwI9YBx9Dxz3TQtukdK7p3WGzhKKFNkIPEh+Nw03+50U2c94KKVr/INcshKjQHdbyBfl3HDef+l7mLjaf02ITUag+3KNGrOxTucLo076qHAqPw2
Yncjl7QDr2Poz64SOZ+b6UMofEMpqugrVG1g0nX1M0q9NapZQowwZ90+LeoFp14UFcfaNxSvHIaXHH2o6d7W1hZWVyZ8zfYwVCkyEI7ogShGNwSSUAw9qarx4Ja1dobpcMXS45b8aV6iIAPBS2PUUPtPKAvGRxp2QmKbpcNvO0994HADdAUtPBORzXjsR5qFOp3jHHnPqJBFdlrAWFMoXjW8gz3Hl7l619s+PAvglNbEs0x+ArGhr89w2z0nOcbwC2oxTxy+U6Pk95SJN5h2Yrr/Ge9JWOLzNmKbRt+kcaskDUwdit6kw4pAdZtlTUoXC4ydufZxPm9nnpHZJp7vdsQIfSbusrVM39QwaZjsS5I7AU6VQjKMkePLqvwy+38MsF8dlKteQCJFrSmVuyJ9OIW8LtKnqJsH5BPl4N9Ufq2AT/vJMpNEf7uRGPIRUPUpTbKK8xKJUBevQpIGfDGOsXKRixeXwYtHYwYaWVcYXX6bgHiXuN1WKadEt26bM02JcoqwUKnpdk/BXCuSZISHTM9jC6TMLJSX7L+8jZZ88PrF1idyweGbNwqD0LKroFlrO7fmvK58UECZYRyeALrYWchq5JCB9mC0BI/yENIy4i4XX+74Fl5iVfJSLSJq9jddB6nBR/GgzpmAzaNshZgrks5shsJefTCIKMPsxpnFPhT0jErD+XuuA12NdgjRdYRqc/wQakY80qEiWyZhWmlbSQpdfwkjCKKYITN8lyUb6edLk5Q2q+YePOJwvnqR0W2EoviINL6OuIhfiTY6YqV7OyeWLEssiK37kq5mf8qE0LvU4S2jbdq1EkNJjss2SN6lWsOZhp9zCTfWYlaFoNjFWapXqJl9KvkT7QXHaqOryAYhGxClTbYOqjTVcxz4Vnq0lmYQfRGV47MS/PTpXj7jgJQWPp5Y+IhqU24uuLPJD6Y1aAiBvAK9ChZSY75O9QA0XxBFxk0Eew/vtDBvzfWlY6e53Blj11i5ExYQLF4qyO7MLnpSLl8Vnkg3pudwQQfLG/daY6M42R9BaS4OcZme6O5tZqUTyn69ux7fAN8Xuq5jwufqzYq5yRh5qZzy70G/dFonEX8Y0hggUcgL8YebxrELimmzesvl5tS+vzjJ2bvUNZ3r6cU3Pwdjx0ZmfcyKkjvaPNy1QjiTpWc/pLDrFDEWWOWlYSsNeYfy9UO3YgBfbvNwewqwFbWr4bsVlZSMiAg0DIX8GXjr6Y3FxHnRs5amu0WhGmzEFk/Um4yoztCJbAhernJIMR9UCtZSJcbqCDDuHHxgcwb867eMjawCRu8WAjjKvw1UhGZ92752/yQyrZPrNJ4/3nHqNlaseqLQSVitDsb3NkKt8E0BOhI2YU0yIVWD/PB3DU5TdOaSAvef2AJB9hYFS12x6ptjVw4PnpOPHesSRrZOJxVcv18E3BnhFZEzWiRDBPtvILbgkGYx0v5pe/6xXwTY3er9nlXwXtoWHTR5IAX8JRuOiDf6iw7ttxdVcwfKW8TEsUQ8ZfGhwf/A9ixMlI/ps/B13pmsFbqp9YzM4jqsi+byRrR0Y2BRYDD5XjKRTVhSvBT6jZQapzcXBPaSCfRM1vLSV2npaLJ5b2CiHmoc+nV1LKfUz6KH1cQ26K3w+yuIPewJQNZMDqc65UR20QHLxTGaFeU6hNrzZbwDPpARexD2Oi/E1hpN16Fs5oPryE2O0kW+SaBj7r/NRBwprcRY4ghAWdb8pVsCmtnyGedgFbLPO83Ocb5MsJrH3qQE7rwKwakcMg4bb2FPYWzHhWdaDH7Blzrq8wsIb9wbOQc8SJ8yYOOZPj3fhzEi9FyCFiSEphbbwvNjyomdyi/5MgPD8j7CW0/fL4XYA0XBRBDXlaOFqMMfyjP5htGDHEJDsyDcTE7bnBIzZzkRi/4YPK5p7hXd491mcLmfCg2rIDk0MZJN76dfNfnciLMXhg51yc2kTbCMdtip9FWipfvOruBuxQ1YHQ6TVYhJb0hAJDrvJ0Ds
0xKkv+blf9NmNg6iGhPcNRbd/1L5TZO0X+L4YTYAWIhvdSbDDpU1uIx7i8pYh0FujU0Bmn82O2+1hS281MTvYAyt93yg/je0ZrkMCmJHFRx6c7tj5FEScq0BFEhR8htn3l56yDJ+wKQLr08s2LTnnYolT9i1m3a5Aj8BH/xEjXCqexalj3h1w7VdL6Ep7mAr33sbGFD/HDrWcVBERylYifOex4qVMieTYsQcSv10+kNH8hObGQRAgzejymB2E0aaQJYusp1T8g8auXN7zIL51ZMz9v5rtRK5jH8tu8Ot3MDFT7qyjtrEiYJgjrL5th9qEpTcjkl83ou6Mh7mmqfQHpmF1iaZBVLNO441DvjzP9FISh06Y7daB8RwqEKy6QrEZ5xRIYHbxReWcitj8WU9+/KCnzYRDcW8rBKvVhCWcFJSG1IUgORvJccid6YNjI8q5Vfp/IcJTA6Kv0ySUdF6iTbYIHVWzijlCSJW5YiSebkKJ+Z3uKgrB4JLys26x+kvhOYCZBbWeaZ+b+RJ0WrYERm2uZAt+J018kyg2tz1Ex85KEXu5lKYHizN0ZzspUNkn/YIavl+wq2dyFBEvCMI6iHldOeaSmMF8+JAxZjGtTQLV+aejrS+un+6rb56RHNnpUc/sKKrWUlp2a8/PStnT+DEI92fs4J07sHDoHW6SOQ+7cQakf4RhCDX/0L+VZLBAF5nGWbVm0gyOFGwiFTXO3wZa9CSYKc++gEiWvTObhTDmLotGwg3GuAlj3phPiQrFb/Ik9KcixiBdLpEVRuO/SMP1Uvb5a34Mlad9FfjmzaK5cANTrOMzqjzuDI4z9lqaY26+HWcaKpuB3x1ksljXDMV9LwPZEJwfyL/iyRatDFsYzuDGVgjpEQTX2NKt8AsutmC3GBjmdbcXZ/zJ4C3bTszjbA+8+ixj4OjEkcp1KanziO4wLiWq2F7spsX7BhNWh6T46X+PQTTqWpC70sy8qvBrdytiUYKPZ2C0XjYWN1jXGnBqnzIqbNOwRowt+pwYtuIiHryGrFacLEbae0QXtZp42nKXD3a4rmt3DFFWff5yXVznTMIxmMQtH+DbWXXg9wUCgMxttVaRzbbBrAyXdDo/QTgJXvP6Cavp3XVGPRQ05I8sz1sbalVPnYRZ9HaICDgQfeiQR0UNjodwuwLqYGgYSR6Ha/nzGmWptyqt0khOtrmyZGF2XaEpnR4HZGtTJhC5Q9c5cRuZbWLR1CGqNVjJwdEXBdYlBq8zSjNfvjrDEp5zQ9EijLioGpD5czAh6AS8O90tkQZiLB0TAuzlHKufIutwa8qOUnDuSXnQD/4QbAdEee6gLHskC1hwyLcaMp++6o4loOM8eGqmA7kXwiGbyxZ/UAHCDjoTolgnGFp8c8zTnTXKS7CKiBE33hKKYIBe3qYJKXmfaGZs0dq/HBlxVSZ6PG0DKf74L8zLTWwcYgMMRVIb5Ja5OCbbw+R3yTQplJqLytMMgkTwU+wz09LLFUt36UlCEPLnpjIWViOhou/dFltmq9EGfEGOMknlnuMaMtd7nvutg3XFwgBRWdf/eakKwJG+2eBp7LKjr2p3BbYVWVLZ0DAM1cZ7FeZPpZ/5klV4A7t53dGJyyMqTlGm2+CB0Z8Z+fpfF5Ba/Kpe7F6QiAOgqrqg8dKY6dNUHCrIdCOP0r1WrAdIBmNxkrNSUd1iDqSF2FcZDq8UpA66P7TaTEscnerme5kQOS5jW7SQRi9rh5yeBBapYOnx3Lk2mKF8XBB3QEC9CpvftoKBwGpHxNt0dpSMMyvslaeSeiowqHpHuSFiUkSEkNP/5YfJYO/QXPIOJg1N+nvysNijEeqOWALZ8o0EIY5j5gUmAedc4sKoP7Avv6i2jTZhVoCgs230z3YmrL2o8p+VGbPUy/C30l4epw2aQTNDh/U8JN7Cx0cZCCrrBecI4PJ0LAjys9pPGnVBxZL4/6LHLJRt3TkGkU7uh5q1uLYxUTCovoawRSJmw8M1nb6
3O0N9AlsTSnBq00Smf5Cb3k4GJ4pBAKrmR2wTqqFtljbfiZ+ha6E0/jSqbH6EMSa4JonPL4UF1XX9+TcBtJgz/ZXBxWMbl2xXcxl+PexjZm6+IEFNZ8UMBmSVUHcWvnCot3+bwBv8CaLyJ0Te00JdDnI3TIMlLS0UlwJYfUzZ0qDEodVEMs6Yr1mZziPMQG65swqKgPBXmpsBEr+hWDqFzjT8+Uh8EwG9HfcR50Hd7j/Trq0JmULbEWMZr2PCS9uozffzlE+lwymN2jWoA5DDCYoEQFYeRHclnjCCAEsTqEgHFi/uScC0sSSDmJYodo4IQO9ojINSskpGUubiIb155n/Vh1s9nKqKQw3tlK5OP4qVb41t4OWeAIq8nEqkhB/Ih/qKVHB5bRUcWwnWS1GLptcCxSNs4D66kkWDAgm8qOk7VnBY1ku6UXYWGB6diawxKTNzx0LoZBwpNA3A++KI3HBI+60vTv4qEPHM7359DsIShxNhn5l4nd1D8co9Z7X2JZiBP0HiF9RdIsTtABnVHNKUoL9KqrsJdZKeoNqMXuLyVj/WpALkZ6OIX5oaTxr4zUZJ1fFzKxVDAjc012pRlfh0R4ygpeYBvUJwR5djfY7sdR4vDOJqJ5bUu/Xw/B1sZQRAZ/bXg3g/tXacY1cElopoORMaopAR6PDP7mPmJC5m/GcFyk4UXMrQRO25xpZz4emjlbzNXLqlGUpBBwPma9MjFl8+nzHNjBBNqlVPmxDiivn5Bf9fgCd8laNtUPMbWN52DGY7xFbI7/KHxLOfSNmja9xJUtzoOhlrLwO5omCmxEsjI3qAa0bqFRpgyabtynU0lvi+eRgdJIbc2O8PbONdN+/q6OjlCw0NVq461ArAcLJqcuqKszRmNwiKdX9Y1mzWlYKq/Ah5XQeWksYngfIvxHCl72koNyru4yN8+ZIx5vtf5XeJn+FWFxvNeq9p6vnNi8H084LWrNFu0zUVOKIPiPMNBeRYtr9/HxYK3AfG9RDTlH2FuMHG3K/4rbNhLX00MpF+h7n9JcoGT1AaUPSWq5hU6Pv0bz+O/q9rqcgp/1ey4kfAitAgxKbwQ1UH9K4UxO7NeqoJPb2bYsiObBBf/q2EbqfsxAqFLdaotqsT+jmKUolo5UQhvvxwpEVX9BBGnJ8k7nEm+/vutZ98A8dc7U2clnnsiJS0r02dXQ+C8ts0pCCnAedzau2a2bwrGvUp2fDLVKkBcdHVclhP5uYlkn8uOGHgD/yQsbLSETWZOBwpeTh533aTLZOZYJSnFAVN1muMMwVqYwnrtQfEPScf+jh1S4g0CjER955YZzqLfZ8F/xcRoLTzz9Xtgfey5RU/fwOyAAxMUlLxKvB5goG8dChyw9h6mOmbSAh/uBl4yoC3id9nGYU2xgnZBOckXDBh/rHgVF5fSlao8+CIvIqIqrpBltdleU+5nScurLPA0n5BicEbUFI+07FhyqWXmq2Zj5VdvLt8assEzBecc9e+C92qYs52djkVgZEsidk6MxPwC9kZFd2YGZu+XFz8zxu+w5IJKoy4ZcuUwkKUoVv3MLC1zNqJJ+cnHHXwbCp87dcRU72HgQ3uyzsjY1u5N783uA09LDtE/HvLGbjD6HHNR1neYrzPxJm7DfevkUZpJAmTZSvyKsI9l3lNaK2paeRfrvJKhQGc0pPjB8k1516EhDPFNCRt+GW0fhfrmoouebbZGP7WDPGzGsgkQYQ+YwZguwypX7T+TIhFrmz4t22D9sn1oncYpjJyxXWMm/Th/oTnzwqKX1RtFHKYDC85WRhSMGlxnBMzUZPw3654Pb5uIQJcu2iI3vtjH/l/YnDmPZr5uVQTVLSbrRD/ioE7w6x2mjgS95HA8dewtfBFYpkM+u5Ja7uic5ZFU8Si8nsWZVZlXnyJFN6TN6dBtQYpL0pi3oqPdgVOI7ype+zyYsRZs+5warz6GiSg58ZxBTl6lyd4gBh5B+bigR7j/JAPBuApwpVkv6DMLbH
W8g4IggSX8MjIkkR0GcWuXLo9/k0C9o7L76VO0tje9NuJLXbdiF9g/5xmiIZWH5UVEhtfza6Y4MU6QlLyq/ymJUF4Q6SVkxtorvnkyk1FVi3ebEWpHNuKGpsgU1AJWSEaNUGa2h7QAuEu5nFxZSvrE8E7zWLJnOaPITHfdVr/NnfDA8T4+wr4iOsxTj1gxzS3IB3iwWVroH/1tMx/F+hAHQPYv4N79taenpuhM0xIB7hGmoYxBYaC1XRVAxey3USjU1n5Qy76MCcgtXQAL3Em3RNcLUAvHFn1d5rXmNY+KRFUrdz2nqwNoRfJ0MrelFxo06T0Uy3d55hc2c36heCiJLC8WOOZMHfim0Bepj7dXiM4SzX7XX1JJ5Bta3O9VqVwH540zfnHwfhWKaSaezKCgSDc+IS3PPouq9mbmuSJPMtCTHzHmQDQb47pWqlmLh9pearkFz29UItQmcA2uVrFpZBPxpgxmRJoEBOqrVbyHBZPA/5n+LNQjllQCfikizvpTi/mgBJrZr9FzR6VRGnSZGozGrGJwnJevyugI8jijmpI8uEyIafcayzMt81OPLOKBxuWImiu4WWr2Bwjx16q6K6MyiSR+HRRFzDFHhbbnRizHj6kcPpagKoh11eKxpK17OLKMSr0N9Ic6TGBN3SfYgxtDkg6Gw9fL2uCKFPGXYCsfHki4n/q07jMu5QYFwu8wkZd325+R5Vi/LJF1+6tdd9sqrZDtUpRxqnTSAY39kXmREXV1AAFOyZXac2xrI99VdrU+BIcwTdboNl0imaH7UAcEBpVlhoCbEacxoBrjAEWzhTPlQvV421kuExYcn4LFHP5WziNQPKLqkDRMu/656scIrTylJn5/lez1VsKHEbGSbpHlMquP9SD1+HM4O/yWqrBaFeosONS5vYb1yIWuUMYKxvFl4iAJ2geRgT2eGR6o3rMh7fO4MRM196uEsCZOvYe36ejTWF3hVmM4rkQB+ZNj2BggNJIQuSk75Cp5tnxKBw3Rhy50mD5L8PiJCk1MUPbQC52MxU2IxxV6lRt2yThkMhIktBlqmJearN2KCg7NdwuaUSoQF8UQ6rFX+uSBgkinYdMS6GLTbuagCjbBW/Dx4wFPo7P3NW/eAAmB502Hz2SnFSPslrw4zIbBDoYFeoKz+krhmQov5WKOWuCDYUfk3eEh06Ibr5IrI0bPU1b6JfbGVdz8DNxs+rkvrgwdBC2jomv/N5kP9JkOEw66uyC5spfQ/XJs4fVYtnXnC72QgDXAja7jZuNkhrtas2424lvM/d0Jea7sJQ9QcQSU3snakpPWi9Agi0rcGs6eU67Ei383lz6YFVNiGYfOXmzcB33PfTT+kaAWiL9/XvX6qf4PWvQ4WqaFD80fFOVw+uexQFEt5ZdoQqY+Dy3KxNf0gfZrcVIUBbbyN0JlAZiM69Sk5TMAXL6PvS7xaGcNZwGic4U/+ZHyIi/OqF1Mytf8Kq4qgFJNyKQtBAUYyWkG7Ym2YQpzw4Zr6jG9O5u/qdIVsxgeKh/GGxJ1phMFfO3uxeAZC2swtrh41cJSjPdRaZ7l4hrrPN/LzLoIBSbK6IZjRo/478XXDDa4qO0UuthZ6xio1fZ9y6Dys8rXYGFc1WZ3gQeEgiUZgbzqasG8cM554ronaRp3LUI3399OBlAxOC8shj8G4XZ29VCFnp7hmqBoVvSk04G0udDlKbQIGb0jdEcdyhP1Sdvi6hr2CH1ET8cBT+w7yRC5mFp1/3DnVTZrFkfr+fMt9CaRN7YuSo/t3sH2sk2s6eTfVordh7W+WjA07SNruoh07LTOWK97DMBp7Jh9oG2zcp6qzz/TEbmO1iWpT0STXR9B9pS7o3DHXYNKkfHbArHHqWQt8hatuFDK9LrZuz1YrpOROO0zw1352LpGuL1EhU1kIDXXOIUQ9a2nsAFyGJL0tpsmVtTPTNnbZw2Xek1TLEDEdiIlF96FbhPlRnwK9BUh8SbzOpjxFmwfZHnz3vFAG0
Y1BNqrxW8L1QqPQnLd/iB3glHr8BDwcUKGealRfoXsCrBn7aDGYtyz7g8chncjOXoOffJkHNz1AXvv3k0sX8MkSNxM1JcxNpbVxe/lbfAuTqQ2MRP/zW/Ol4SvA+SboEptJa2aTb4iHTHVpPBo5b1P41Eqar1JqILLTONF9FSmXuP6afQYGVa2PO0LndOrcYghXoGEgbV7rDyIZ/0TlLZVI9MngzfSpVJ8UHQuX5CEA3ZK38hgCjCJ5ZiQ7AzeytFEpDFysa5sj2x4BQeETtaeUrks+8/RlLNiYjYUOTGK8U+ODY/Abc66eyTnBYPK1Eyd9cFiCA9f3ciTdMFq7ExZyjLp9Ru+AicLVm8nrN8r4cf5nghjY6Husw39FmxV6y38yLMCsRkBqN4dK7dfPiRFe0KhuLYZzs6MLpjOb+pMpqn5Xu7zPfMrdc3XVNESPckOx80nlcA53mswVMP2dBIFQpqVI6dMI3WPrPgnbkTe/wweNyJ9KHAs9SkoovGQZ2lbpx1JkQzslW4lq2b+53PUYga1ncr1nJkCkPuPrEX41ErkjKp3Q3w3HJv0tHgdI/BbmJd9ir4d3KC+VC4vWOqei7WeomZ3IO0jC13o69b3KOKGQI/gluddqs2RxD8OIqJ800kRH1u+jxRb/OujuFDtfFEDTQSLjNfDSqgi1Vb9HtKAj0ODI7/BD6QPQHd0FCE6R+QCkIzC3RUhrgTIs7z95uHf9027R95uAzf7cYbgr7Xl7OYa30WmlENHaXzzABEPq7tSDpCZ4AoFqCCSWk9N+Ut88FMnTWmb1plRcfGoTpzOFWKcOPA+f7hBwm+c/gTclKEDUmvBift5Q8APEHs4yhaHC9Zu5ajOoT2Sxc0NDZ9n+0PB0gNKCKEqi9Orf92WoKCROdlUQ9lGV43BFnZRL0s7ZAX8DEYFAWyQIiQQIpnwKHcwNEq0RvOcDiHycZEkH3ayzaOfFHkjdRqgfJM4fJxs8Qw3oIlxtq+Ez99DBqquiu5tAX5fxkdoICEaUPWSNVxIFpt3CXsepQ3ONKOILXDMkJRpMw8SEh0dr9p4KwlOir/WtKOIQ4Bd5zeadlByXO+5x81ILl4q0kvnzqMTADk6d2M+guiOFJE3JniOJV9BsXCJzrlvb8GFGSWB/6kfL2SqY+YIQIkBSfrEmk3aRtK4RnxFMPpSJFfEPyVxcYgBrxnJ6TYGm/DVIOffMYi1Voc+cLsm4AE8qruCSr2gNmSCWSI7MW7aYRW/brzalapZ38o9k42J8VIYXPhjBQd7wsFPOvKOb8ihNTLU/XYzJEvqEHBqz2RYLYZTP8myCx4Q89VsdXfD3gghbNbMhovpKbQLDUxEIMVqmV40D7MDYCpjUHQdZEdT5BhUOqN0rDRkDum8c4JNazHaP7P6NA75VH5EaDX9FB9OZ8YdChLR0jUYd8XPffC5SI3tt0dN5afqc68MiR5UwrzDMzjqq7kPrMfPuGGp5bRqxDV3VjdJ1TqLK8qOmfjQ9yXMjUw7wgoE0NrZFJdvGwtEx0l4aHdpNk0xLhHy5eHlNsOvH4fMLj4lVmm93/1y3GS07RYq/Kdn6oEuX5Ykna34JXYYJ/GQ+8YfZcAoMbeZE+FCVQGLknR/qyTwsJchouT4LDksZCM/eq9Zq1f3ZEOKYIqaooojcInv2vjMG3eDEX3PXJoERwDv6p0+D5uiJPIgP1l7kcbpKXTOa8bkQlbAMGKCtlfa0nwocJvF0Vn9++YH2a8dGBsUhpZCJW85MFCP92kBOL+wtV3MXYm8uJMuh/bB5jJS0Pz59AHLqXq7iodIGZxUYt3tqIS/y77M7OSNyx3uiEk6lwei6vW/KcySN5jyhAafIL3gfqsRwYrbRiIemZI0SlObcS1nGUkoKviFKK8iBG6XsGrD097wVRgUvRzZkySHtpMQ/vLARUboueRtdnyAjWi945k+Rf15Q1tkjWwv5BktjEs/PESWK5cbdn11Zf1NoGvXlp11ps0ad/
t8r+w3h0ifOlUoi6ZHKfEXx0f4JDYH/slFn9N9VwWFx9ZJeLQPgH5kY65ON1f1qiuL6B3CXEnFvJORwJRhjgCGoz73b7NsLniojKImjIAAGrY4r47Nfs1TcQTA53fO2cgz86buAXHYPXDCMttOjWZt4qnWN5QLIVt1h2yXbG8qWKeMg/Y/yIcUNSbemvRSeryiBziWHwRmhK4WaiZFE6xKFhxzIo6rr31HA78lcsZe7gOl4EQeZPezLBhRb0M+t+szGFdu/z6B2gggbp+BLw/kDsMfarMnq2SJiYSDWsWQKHS7/SXMWayHVNys7BrX3SadspFKjwXSasYajQd9uxcHtlRJNGN/tSMYjrFv76fBYMYee4Emz8EUirtpwqnqqGCW6GYUKK3q3CWryqmUsufx5jEE/qMmvsZPMmOFwBvknpfXTEnKSw37arcU/V4PTsZWJv+DOEvOqoJGy+OSj+3Dl1VL2qKSIZ7zJ2DBlmrB5sU4QrLkMHH/YQIz7OY+oQKIhIDuoV7Mm9kWeJ69wiwTBXf8dLeps+Bl7kU9eKBZ6spSTeC+ckVVTVW92uhF3Zu2YZoU7jOwB7qvdGAdAzGHQoFYU3vHyMAjlwns1ExOovO5bZarVTyzZVYELooZiXgNXlFLs7eR8eswPz3P556rMTS0XFYHOHG1Ot6VSX+F7se5RVrKq17DlozUrzTg5WLQoB/vmkvaE3DCpBGC+50UtwJOOANVJ1VTus0XYgVSXTmdoGzPvw1B7dXciqb1gxvQ6ogJZqu80IMhfZ9pRqtcI5ZveEMCyCKki8l2bv9Z+RbHKGA+ufOzKte9/FJbRxCi4XZUfGTr0i99bB7UiNKV7gLjbjphyBKhDwmCbo5z0VtHvTdxyQROXtRrEuOkto0UvqC/5QHdbsXNLoWP6aAwbePWqyWLx4AqSUZZMNSZOQ43u/piBLcIBc1ZhzqQRPPna11X1tAUsq1XIyVzlPQBrqW4mARCQxRTleA3msAmtijewPlqnd3DG5TMTK+yg8UFVIEpqmBFKSoFMHCFlT5wNqI8JTt5U6F2Q8jPEQHMP+DZfI8icl4xwWuzTZYod1cpWsfiuFcJfj0Rx2AJ0eytElQMlAYc9kq9yKlJg6UBR3INHyF5tKA2rV1ql0vj5nXy6zCnt1fexEph/KQHHdNuQe2yssDsP4bj/inrPqPEUPpA6tTBKpYuh9nLreDOnDKTkn/jEPwA7QSuNdIFt1tNNrvWriLge74RM6j6ltATq3DU23VrjJEDmq4xPVhEzU3vQT/UOpMo+7WALKZ6sdw1Czh8JX7KbM/dgd23S/8Y8o90zb+sZ5T3DgKXzMDyM3P+J+nA+NUMFRLasz1RjR7VaYH6w6VcaFKQTq4rf50COLlT/+2V11k6bpFssTI5cFxAG/CFdRK7OzVID/qIvO1UNjYbxcHlwhB5xd+CMJp7lsygNEFqvWR89REIF6nsAoidRHwTkjmVQH/ixpxOzV4UUQPOegZfyhKGMI22B8suqIvODCAXzFbyNQ4KpQxZ9WD35AhG1BNWmvyks9Ey8lkIxWGcCo1G1Cg7+R2r3uzMLmxmTDkPy2pKbKfyGWVjtuW3HLs1fEj1FStEfIiaeQ+IZLnu7dcSBPrCsjUZ9nWkGC9GIuTDHgJz8x/R+pBnIvFXUoAP9AIYXeQavK6+xt6jwE2fTFx6gC9Djxu9uJ1WRBn6TudzebKQp52isGnJEAFa28rDpA4W/erHBAZF0/RzCXXEpbMx9G0hnwybaKJb6c5F4VCXNu3x0B+spPgJZZmvjYGPRt0boI9F/jFvieSZlp3EDp8qXSoSPYeMrABQIVewCqp6N3EyCarxjrpwnf9/jtcU6oNVln1xoxbqgwZrT8gA8bSIaEz6kUbCYYJICRPOadOYacbCEWJYzAmbV8suu0wOWq6Om0e0T8CfJwHFeSdBeu1LSmt9GyRZPXVZmlfF1u5aHGg420selqLFLE42+GaeeVSxI2AFrS
sPxq4FRJCttRTK5BG4vVfPcT7i9ViZlGPSFSqGs7h8TCkUXScXzfZ1TxJ9LWTHnKtbyXGKn5I7qtwMovZGvTHU0fE8NauaEXKafOEH1I5OTIZBo0w3++8CxOfYp3aR7kNc+14SNIhtinlBR9AmBBCMrMVsnR77OaTHhXmCqYOP9TTIqRXQlbluTlNkSZRhGmGqrzuVtTiJ6LTqTUBRt0gKM3Mh3XVZrHlEh0rniCLMIXYzgjQ5DC6F+8xTM7ucnKJzNqhdznhDgBcCSFVaGtJdzKrQ+pRxX8+C41FJfEBECsLsOxIIEVdoGLxuOvFvkQ+ExzDpnLaDXivS9zqEMqXHbSOACMxPcp55Fddhca+rOZuglVd4gdyV23jpAcMULvSd0MyIrU/v11lECAogcTK1jAqObw0H84jYxf8hfZl79ftS3iWoeO9gZOogzecN3g7eUjgXBwW+SGnaZK8FpCVSF0h0nLvJnA5kC4pTOXGDKVKg3bRh+m/SttwH1lll6lX4dRFkjvjgS6NQ1TtII+KbBrEO30eBiT3csAoSKsOdTB0Zgtc9JWygcNj7f43869aIxwVU4HIqqzV8AO8kkn+yf8L5LVfGIM6VZ1jT6euvc1V3T3XSSIYRyUXCsXkvmRAckhrCo9+WgXIoVxnuDfgu62vToF3kOayHPnXecvRkhE7YqAsbEWW6CbXCecDdsEK2j8Dap663cgYHT09hyVmJu9emQP5BuLnYQsSZeIdtOSYqGArJ+z+IXIUw6f9KJ46CLOI7BDOH/+YVHzrvQBgDg8PR64SOrWk/knb9pMpLYnoBhF+WCD8DKBH+Pr0fAKHPdemL1qWa0rNLFcNEQdS2wsgt8Fa+YfZ+/y6rD29cDZ0sNArNSvNmDuc0YdYz9zvqDOFO0IYwblxai9oNHW6f+4HGkM8fYvlagABgTdwCgTpFjQ+GDjHFde0qeNbaP2FSMZl39fvnhDiVd9PEBNoImy6RJ3eQXUSu48jzMsyb9M6kfKX+qpPg+yoDV2pVV23X7gISQGAO+KljyKDUN+0/u68/CrgqFB3Tiz9ySHRkeljukY6gr6F+TWop3r/lzeb7aodPMfM29tuPNmbWdQqwBrZsIXlSURne+YWVQNdejHavzXImYaDSZNaimwwnA90nxRGx1Gi25WYnh8Zb3/Pf9Gfu4hfia2ox9TJA3BUUNCOxfbeAfom9yP7l+dhanefpBmC9/yx3k2jooTyDVGpcNxFRIV0Bqop97KFggPh5n+Si91h3ZYB90uOXnvKr9hv525IJViMF9oU7eVRmEywL6fMjZU7Mw5Nqk9IjOKJuKCap9DQcFDMY/OhnYi+3vP+TCwfkFvUJIZe2NAaDOmHK6RA5zB/KSOO4teiYL9uOAYxpPan15vC6v26CSdYD0H37MFhLokSFQnccE/w3pRlY+lbM/XKMXg5pkbLNN6rmEcaay31Nu/pkgb/zmfFG/+bYg1q8yskj0a/c+iMWjyR+o1sGoUD3kIdNEnmUNyUloMYebpmHRMH1HtRS03/q1aFlyIW97Xi4xiraY/TACPvoGx3xas1yJVR/EhtUJwMtDXZlC4CEtu9Cw1eYq9ssmqEIvbjhWSBsd/7r3A4rIDLPlw2w84HMErspt9uZiyI6kMkww/K2e0L2KmCipJkI2b76lpatK/Dml5w9EmwphzI8xcARu1oEFsoD5TQT7sxRyj6V7BJHd1TKYWCtG+c5UJEamnnYyrC0XKBZdMettTooohwKx1ADOYXzOAxk4TKgcrL67AUpVn2tA9CMYTawgcyoPPq/JWRA0kAEfh0dCL5lpFuLMJyDcTHPETXyQM8nfZAxWjezcG7avlDFb7MKfbtZfD8yDwybhNGCKVuNKs/FhLYiKfM42FBM7geUSKU2GilhiYJKWeN4FQULHRI3uOwFhkXy/hNY54lqcDbmLsNBuB9EgfekdNvHvUKwYDgOoTLYTBJHg3AnMzZogb37mHX0jAm/ZIlgn5IRJS
DABkBsfO5Yf5hr2guaoaYJHdFVTCI5m9CD3zUq+oaf4tOwq2R/Td7u/MekjzS41iw3Pr8jROQkRPersZETBsyjqDdxXnfxSHOkVmSZZh7dJ2cOyzx+10Hb5NBxVGHdYrLIe+jOclDl2mE2ML39AW9QQOrrSpgHLJHee1ctqBkq96HnP4Ilq1ikOdiwsmru+qH0YpujlBBkSIi5w6cAfuCecp2s+4vXI/CnRuK59IJXxC/1ox2AwdVm7RZOidpsq/tqSlSs/Z/UxxdciRUoScdmTjAz6Fk2Zu0OD0lwrTIhvmgjpe2h2E/WJ3oiNLXCExXc5DdfdY1Ys7Y1qD7F+HWc1RX0WYQviDlIBVTa4SpqLEdGfgNwX5vAohOVHm8k6hU62FDm1S+U/7KiVDEkbjT4hhKIMQhOa58His0wDlZPFU0aBVg1bafPsSjl7uJQQIWgXIglXed7L82+z+EwYD2VsvpfZAgiLyJGLfxrc7qTXlktAjxAgzthQmCqQEqzeQSP997NDCmR3viWfiYZ49IpsSU3XAYQoBrMWSH5uujgSB7coKG9Rk9kypvKvPsjsoBFfKDWgZ5iCQYBIZOqxJnw3K/e3OvrpSFUStMrYoRQR3pkzHwWFtvPe9W7fgmhQXFvtO1eTokDLaeECjShg2poNsOPkBqw8pdF5u4gWKTasQ7kukWKVqoXWkMLiatMplKkQdaTvcNrkYJBYE4FV7dSrjKw0IhVNxJhCzXDzjzZzav0ZqDD+STygyX4wO5qf7QEEqZBDzosUCxx6rXkDimOHyxWtddbYfKmc34eG54UwKV5fpxQSMMe9HiiGbujyKacTI94CCXonPh35h8Irr9v8qZ0KRUXaOaLjVLMFavD7FzXXvIYLtsgM8ni0DxtZ0RqE/OWADDhH71CoKlLZbXD99KLM9us6PAjr/czSoDLalpPtyFvM+TMoDcc5XAlxkrkKaApf20CC9vu5XYVzrDU/X6TCNKUDNHju1FJGerI/FFjkuhNuSDyC2n8qIwuWSGiW+66hEoMvUVwbj+Blyr6D2rDkYNWF7iGE+USxRaUpV6/UrRDte+Rs1TomiqW+C3uTq5Z1gp3T3b9X0gJMLDiLV3urRjeqD3LFEspRIo+qhry1slnBpe0e5OpeIbbkRh6pDWyqrdT2N0vqzfOcyO5U4HTUnMGpYrYabFlXWcIzvbaHhcdljfms6vKXujxMXrGOsaHAeUh1eO8Ptkrrz6v/sGXX2vyFHUtk/OzGyvs1yWlcLgyUhEEd77qZCLxtnTYaLxguGIA4tf96UzsWMr45BOrKCNEKyJz4ZkCmO7Q0Ctw9qzaKZhMAIXNzRWOcNiSsDmphlai1j2tncoyH5VCM3yaM+ORn599y7rp0onzlOYUkMDj8qgzX0yn86aBdueygXUEKA3qHBhM5bmBmx7hY8ZJX29WGzkHIaAHhJLWj/Q0Yx1Cse81LxDGHprSZMElVVgBBUU+0S5qLq6Fe3dUw/9xQONsJxyr59+KOY/T0Zm24BCtYukpIJCgZs5A+TqZMCcofcgD6B2ktKC0tE+mnuVVHvwFb1bSfluXTQi6LUEvYhelYSSsZAj9F01ZGy0CEGlyRFZOaPm0w/fTe7oafb0zrocxCcq0qWmP2mmqM5z2bmYGrfH1ir6ncW33N3bLVLy3Ly4eAG0foPdBsY5n8wrFqXliOJH2weUPJSRlgohSseGFU02FoY4G4svltKbDzTL4kThLRw8ri/EAph5oRsVRydPBLXXyrVlb3njj8PuEsKnJSqfvsc46jYtNm5YMjK6gfYwrsJe5cpn4P7x5y4qvu8nWbG4njDZKHVHlB9qrn/LMmYh/GRqHkX3WVQhm7jbAQFwVU2j3BngbFlPf+TqlTilaQNp2ZakeKHjkxNNvWVJqe3uCU1qVaPoZ0jQ6rcAAa5Y81mU/1BONc8N6LWtQgiBSJ6F0m8sLWW1jh/dqlYruxTEyW9A6aRFj2Da1Uz78UWpcK3bNeI
8zIIr3zuIr1/z1+6IsqAkVjk8NeSQFoPVxaKHYea94BK49+kXU1LR5hCXlPUcPwg6gbOLDa7M4ChiUmzI7FYcOCRdZIJgZu/6hh+Vd2+4QP8Gs1xgBtNpkOu/dqwgIGE8T1NQ89ubocSuBvFuwtDtEJtxfJi++6+lBBLpkRilrkshdzZh1oPnv+5o5vRWd9QbQBqo2KUQlE2O6ff2IOaXOkdE0fpWFAJBeEZiRakMoxToLDoPKQLzYUo73Pb7HsocQzBejSlOP1p36+wxBl1i8O+baJKrh/YLSzLE4UIx7PgZN/kUUym2fDEq3mqE4NA+n84+HZA99FvNLwBwZGukTBXfpyF6HYBmg6Fwk0rJ098TiG9y7Jz2GKpXh0skrPqhh4T15mvittPpv0zldTb4M/Zca5WelHrNkHww7fu1JoNPOGogvBn7pX8s6Whhw2ab6zDPLqEb+hqHF8SEAozsoJz6ECuAJuqrXaxc5dOvF69kSj+RNhmSTinytIVjeD0EBm1CizY9tXGkqpYSMD9cYIclOCcIj4BbpJdztzKhq4fxYAkxxm4/f+bW+YkmDfJ0Xkv6l7HcPVa2iQnibx0Ro3MYVJ6UHqkY2lFj4cieXRBkMUpXwJY6oX3Armx87cBpJGR26t05kNfj6KwXPBpeudpcIpFnGKn7YW4NdkRoPYhCCt/Afzs+01hM2KVv6GrbVnFEy+leCGO4msuXjMXaGjf+AAw0P3M2RBEa2oiAoTNmY8robYOyhnEMQEr9tE3sAjW3dhqvtb1JHW+fm4yfD1k/6XgV6BhXNgu5VI9VVquIvS0vFWsCrVLoGxBuxbEuA1EbUPYEZH2J/peZ21XKytlFwYYQHFlBdvkwDDen6Fa2REtke0w16nF+AelH1HguONH9Y80I7O8YwzD1lwbgf06yss01ExA+pT0r53lM77jTRBENvL8+KdJaT8s/nsBZ0fmQBdMqJeJtTO60SIM0IFYT+i4Ku9KQ1muIg1l7IpAAUxyxQ3yGgc6UTT2QzmxZfczoNXUAdb+qp24TgI54pHdRxBohnYDSAmV8etQB0e9ba8m5Bi1GYfCvNnZ9D9fqh/mD6q5m+MMe/7ih8KT5uLtpQfSuNbkA2y8fqUr97RrnBVpED7gNNoBVIjkpx6BtWxg0zpqkAET4pZqoEuBf5Ll83RVpidNJfS25F9NhKZuNrr1h9wum+CQY/Ug2phFWGM1e0xfyvOuZpytXKu/E5iK1T9g9NWGRtXcHb3cAsoyO0QHHeL93VY1f6KO46xAPHEDmHCkp0tEV08/jeHLU+lH9BTHtqHPO7R83yuXNVR7pdHj/OoDhm9kjYKlFqIi5phxqNKuUu6rV63SjXJ4zWE3uwBAXPV181sYgJPEl3YRtUFZXK7B5SMvMQfgQSIxF7QDAluTsZSuAqKpJm0dSoAI6M9/Qwj9tu5Q1WC9CtdSYS6qO2iOLIKtlVqvPfBYkJ4u0NvfpAj6INYbCipa8DkCAzHEijKiTOUkpbhgMh1lnMCvjlBbCntKX87XJ84mC7XxakwwO7xllUAxKf+NuLZHIbkKg1fH8YqUQBzqeVEQeM6fU2sckSZQkCXIhj8MNrKrD7voaf6R+HXB6kalrwoKUBmeHr6nJVmEZwHPRAgIseh3axVs/KeDYlSF6WaxpglX78y8NIJL09L6NFT9S5bZE9qFSpFYiGMbW9FCW/ujMYwDlNobbRXe7gXeDZaMkm2L7JuAnNc0/wbwj81EPCWldiSO0BASbZHn3y8ZP4Em2BI93H5gkj6cN9xQrZCpAyQf+p38Yh5jlDC3iXyWI2HVS0tae+FvOjaMEmNqa9b4Ndol7E4cfPrYkZxLaNUbizfSHZ22iy/PIXVUpWxLliW+afwei5QiN659cuQAxqzpwb/K86CYIGLTfBSuCszNjH0Jszizk0qNLO0AR3WBcBcvgk3dm/Dpn6IeLQ4pF/JIng9rduuK7rlyXC0WlR4eSoUdUwi+3AxZwFHq
oPU9oUiC7zFu3WXXwNwr42WaXsJMSOxQSpQe8q4FlrlGV9l3SSH+UD/RdpZurpzYRWNVyGl6378wBhoEHZAXBu+5n5ozeDURgeR1UpDINfMfgZIfhYRc/wBogtSYcVvX87+3tbRxVeD8VRi/vd4w+SuRvTooPytrWF1ORNwDj3a8HenhRD9LPiej5X4dotJJOmREOrOKSKs/Gdba3uA8lkXqKz4K+Tq718iuSVX+WKrT6nDgK0okqa3wBADokCTsF0hNtnBG0yxbJqEOFXm2FJrz88mya7pVfsZdXeNYGKwoTjZl29otRul5R/zmDeAAwvhcRFjYGbaZ94yZwo1KKdWkEZH/SSRo4L5ZOuO0BaLCYjxEpK9iSEmM3pMyoHPLTVzWPq9e7kIfTLgOObftqidftw05//Iv7V+sEKSVX41pDFbAxTknerO1P+4hw9Q/vSHqeDmfIPnrmvWWStshBWBxD6VAd5PiH7DuXWd+QOWjeFWi5cv9OZOJd4/GdCAzMpDx4++npzKOoM/0oxU64qzg6XjCRL1T+F76Tb6ksXSJQVLGa1w74Oni8+K1TVjxnu9qCw/g76b3d/847C8g8SEvNGummbxCx6b7YAQv0r8OkoZoU9CoBwtACa4HN4nDzyMW7/8Snd2fy3Ol91fVF3lGKdCA4mrvUkMCHQNgOuLOdZlV1s1eepjSdq/UCO+tBUk8B0MB97kdF+BDSdOm1+3+jj+w6ncuxGqzsbqI0ZFpjyuofCBvQNqOO4ZJUT9GlQ6+gK750tfDf+FSXnPo5ugGL1Up6v7WtXcpP57Av93cB/MuEN9SsWyPvIECdLDbZ9Uh1jstNeGfkUTjh1hdBRQRC24I2UkJqLYhqZXy91ngZVUGwAUM6O3qOSB0Qzl5Dd8JFbTIWOU8YYUx1nKuFE3oWihzwcxCW8vfPMEY4mnYy4kDh/IosxN7wLrUvoni0dlh4gF2lRH1kOQiquZqv6f1lGo9w/jQ6+SGSpTM1JDg82gjKiW/CyZ2wGVRkIxP4nc7CkWaHCtYg7Pxkndsx9y66DYtV1j4WD7+XDr71spmPMWI6W/7AQ9HNREZfifGGU4B4aPn1ECkLeSRkxGToU1Ljx1B6Q7dQFYLzbItrb/tgiGtETnCi1aIGw1O3l7JDtf8Ry/g0LGJUfVA1eubQpPB88Ash3fE7Y1yDcJPrwnSZL8INxWGUvsq4N8NPc9S3ozZcE5u8dMbT+ErxTphjt5qOV3kMw9ca05CPkx07KqZJjZ7pS0xvYtCXnAHZeqmd39bIpvXZR3Wx2Akao85iiK2IVrGtwPIrhLe0WQ7bmiES6avDfqo0pP/x4VLxTTtCKKbs2exaMiqDbURPTu00cJIQDbttdXySeNY31XGm26iEAAhOdczMK/QCOetdeP4i9sWaK+9lEp+tky8ct+Y1FbL4gEsnJwZEzp4BnufBoNv3yDpQi0Efohycxxe5vZogYmQtQmLmXZP+EoXtxC04v+f0ozj1C+NjpJJ8VBLT7Cpr2U2bVYIst9vRm458P0lkBwLUKMgFeX2h1l5Q8eWKhbeCzC2MX+Y7W8+TB7SioU+YQAgt/C7id+0Q7ri/YNauz0fBhYMDGq3XvUtfepCI4SyF9SWtt8vSxAySpxk0ye+/xDxqUx3XZxB+ZqugnyeWCOCwywqNp0iJI5JtQoPDBiGHr6vo9mLQYAhYtiFv/cLytJ9rULrp/09miyQ6SMIVDZgFjHfdSHk7rUd/wLD1qvZFOIyDo1u0+S2Mhhp8vFY7BYXXDjrAR2Zrt389uitZF+8MxMeT8Qm5OwBP9sw9+So1845FWqqkaSHr4odQHQYd0iCYye5JGQxaAlJlp+WOlH9jnK1EeAQfHZHhOBKMg3iu6wyHTVs6IhbSpF3hof5Mdkx+g+zy4NPm2bcXHYCJIZcUmN3vtoGl5/HYAeixuLB++HCJtKcZKyoz+oNlT4L1GbcwlW7W4JmXz6+mPuG5aFwyAGDUUPNnIVX
RmR8gS8huKI8ML2Tj5764erIPN2JLe8taLOoKOX3lkLArXktiJ/XLoY/yFwcee/+eKgOSSs4GCG8YeAAYp81C6z/KQyELNiT3OhrzX02OoNkQ7rBM25gY2Xkqtp6vRL0frCRMDY6mqR1J2JVOxDN7rkUssx0+zOLpnGf6S+PiQtULqWL+TiuFE/xxB3xzrdwxEVqlamLkVFIh25S0Fkrc/Ih1EECma4KHStnyDip3dVZpBt0nlSZ+cqQzqA6PUMvb+L9XO2IaW/QDRljA4b9UqDZOFC47Qyt84NwqVbr7DWr6PrOIdXHXPeXpFtMCwIxrxxy2+aWC7XC5InItA+kriN1+hAG4kwlv0IzdEqETabYWGJXuLqICqchfinAvTCjxPPb1sQTqZLiTaQ82KKkvTv5I1zEtSROwdivm6RzVTdsgUqAXYJMq4wk/1f/w9JkaJ7ylYubOSYtwTn+UO78mHofFLjq12fu5kNtgFcPahKCMDDJ1hO3haJDMmZclvNQ3ZXXr7ON4IBl6Q1A4kvF9Fzy+awJL+5YFQv3RC56n1yMaKlfn9GjCBWCq+xkzvpobhC5IlvoS1VJw3C1SiGJx4ob8q+p42AVssIqFovwQvcjt9/cFIOynO/10jfWtsrgNqUwkMtEIBI4eLu3M6xwHlgtHfvJzgsZwquhhln6nMycv9TpRWTPzCr6g2a3DmVGwQyeTRSDsNhVV04NDyR3Gp+71Eo0qeYzSD9d4+YZji26I/XDmeJ5Kg+8wcRgS7QuBANqAtFMsS878qVno17atjfVvgsQbNKVxhUSUk08qDQgyzSBMTHyed+GqRWglwFjcl9iiRCDLp5CFFAWIy5Dqy9SqoUE4FJRYCTjz7vOzgSqUwB+8oh5v63X0hDLKUbVD4QfqkhklUbv9RRtpDmeMkYzmoLSa5GL1BleMNRCiV2LSJSEyguKzDdF3HbcsHf598wIf06FcxuODZ1pO4zmHddCp6NUi+R8na3x7Yo2OKxw7KuWnkvp/SbhU32i+TMiNcp2MOizoMb6FHKLx9++CRR/55t0cMd/ViM4AQ7Egt9u2wvPhKN+vE3KB9DM4h/RUcBH+jIFhAKde0KhNopyCx1vp/d0+TL7LW+v9wvx4Pym6aPO2nvb5cWwL6l8VoNjh9yyFz22PnYZKyhyaKONtqzei4acBdHWcGw9WKLErhfeCD2H+n2YBiIAQAAMLZt27Zt27Zt27Zt42Pbtm1bHaKD3MAa+Whtc42J8J4MiEOF04KrsYUFiPJouUvKlAcdsMrxmM8KwrcDCcqloAdEZnnlAELWjLmh6MUPZ/8UZquFg2XI5iV4iuTDP8jo1R37UoFzOlNSnIEaWZokCVNSinUcP35Xh5uLl9qgxm8S81Nqv8YlDPsjs4erB1M/92uEXesbZyIGKWEcMbhsl0mhbYW/qZJVkiIs8ngVkMuZSRxOSwAzxMs12dMNY7Yk9NLOLHwzDm1cv7V4fTToCuXyQs7rin28EByG9EM+2m515E/aCtoTXQ/4sJTMKl68AIRQ8faf2nuKwMtETd75OCBkY44GUdVNJ0ZmNQLrFqaxU5H6LswS6vRNVcFtngYWhNSg9hxpqDtXX/ygrpxJ1x3dqfby9TpvEOKHmgs0scoLIjDfALhlstvla6vZy/fTEsCp1pEH7qC/blmX5Cq1sox5GwnFpTEoEX4fhDan0aCg8Gy1HW5fy7IaYq6OFLljLA3UHxnQkPnaroaa5edXhsRs7xAWQ6NbUnr8kItoSj370Pnfp2vbRkjOBJOngqOmUDXRlkrwWlZR8qozReN1ckV8YJMQ5OIY6MNcSTsNmhczvnDFCwt3ZtSgQfYiucYPI/wKv0wlwd+dIPzJcb4PryDp7OI+HICIbwKq7TJYQVVMDBZ1x4G9clV566SaZfsdvMysaHW3TKMG6DXJ9rQuSNLX0yl9Y+ol+lYKk1qmMvnDUygPmLW+pAzXxVNcn/bJjohtYhrbagIKqW6lUWZUg
g2zZbnu19v8ff731VBGTm8HcIUjMiY0KowhHmxwVCXF4F5zFOAx/gyMT4ggw6xIVc4l11OBvWFzd78LX3E79/wWTPXc8iBx2whBqVSy3aPpGIsKg7mzWYjQBFVHrAxR3uOn4AbOAPosEyE/glg+WRdYAogNJayWA62Hm/lD4xuL7RCPpAKp5lmiy15o3R8AZEMcE3+WqpWmmMwCHIptO2uqisKjsIvF8qPV2HJT4gUz26Tzc1PQ/ZYTAXkDmAf7Fp8yilb1ctRDQ3r473LPqQsu2M2hUi0u83HgWXZ2g55wCQOs0xLgdoCtPJbI3oJGUJRMAung3X5X7Yf0/RPC7tu2oF296cCukX/VR7R/aMFFVAehUxjgRoeCfDRptLpgTqkFjK+Wja1gdjGHNfvhqDdy9QKyrMiiPFu7sMfKS0wMpcKLNxl0w/JJcCJsQ+6gZ5cg6GOmk4mFBNTgonnsyCsHXTOfQV2DNhPk+UWLmqT4RU5rJytIY+ovLGlhbuajFFwbJY3hroa3kZkMkkC93UqfbXo5+j26PbJ2QZiSVwC42u0Kj1VG7VFfcVEMoCy3TRDS+Fob/0JXEtaae7Rss/dwR8vIMhyTVWMlaTkT7fH3XMzmIM8rUZWRQMWIh8IP0Va+7FSMOSV3yPsck7ZBVee39DTGBh17UBPTM1X3rWOZYK8XJuFsJD196xCR2oSa7XnatOpVTX/ENLpj7mePHsBab/fDfBX7qaZC5XTa7PTvmlgWBpcUAT/5sykRVrQZc44DiWtS93pHEYwmEgNk54KEcrrtN9CQc1m2Hd3Bh0g5FQ0UuBxcP7I5UuWOQbz4+mXimMkAFc4BjTf3vbL8GRDfKbs6al7G4Mb9PSj0GWKldEcSxH2DntoXWNUbThjMsr/zE6fx2bYD3stBWRl+EQqFgN/vOiIrpMXgUsXPCjU/0Phfhfq7ZwtdIZiWs0MPvUNPFSXRi+RstEbjRE/bzT4JssIeLhWPuUYPZs3lqPQFN1eTTlVOnVwnEMnfbxcSsNk+OrRjQFlm/whdC5Z4bAxMYjzFpirvgmY6Fq/TsM4gT2l5jnw3A3yaYCtGOQKn3N15h1J+cLZ7tMXHa5t8xbR4KoXLZK9PhBShCU9/8Gl7l6w94aUxS4afX8HL5mYQ90AsWoL2IR3x39RYbVFw9Sd357kysOk/rvge08zwc+ogpyr4S1u8D62h2v7e7Yc4lv4Cojued1pbosx5hQKr4rhu6MpeCvY15wQZJ4XWoygDTtEbvYTQ7TuI39ZOow84noGuxoKtEH3u+jzqzp37ciTjBu+TSnnRROhNFCyC2LBcdDo+DqrwnCTeH8wL11g9f0H0ZAcsMnsVHvw2NJElNkaDvG9dCB6NzAEIo5IxxNimBdNWpKXBGey3EMn6sh6/CQ1bhq6eIAKBddkcNg0axoxPqAagoAuEadEh0anSBbLSC85pfniUxtypVENJG09cCxWpYEptVPK3iOJJK+5HyBVFD0fXBTeMDODv2ZROkCuGcwALeR/HpS5mlE30O6gKlPKw72bMBDV9tmDryDXSQKj5Z8y3ebI0gQfP9NgG7eF3jE/UBEpogeRekIVHcgiMjToYVoFKx2aTDVXQNuO90rozJPgCoueup4QdpIIjpTGBtV4kDkqjstCdyhghXwnqD5SznsRMSB8n+HA1CQLg0u7J/lrzcPqlxD+UAYDDkR+aTo8gSL22+DHUvKvYRz5McjxEJwa5w1Hp09a3pk87l7d94BNPIrPtBi3LuEgpWP9pg+P7BCVdRl9s4l+r7Zfuo7sUx7+QqxAsGHpsDzQwQSH+M0fvmr/HzITqx6JMO5KHRMp+heJyXEeS+wkbc8/5XPe08oOw10vfJIdumE0g6X8mvPPsZpaxF+KPgiFmztr7SDilfLH7hTe7Y305Hq07iBApSG0RK/XgCgRt+BGONfZ5L3GEWkr3L2sL3BNDg17TFOikB+KnM5zEN6SqglNjdwUxnleBM5Y0StRSm
HZ6TbHtxVqEU3bp34C+8+7Dnba0BK4MnC7ko+gX0tDXEBSvuiEEcDnWCuMtxOvtBp9WeHwYmtSq1sKmXr5gg0dM6GejYejgyF8MHNqazQpZvWZDFms64A+2m7I7xSuvZgmKqg/7zZVN19KsiaFfJw9HwD4RTfusoeqzpml9cVqIEKurvoxt9fE/mN6WPhpYNf81h99spMgp+2qV5Z8p/9yFQ7ulYZa9j0vk29opJ4JNcGzRtzXxeEqO6srymMV2BvvocN6Nqathnvmyw8NNpk22+X0+5c5plpyj/odN+dVtMAc86fU3Ld7t0F0+b0n6Nm5ZXX0KYQI6e7Mx4AEyGyUppiAOitJISCY19YhJdBAv9gu3c38gHmrIouRuaHNlpbhd/K1SvMKKgvIVBWUGtm9V9APK8nQbNnhkbf6QVwQQPQm4LYacjmb4JFkLPxxddDKqV9DBjHcBtLxrYfoR6p2fgMVONpdfDyyY1S0fNslFaMn7UNvPIVstNYBCNNTrQ0u2oJt1lixwwioeBWbVkapgeL0r87R6NNK90DEp3NdyY1vUMnqPLwzFnK7Y6dw+ltZESgtqZr+gfDBiCxqhEJvgQfb1KUJcWh7pqvj2YJ1DgNVJR9rWJ5wJi2OBuiK4HRT2Qiw3rEDY8A640yjNsoOxqHNdeJiHfnlIwGpahLhVoTHPXlyGUW9uOkyo9ZTj7hQag+M4ketw8MvrTbz6cE9rakKG0DbY1flEN/sCG8mm0J5MABWricVMhq4kQZEbjmiRLa8aHnhimLFbwYS+1Q9yXivtOwKkivu9cvu5nVaRSzTP5DTSp9RfoFazVlpVQQYLsPIKhGv8WVgLxUDFEfvrSkL5dl9FEXeSB+rkssDTvqhgKyAguIdCcvu0RPxECG9dAuLk+erNuTSGAVnSvacKTus2S1MMRwTeZUt/JaVRY0t2HcBnfC6hqCL4oYgWT2hZyqtRufIgo4YxYLtKx874TXK+TeXsKqkJKFbXAYfmPtxwHj3QodZRauNRMeizgCNd8UYX/P6Fd1FG0B68vdVYgfx+AMN0wR9s1E2OBypUToYQ0AAV4TfBKYAI34BsezZta2GdZA+hob+asjU6GoRFD45CCtPIQnmkOXJ/9pkrMleJmGawUVY2ncT3Z8KJpO5n9pYSbuvrM1WKnEdDnHNg/sBrh/3a1eP+b6vix0GKHwh7GKP/XUlGcvp7Qfa7zx8EYKIwduys7xfGZDMko0pvxlMIyPbEASIOiKrmsR3Fd+1B415O43gVead10KUUOmyR+y9ISCFa8Y0ChK1V4IboraLbaqCrr31TPd1oaJaEDUTPuPZWKFQb14ndld74u7gohEytHo3IVpIwWinGWPdLzE2fKvbbcG8y1h6dOGaedD1WJUvytf5qoggbwN30LHQJgrU4iAUCI1byw7yYv4bGUdzrlv8MJuBqMH2RMxNMTvzCL+8DCJzawTZav9y2DcubHo4mqb49zwPtSBxbNNouaWj2gH3TCsq6lhJLUTGkqZCF2OeowjZAt2r0B6b/ZmjBDuR+Uc+E/SWxIAXZ9cXj+xJesaWCGRvrz5rgiH2jdFotTbDpNrTpnb54YmHHk0trqmKyDrkEwEQcdxmZN5IHZ+ccaPAwYipA/To3Xa7fpIBz3J081OSUpzP775PNGKUyHm4eMJqvXLIVCm3t2vcQ8MbKZaNJjYjyLWnRAWSdBNwjd8PjYaglZA0DtlubefKCGqfiXyS5PBCeEsKaPP1haW3PI9btQ5ssZfPnLGai09Flp+Imgtv53kRLEFpnWu/X5/Tk47QJCE9Yg4x6xfwnoLAC6pxUkrQJIFF9V2aSHQXYOGTJWUzvEb0P9OqZLohRKUyAtSBKNg2tX+/SgNGQec3w+y8vieduaHr0fhfMTPVR8f7UBKx20pYPgwq1oE52JtAcRfO1B+idJDyJwwVdBnMCc8XreOe0JRXhyhclmbI3a+w3wWD7sL7SkiohZmqo4XdJKzGxbKcWS
oI/hQYAKtDiNKkZQa/u6aUtET0QoYYI5I/nExB6UNSg7qva1k42BEUxXgaCV5SB2FPDUAuns6NvDbYk0snsDS3O2VCPtw1jvIuMGvZTs8uzlVkpFd6GLpX7oXOn4LJOy48yRV73wI7N4pjKCauISNPa3k0b/zYX0QHQIxlM5WURdawWiW1pMTrrvauECyOguFeJxPFDkY2+IsSbdAMRaaVVxn1Iye/WibhRMYS1O3lBNhzGHtqGrEzAgKhMkctjj0D8qlgTr0n92AcU5Ux/oUC5CCjYErGwYp5LEqvlqUjsBKCWEiQy/C2ZYAQBwUaT/HR7avJwU5yY589kHzy61msZuiP0zQYXvPV+616MuFCz+O+cRU8FygafJm9vKk77BYzC6WELI87wVOZU73CXvJboQyQ3e7vuqbAyxPwi0cBn/dQn0L7mwCeCnq3jBsCXde1tu+2CC7EcBhfw1HZo8yb59Eti4S+W2mqgeAXl2p1LUU6QDRoswHFGtuWEOpnPW4Sbs0yGzKNmCyKZRmeZNZcIEVJxD0a7XKHp/NHMX4kL+hOEvUmRAOXRT/ZYiYldcPYHMRWsAkveZE0JCTLoyvIkDeOC+fjO5iRfBXIJaQHFr+fpnv+AbphxokZ/hmNLnR1QENJzwEb5JzEu4AMP3qbnSWhW6I7WHq9PGeAy+PjJt0w6XMyBG/oz/ryhSf5QFnYTqEdVU8fzpNdRHGv4hCaKQbro0HS1jmTrmceIiy9WwTiHSe8IjFT5NGQhGUl4Hxx6T0KuGccrhiyrlBGr+5QatQxS/L9kvceGDz+LRK5cXQdXB/aMA7bbUdFTNE2Hn/NerWClr0I5jkA02rw3KtTRaEcDQPzLbIaVeWVtwvSts6fjOY7aNGEq+AFZiGamZJ5b6m9L2e5AxCtXmCRRjzSk6CRq6nCSiUqlh6KDRaqxTa2H/CUot/ewwRd9ixmo4c/c8EWI6C+35rLBKUg1/huV4qF+RNYSKl3sHQTYBVHIFRgN9xRHpLLF4H2KPWdl5nhyzczQeH8NEr7uiSdCu3JnGW+zaoUxfAetwT9egUpCgpaAqGP1bfbUGXOxWaAI4ZPZJiT8Ku1zsOm8IqlcaMMbaV64JYEz4gblEikcs4HH459ZpgzrXyuYJJNGwpcoeYsh557+XJ7+RItYazxNcRWEf5++VhxPBaQ+Na6CBvOPNRuUQEvMwjkn0gqE+lY+4q5AKhqq3nKopDAo99dw7cuoVYKNhtByQTcSAqz6OhxwU9EZ9XxJeEocg9VIK7zLwboBSfhjfBxCLv6LOgDrwCDJuXZhxeJJ955EBNOKVgicgQgBi7brpgdYaM2g2EfWnZWm9wuoX15oab5jWYaseskmKgFPcqdQliWRshd6B1XQyR+jQLLi0Y7QRgXAbZL9U4Ea0orNbp03nIbkIgvSzZ1BY1vaekbyAoEiEnaByFB5dSBHRmEX3uD8Q2sAzJReNzx1SO+XbyWw3i7kRf6zWzCY0iUBzX7NPHPIu1zeUMuu2Y/Hh8AgZTKidk8tVE8N3VGnYBooHzc/BvCdA+Ybg18mkVcD61x/J9/i9hnUxDsZNMrOfCYZHkHzmatbnKW7i+7HMHUBxculXEKX+/uG9PhS0IOS9NpNbRw/qmma+RMrlpvfR/qbnL2dNIkfVTTqsXSGmWYsu0XZ5MnzvqvVJ+b6ap80ChpId80oDXrLDqe1M8V1VKcMiEJigST89nf1LUROzeSx3XCdPNrGxjAl8pdC2eyzB7JyDIef0DBtfjwRXdRHDjLff5Qm5JDfUO1l8MhHYaWb3Zpox1B6fYwK+tBDv/vXiETD40rm6VKSLmKz086j28a6n0VzsTpzUmEFzW9j1M5k++/+G7gEaw8Rov/4OPOonuvQYrcR+HKHi2QR3wmCVnOMwughP1hs/oSIJ25oCyFoxW7rnwTfwuycNlQNO8xj72jzfRbCwQK8yG3/1utvEZgNwQHKsd0a7grEGmPsA8/K3ysqjX1Wz
PI9pdRF2UJOjXDWq5UOKEEzqgigG56K0wnMh05KlDqasreAgt9Z/kiKm35VApcbiKoqwgVRIPHFGm/oQNPs8TFX+hzYD0ggO14p8wTXN4rkdHgZMXzA/zRbG6g9abyJkdnl0W7pkP+yHe88fSq7NwcKdit1Jrl6a7IrxyypzpuG2JAfvRR/mPIh6itS+tOY5tsk7RSNgtbj+LZnnchc2Qr58siTBWPGwDFuVjBw5/HV+oedkS5oJ++nXHz72GGLhICZQrtMineWFHxfcOD2eaPsbCfbFNmY8tfMrpa+GZDCD3mMaLzpUUOSkrzNvMAbYvQPzZwnAdEMqKmAvM6FBFbeXypsNCZekgBjRnMI8Yw0oNE6SEMIQVO2kgPAiQ8G3DfIdj/46l2apEbpgNG+10LsNKJxCIpux5op1V5nEVsqTQB0Y5StikjacsfbV2fWqAQpZfsiJtlN7UQBAOZn4uM8gbwRflNZSSJB1yuoPZ7pPT3fOjA7tBsgNkHkIebFsA/FhqFd3tq9Ki8ePV/t3E6c7ehQbIt70umPKMKfZC5WVvzodJ18Z17G39LcCXqMYZ59wpvicceC3Jkq3SduohjDISX7DnQCMwbupoYFCw6S0ECAlPDwo4TNCqLSychNunzCLd7KZCHV/RZDwztL0EXYtIbupqbPWrzVxLqiPpI6Ndu2kRFFT816BiXRS6fVdUSgSHqt9952cNP9D+MhYGSDg8K9ZKiNpfxnjpLg3rx0o5+ZymlS3cN7EYQU4dpLGMV2VXPDn3QoDsrRaLzb5MyeNriI+K++R8PH4Fe+0FGz9cg2qaWEHnazhz8zBPJXhZq+aD0nTahG5SXycNcRFeOaiKxUDc+VFpm/r1Ar0Ww61Zz27XSWCaS217zhvj6e767RlO2bSLx+x53qVfM4QgQF6bfCCst1IEvM5Bfd0//gzAK/dB6NuLiLye0b05iluL/hAFDwfvPhQwI/BDdwAcF2yiF9oEUmi721IdVMIjGk2WvAKsgYp5LyUeUm3u5PWIbzQbMW/5RxtRend1M/wLnyEUu65WX3vIk+Yl+oHGoget1emDEBmCEVf8EyNkU0/ZJpaAAEpMg5Pn9LHgSTNS+958R3NALIFd94sk11oolI9LEbLF78LBa27GMdusB6XRXLZ2Gf0Q105++5jCD4el54SZ3m28V47/GkPHf/cbztM1QcmwjKZ5mHdkN5w/S7MpwIrYjROr1+tQNKh0w7mTdVVmEn/DhzJGBgEG8M1RuR5Ps5Zu/MczVqr/dsuNe8agFsqIPP2liv6EVLwz0F4k2aNkGGqDDDejP4KcaVk7wrvhuaIqvAN3LtvojXrEFgPjlADKJ8h7Bj4lFPRbR8yVLbYOLGrEztmIrVhpPpkSMIMtnvEm9/M2YGaIGiUPXpG95OElZE9ezHGLxEcO8obL2pOuZVbrSTmSk8va3vGBVpHTVIbWx//ES+wItfPxHqxcYvGWDpuleNr8NPbJBVNFWLlEZy0GB/B5QItrBlvezGsaXN7YdXwYhXkYrn5ZX6nQaXefkkDP5NI1OTJGRWgwKro3GAOYBtAv0uDfMYmsHbl3xSzGgm2QKS/GKhNJRi+c/T5o+v1DiB036S8iXniR4AmSy3W+D2tgBSpoA3UJNWupjs0JVk+9E1mh9vn85UrS/eH0LjAFsYCHtI2X3dt00uANONm9IrDGhg8L4EtCF4nvezTUiMob1iTumjXlRmwfcRjpQItUTIMfUhNHfUht6AlNPoJzj+qVBo1JflYlNb0VjQj38bAlwPz6aw3Z0q072218X4Y4Nnt9YcDN1igbPqz619p+uV5ViDh+dpLhiyD2m7LDjNQW+HcdrqV4GR1SpoCS66HM6FaQSeyqnkYrmo5cxtwIeXUwCy9uhrD4gVnHyLorA7kNsgbj1j/9aLGJE1RvQsS+IAlK/p5yE8XcZ26UxJ7x4psUoBqt7K4l8JPXV50FD4DeOwmEdpwW7uIAOVq33EYcQbK92tC
F/IEkvZgsuTbtBOFOyo0898ucusi8WewtoKCajmHosmyUYqtD82O6spD1aNqYLm+VV0uDbn5X2rrbMspuQJZbumnRn2ot3vfLY2oRJ2uKy626D/0TjBnU9Ie6dYbvw5XB4zIUW3zNQn5DLQueJUjUmzlR9nysWDGyMsALw1ULvgq7hMMh/R4S1BfSHJabrlhlCzryk+wrBpgYO2+aT9Lj+WBEEa4qZF/eKYLa6EvwuEGPwxCUBig2wwga3fDtQH9q/B7vgg983aTe3lI7t/j+T5NCak1lO7ktb+1q+B8npG4yKaKxQKIjB4COXwgzpFKnlJWEKVBM8kMVm8EGpjZe/sQu9fmN0ZMBuK+Vhpxf27XvBqOkz41QteJR832BwyP2qyhXDk3sznqjKOpQoh4b3hq3H9sZE7htyLWzYiCvSTnJlXU5+c7eFZR8//MvR+DlP9qzIgM5aHIS9AMjGMvobz7HAotgAtPeODmXdqHL6VvetM+bhrDWIgPBtl4QPf9aUZfkgnlpux3loEcsE+sc6V0YKd+FZhQlxrcw6TK4N/2wJziilYpztGa1TmETQteI0mkRB/D9ooJAStf3XK4IFZD+bIblBnydHDVbJavCZX1wvvc5uza1s0sClEds5D5ogkWXmBtAV3+9t6TekYgcnK2q/WSP8bmWQmCJqioo76/o3MzsNtXKt8vJ3O7SL9+Xa8/uM0pNLQntSkDz2MEewmjNeQVXbHPQ3jyuh4oRqK88G5LO3smVfOYVZetB8ZgA4+YzQ9wc5VTnvxZB16xKm6E9zJhAp7V+FvOo3ZsGiQ+Nq19fZZ3u8kAMPTsZFEBuXJ/9s/HcQY1qgrRj/OsmAnX8I2NxSVKs2R0Y6azEljW/i0UsGSIVFSTQ9LEnCMBbd94i494Ja65tYH01GIEfvRRcaN8wvB7LYR63ffVQL2a4E6bcX6OEAwYXQYmyWG9lHLOV9VTjW2kkGBNBooKk3vm9AXzRzTYcVFE4Cp78gniajGTjrRKt/Qi4JfI/tjsXwN0h4YKwjawECKAG9jD/W29y5sqDN3Z0l1T5+3FO+nQRgy478X2RLb3EC+tOihAFEgaRVKalnakM73d+ZMAmbyzA5hMszuMzsee1t9kvZkiDA7fwk1LZEKek65YtTm5UDGF6+tZL8kdowDzxFjwJ/kE1diDWnlHhR2LfWhJXnO1+TuXsmhrc3tM7uX09dOv1WXJWJvEJvTFTTjVydhVLUw6X2NhdRKP5/RgjeGxjbyhIrMjrz8k2NuVN25NkWgVfrcqrgtMDuw4EnXEpyoxY/vlg7ZzjC/Ef6himrPggQFnMFc+Fz+lPMJyf285v2lcCrvNJPKDgIdzLl639mhOHpa+o4/SEcplvj5VXF+gwuN9sDoXHJkvHtyJrshYuFReHKQzXCwNcoQFn4oERXJmdUajlv2d+M7r5rvLzJ4FzAH8bvZy8HrX3NK/lm2LCEVidRcMrhMXAbxigyChBLPwkoOg3ka3lojZ8YBPuZmEMTT5kf8JjwBA9PAueSZswvP7GOchgjzk4iwhF+hk6bz+3FAOzyohUHsUA+hi2p8c58gWyNdOLm4ZAdR4x64C6008u7uQChjUy7IWxKOHzRlZ2oEoHwji+BQmEQ9Y9QaoCdBglafE9xXqC03gUv9ZnLd6nvH0MDZDWTvbSS8cvoHLL/0atGuTlM+glSWnKyRN7nMC4VdhGuYRvgdcwzGdiujO91VHZydSdRZh/Y3F9bXYbUW9mkBoyU9aZv+ER47TfDyXf1hwh7upv5BfVDby3eu9PqgGahICcZz2dYXNEmjgAXXxmV2xEqLnz9MxYarMAFtlpNeCP+zXpv/75pnEF2BWBWq5med9az0hkYCSb263dtkTDxptuNq/j63J+X+5PeR5eRRLEsXLMcqGtA9kWAEELF3urqis+AKTTw/ZzfBq8Rp7upCjd8shUDTs8CiHp3eF41FQpGRaALNEadJaeWXxrW8W/kAnrvSIQI8+
oPXHV28ZViwbPVDoCuqByAvXGL7h/hemfnyW+YIsRgNRaDMMO+zS1YnfsH+3Z3pGab2ax0flOn59e0rF7L6zE2Hp15xiYdcvY+rLKBQ3Y+AxDhx01ootS+jSqKHORgtuIbj9a0BXDsisKyGzBnFxl9N2qUxzTJbFUb+VJHdJfGEd+HrH4QW/ge0CH8ItdjKZFlmrlc19BnXKcVT1PrjbWxqlGs9p0+NZthysCCILz6+BoHtb3EKQsLGe//Jju+oqxnVnm72/DOL/SpJMTv5Hs+oO8iQg1aFZjB5wyOCbT6VMOOo6HSAPKB6bEtP8U2dKyNOkpAbkoj9mMD7pMisMdgfxCSbayDZNZ7F7OMYaoojPP1pVUQYn3SCPapJVkeC/Zzr1Rb10xktGOAqaI5s6jOlFYUNc6XkAg8mkVYNcDtGYf0Velkx5V0mWdxVnEt9skIKo63cjY6Ce/4wneWKuuCpfFjz8gxXBgA9SafMoILG7l659AbwBBk4ph786Q2Z9boILLbXc/tmBfHGSBm8VWStD1ybk3ABhaVlJpTSNWtDMdpfpiXILNOp8qqDsTVdhLTlv8BHvNNWg0fqr8RoLbcEY5eJ8+qXUc49apnF2g/q7K7jvUk7d6dHad6qfmRR2nBiajQnXN92PtRCLjPQ3PN91r09CSwx6oJKSXM6HcU9VFbG9Mk0ZJwHjklFC4KNytt+ejuwSgLR7JrdGDNncmCSk1Y/zKqb1b/NJ3Qc4zB9Yacya9tddPG8u/i7Mf+vAbZLXUt6AThHgIABDl3ERMdqqGunIjAMdAyGs2ddF10+sDthbYOUBl1vOwyKHPv1x50jycC8DsbZxxpdcrOZOw2qr+Y57S2N/ppDIdU7lNvdJ3H9p2hGjUIuG2TRChk+48A8VYAJOG4TPGuSg9P2iT5ytBxskEj1LJcS3PrpcX6mRivzMt3MvnC/2jdGryHGYIxrkhqpKUl7nNS4N4G0xl1nB3Tw+UGieLtQSwt6sbwnQWCABqHhpjIXcDnwxP6ukoznYT4cGRchCrXwbOg+MGi2k4UVOIXEeM5eYnnI1j7AvpL0jjopafsAm0c0RVGLn2ILbXafxS+GeetohrmZbGmjuuCU9T9USpZA+dtbj/NhTyCoE4xHFguIhB/6+9FYK8hXh2mu94n38Ezdyj+1Iaa174lkKmLsYntpS3gEeLdVJR1zzyGNl1jFg39a2UPH7Fcn7oUlCaCcklBPOSFSpsOGV3aKORwVaQJCF99WWPJqngPQ3oWidYvFwmzB3xGEpNlJLAek9X6czywbPXJbpJi1nRGvd8AH5KIFEZKBTd2rgsvKiJDDcPJ/fjMVEigDL8SIhLF6c2OkavHY+lBgYDdugh3Hg4nglMMOPenOTiFuWttVFpVRb3o25Pqf8jO6No7LFD3cP55yptL303y1v72ANPsffnCRrR3dMdtrExHaz32nZpqw9peTzG1+z+pR8/UAYbZW2uVRRoYuEZwyw2Pgxz5EN4/k8ZKEniuCYhCAOksjwOzIxhRN8+o1yebJBrCM9bnvrWqa/cy7W3SEWz9u0J9HyRTVSEvBlO/lM4qfJkUbii9jyNr0P4gSZp7dHE8y8yZSdXsdpSERwcC+rJR5DheYvMxEltUDJh9X6fyGZNvzcS3mpHxLp/KCAg5o0ujun2exOdjuhyz0YFhvBbN8hKdGd/dxF0I/qfAIZSmhcpq0zusMZlYaOEOT9lFWZg6sQeo9kCBTm42axWGwsPJE6/9aIrdkghmneCS0cjtyVsECZ+6Wg8nr1RJ1YLilxFxswGtHzXSAp625v6iEZd/TBO9N3T07XBi/HdLq3EZ+RjxFxit0U9rsyUz4/MzGYfolVRwasGuZ21MvmUSibOGpH9o542Bfi20qqZ9qD4jRuBalK0Etr+zi5ij/GO9UuQUpSo/5ctEAvFPMxkhLhLBfwZTs9lT1qg+pXqEjSuJXQMIahDL2aB3mrA5RGlGv+6WBeBsJm6HmlPno0
8+Nd/G/3gHGJbI/FgYhcFysDNvuqV9EXw9mKRGSvnnIt1+34y5zt25RETxApy6rf6ldkPzJLDgGNpeQussnhGP8ak3ZxbmL9jstdpAnids+E1Dt6pLDzFTB7gr8edMcdE+YW1OI8pl85zk14ZMTO0J9CNLsnZ6jMwRj6RNrHQIa6VBJVPTP0EacR/vlxOHoq4a8XWx/BytwwyBNDBSkibLF8vdJad1jMeNiDBw5C5HgrvHqrS5b3CAWr8prdc/PkrKRO5+BIhfvMs9RWTgGWY/w3e36HkCh0cizQgIqWJShnTrwU8z0jFO2+u2yLKmLhwbtpQvgWsN9spAOChxQ8gnqIPVzULGOdwkdW3rjdCwX9L4XNZeH/sLAltMdzgL2ZTZjOFIxUHbdAxGeFo+NEyXL4c40Kbe7fHEck9DboiipM3bk31Xa2/fvsUPKTGhXW+fX4f7FmCbwsyfPOboGN7oy0or8U1aliNkkYbmI2ZnscxN+EUilDHqRFVZpY3FG79C75gxOKIeUub38TpIlL6wIglbqebMVKf0Z+PevTHe9z3g5j8+2pxEgreuW6uVG8FjEcCI8FU7Qe4+oXyrfl1PvCf4nMneIyhyKRfneFuN0XOVrag8TncUbMLCX7yvqs83AcO8eMOivA2ZF3rM8ALptciNcgp9PCRNMgekeV/cSZXCIBdgTDY3pO+3WTymK6eaAl3H9/BbZkVM8Jdj319oEF6aUIMLIb25c2jXAbxoksOrIBe932CbGBlP6s8DAZ07bZ/ObHL71pQWthzlfMDVjgsja20Hkc7wgu4yq0jPponTpote864ZIp9uGaq2UXr8+DC+KVDQ9XFlvyvl9i/kngSZXizV5Pjp+BFRpPn/LxTQkX/vf+D9Fypv7PFqhM5oohL4Ts49F9hpOYSKb2wvLGlIVmllnzn5BAlTP6qsVH+A02pkslksABCWrSacjN8B5N7VvHeLaP3J9S903p8WPSt33XszUmaPF3/Fsu7dtn0y8TURhv0wtDlD6bTw3DRTiGanlF022qnTGsoG3r6oU/5jh+FwHF9f2m6XBt8jDK+aeMRYNCCw5w01wJ3T/ch3UfVgEuc9NbwaZINcdaMIrB6qm/acYcEuYyJMAP9C7cCMXIwfum0GQkwipwvVqDL3Mtz8d4bRNk7sE9lrqxLIW2wDRVxwSsWckkgVnj39HLggXxKJxxUhMoRlBcBnLNb+SdO1gZg91PGK6uG4cqUkRGut0RNLmGjGLShtgcAL1oYHTQjepSGasbsla3ekPBzfsONrOxpD+NvE/NHW9+VivHHoVU6PpIwI7HJ5sK2stlAnpXyXstnL7N0MsweW/ndKeAq+x4XLY97t3vmJYOx0LY06TcTnISTd+ejNOgCxWOYKBAaYB4u+nbLh8O/EY3tAIrfn61MTF7HXA5q1hYb36AstI464FaSGZCRvsZcbeUh6fI/o2cPltFVWcakwrwiRjMTQoKR7LKv2VuNU99RmnWyze8WPJutSSPPhP75CGDZ7GST/OAZ5H2kd8OV3g5GUbXAO5MOXc983XHaFGDk/R9xlkWqaWCUfNRktzcRwGSjOPtybxV1nqWI2NqgXNcWFABqyXTL8gU6Q1lcbPJ8MvPMAMwpEAzEfmQbV0BSvoi6GQ0WnvgjqjbESdMLnZqP1SlgwRwQvfvN8OUJGvwfS/vUi9YgF602L2QYbP5esANWoPOLGnlt6pSl6bYypqXD1BAICS5V97kaIk2gRarb9DRitYRhhSNzx3vwab9Hc1D/56yRLr6vqQkeXxuZ2m4eZDaevudZSB/My/y5aUvY8/htB9dxntmAl/OAgeKdnItUVJLQILGrvP5W59PjO7QAYQZJrmLUgbBRJ5JzFzv9Jxgsod0K81DO+xNbOUDv5CecOU/4CBN6Ovb7282uwGPlHx5/7GEsg4ZaNITQ4tg9NZCq6uPoc/t6V7eXjMJtdj5XNwBG6pr7KUtTrF4EE1sO/9p+eLry7Q/CWJJ
9wYVt0IZ+xabi1GTqiUlbMi7n/QbdKffmZ3VRKuZ21VZF0qIvEAVnMwHuLyWtiZPwh2qGou5WVrAimA74ao/IIsuWz43H75Fk2FUIz43B9ttWZeE4EHDoMHiIBV7X4yR5bE7yx7CucFRKbuF+oEG62OWStD49KcJ3EKv48eXM2JcHri6RoXtkZtIzW4U/dHFUqkMqmDLW4xA/ut7smZ2kS+xHuklzAA3fN7VaJNBRA9maeqVJUHev9kT1t3+aqE657GAsZ/fmZhZri4g2QJP3M5XBnGE0JNTvLlHW2kNKjsPMGfzYHYpQSJPRGAYfjJVbBqaTDEh5bLLprO5Oq6LCb9A7N6OYcjgFGbVRNUbQ7lfCOmVB0w0kGnUMdiSdjSvXOv+AwM7DLpRhbjcPPixEAMXfC6SWGyRROK0OoP+qusHAIyAujvpDRBYbjR/17gnzssmlIeumk43tVRj1JP0B1a4kov+g1ekKwUjvxNKxo9lb2+ru/7W6V98UAEgi+uGFquuGV4ZLyaKSnG9qdmY7qQyfV74YGzXeV1K2Hx0pRmZexMYg5c6HohKRO1VobY7RMvluTNK+xnP0fFcC2yFHSVGS+vEDRf1KUwaYVfSKp+HwNvT4n4AO6V3PLE5h3SisiSIVf4OvTt6x1g4hqK1y3mwzyUY5ZWy6r63kCWMBebhqvFCkktjdGRd2k9ZWFy4qDWWQ8eiYEhDp0ocUuHLe9xY/geSEbS4XwasvM/73KWGk2jN4WniiE70x4CM77pRlk+s+6D8GXo9+iFgu546Eiuvi2rWUDVBWmXIN0O3tsVA3Ar8+P+9sizVGTMJ7lQFQ7/zmc6cloFivcwc9uff8A9g8cxTmOgMRWfqV8F2ejt0g6wYXKFUv3/CMQjeE6uwAAOyC9gHibaBgr7jsan/VGzyFF4hEqTla4imTVNZriQUTKCEzt7PUT1juVuLlHjAezZaB1LXX0pcewrVtXuBWee4xhZ+gNvQfmLBumjuuZZM+FrQwieVMAhXnuxVWIlD0Zv+lNGpc8KFDpgSpDV16/Hc+wz5xjH3QhU6B29xNVvB4LT2SpECcxO3GkC4LDC9qnvyDu6vmzSFsHB0rem0k5+oW3yhQZy5LaqgfXWCkcz5tF5oxSKa924CLa21LeipjZx7byMR+hot/kTB7YTfy+RLwB+anUfDnP3YOhUeEZHfJfFz5yCcOXrOKQ97ikDMo8ABgYPsNoUVgyOnt/HJuOSqdKARQqd6p8/H5WtQ+lbpKkLNNlHvPnTRTi4IWltWiK7fZn0lwD9hGP8DNOSWBlotcvCv71algKYmO3suLyrax87Xr09jcYAUFgpTwRyrVH4SimI7EfzH91BRFNF5WQDbA6qU+uOeJ9fSyG/veDK61rRiwprprRc/vrWEZ6PMdF+AVxln9cGcTy6q7Z75pur59KvvKO0tuSGM9703YaKHehiX+8ejB26ZhHcXswdYITN0mDsX1KXUXhllu/v92i5301WnD2huokFc3ztNgiwc/tVxZcURUqluZchZTU9mTr6mPDUSrP0wGnWxg0LsU102p6RK8WWY7y9/9gaqwbecopR5tdxt343lQ06iMq+vQtNceFmZBuLAzxVbVDnvOqT/rI0MTO/2Z4xQwj+4UyAchDBLPanYb/psrZl7apMTp2BwYzagu39sXwPi8Snu18u7Da22m14P1Srg1RUvXPFdEaQq9lxS81TgkijshWcQsqYuHxK5DW+ylHDGUe/oHZ7BCvggRSG/DVr9Mdox9MiGRc8w3YQgkrs+SvEq/8AdhQx3ewPlXDmfy2o1rZUPr5lIOKpdDmW7g/4+DEAGonjMG8LQ5l4N+SwwBzB2ZHk5s6+Lta8Ar0BBpWclHEv2j9g0v+DodePukJbzvVFusqWPnPciJRUs62rSaS1u+13Pala8VoRjobdZ/QW9NN27utg+z4AiwZtPKIUy3DIVta6jzDGwUpFYyqOzbUH4wWOZwkh3fZ6n68oJOerJ
GXdDOTn48gO1ykIMjU2XXqlFkYrZrJWJndhjGSHht1Ufd1nEtyqKAoPfJOJ8OqUmwYG1mz0paFJM7W/CmsiTmgbtSvfEIH7shzJYgy0Ka1UreXvi4TdPyT1bB8FjKNHlidhCvta9YaIZGk6vUszya8SxYi70R/IbbCOnqY2b+2WP3uqAFKnYOzUwnEfvE28Omrglhm5jgvpv84YG7U7RI7P4tJ7TnJ7AZ6G8Bsubs7/Z9rqQxFT0d/B3l77bmOfwvJ6VdQz+6Qj18o0iOeBgbCx+s6H8mdjK6gnTffdqwLfBvfpRGvKh9PrbVk/3AfnS0KQGBdB7ljUdtrum5zs4cjSNAtzNoIEx2g5jriEwH2+LcLlBNUrTFP4ELxGUvRRtZPGf1RPxARUvvFUCgtQLH/fCCWL36qHTMvFmCWSjO/gsWC4FDGwtXoQFZ/jB+qrbLaNXIdYR0hxKOw/eG0N08D3W2OhWgln+IpsU99vpiZUVec2hxbDNtihXUzvie9HFva3sSSdm+INNwqq2qB/kW7MU9wjM+rA5gUqMkx4ErenK924MwSan9GDciIj+CGTfLIMAD8IOFZgnxdhJWiUD2oiEtjgO4iQa+mDqQEm0c5gdZ2it1oumeU2yG10xwIG4fIRDWBVj6opWPpFr5Pu179trNWVdYKfhbXrUOK9xiAALqN1evBoqMAQ0gFEbCcnz+KVik9qlBF1MPeGdYbv8K3SrKKKIxlOw4bYcZy5mctUAgOyTIFbFUdr2UDQBR2S/rKs6dJC+uIu53GB23NJNMsWLHFeKU1XYgk8tFj4+qmDeAZIpw6yYmDd7tob8mYR12Rncal2gBQWpJidplfJ/JB+BZlegIYooikDiToNOaJXUgyDtcByHS8KHh8c7u0XiOu7TjocEmzBvpEKbYdPtcyFem+GXcP15YForor1T3DgCpeXNQBsqzzXltMkRyfAfuaTmuBVj56b2dE5nZDp7FtgsK4WKojhkeWflaOQw350vKt+H6sq0sxWg8J7I5lcy7kecD6vr5HIH1I8DryS3y2KjaewZgb5RNCF2Ribn3qwunn3BuA+VQ4XLdVcN8T0n8Mn9YtGOzEK6zlanfD89MMCmAuAUsUtXUuqB5C+UQqfJGu5pLrgj9dG5G1EoxZAAol1rlPNbVcUHGwdGfAWsHvgrBFgPOl4cNS+WsVkb4URLjHcPF0gGyCfVpK9JTuWkznTLBYrpyjag+629YYf8hfIUg/+Yll7+kNlodL5WBel5PIgSo3wpx444iQ11WICQWSQk3M5l0YozAScQAENV6uv5AdcpehWps5K3VX2SzCJLFzKMW5Lf9On6SzWHY8H4KybuPGi3l5Ej+IKmIvZItEiNuoZiq0w1TG4O7r0lXWFA9+Ju88LUaKryzgW/SHntoakwfRhXSigIfOuY8nrsc0iQYuRPK34a6GmyMsVzvuJi3EmUHNj1MTXUu1LZ4MtAgc7y6LZYNvb9YNEEX3Z/kJiFX8QX7vV5ucXJo1PSA7H0c/MC7nNyZDCbHKFGYJCmzJ71UTAqVWpoa0IxoVE11OcXixjfiosANmhAk9o79LR+VQv8QNtxeM+3Ah7MESN9VrqnCI/wj+wtP67e77ayER1+GIAqyPMBfDQwZKxiV0OYDZJesdjtXdTGByKTH+zKW2MAXSHfC3BhzU/gWI2786b7OKPNdXCMCqNa4d/9OsX2RViBj8fXxf29DK30b1bFCk8IW9XBbDbm/hpR3Mxw+OivBYtAJh5YPIFOGo8VWRJh2Usan2ELnQbTx/MAo2CFsat7MfUDu64xJPb4q9LMRzBt5NUWvB6s1M/1mlHBYoP16n7pjplK/7T8VSiv7IuMJ9Z4EdyXsAMdMGvqZ7k/BZv7IcfTpYavh8GrcEnT4+4GSiXlFNvf0N0FY07WoU16hhwRZ+28vQs8aQlahRQg9hD92pzONJ6/7goPc0VmXx31LK9edoZclJOvPxQREgECtyqIwhWmcueQn3ga
yUMwY1hkHY0XbgpAHBrZghBfnY4EbUXJDQ+hTI4/uhbQ5oWPWIGHNpwO1z2f7QkVu41lTZ93NXEN4qhNwV2c0tP9ECEJm3wQL5q/Y8wgFi0913ZmFavJj0zLKbhSZ8iLZM78Y9/lWj9losYlYFPlT4eKJWM//b2UJW6+ud3AdmPjrpipLzJK2IOd52UW+7tNY1fa3SSYILZPE0xn5da1xh/LzctwJzciEs2P0g22pi9FNUg7YbRqoWWc3H5UBXU7IWSyC6icGTpcJVlAdGxEnmeUTaFGBZHKAkjwmchIr9Y22HRuQmOEmdz9514ghvdtClgj8W7DAHFhXBWhgHI4gIsOT3u0vg6P52VK1aunkDnAuYjcZ89vJR1quBzO2Y2YhQmFZovq7ag+0G1PUJeLnGa060y/o2AMS8HqgKMdsGM8rvSxQigP01IB92+n1z4TjHakVhWpa/KEkvGLOeFUgyXkkV7IyZC5H9ht422UbvmY1nK8s+mbi3HuGUdO6ixXT+QnCSU5Y5VAKcAi0VdB7evXEvJUNTFr64n7UsTdq3zzQjv+1d26r8VVvf5+tV0xm/DZcKXsnELcPwTEacSRa4FQu0rK4ApKd24CtoCZwk0B9hEcCzZFVO+Z1nAeB3JbyyWROYXIoISCnRLyGqVSwnGPD6Y8iihTN64cT6E1LqxBZqUvffeEkn2QZxSQj9eFeFPC/HrWPNY8pGdAX9dgu22lLkViRVmCwWs+kYBDCrN/jI0lcB9nZz7nZ2Md4nZ4w/WRkkpUETmVKSZfnh0MJU+n8k3Ou0UmvY3FmonG5OFJxu2ayWmcbJ+A3FUlJvc+gS8DuGC+qi/9BGyL/yhtJaCj3F2ah6FOrHJgRpby0jItcTJp1IRGojZ+BffDit0cxi52xkl8MN3iX0A7b8nyxEp0SUd6ivTHNO6OsZWK3bJFGDA0pUpzVpxKCvPh0ReH+OY3leupmn6tAYLWLMMT9jVWQtdjQhJ8bRiFFQwLe3Ch/rpaK48i/re7AfSUqXFcr9dnN5qW8fdabT8zqPnP8TFG12T0ArJiyjIHdhyeFIyI0qWTD/UHefXTEHwxWp0uRj7b0K2RXPLHJL+n5+3Os6jzYtwETbm8kN7jx/VGCqNZm4FNFW2r8EfTY3P9lDDskWN2kbjjos7HCkDFGwE4MKyiIaC/nYCcrHifSxOrDYpE2dtlWqoLbIof1SKZ/a5n1fCFnPoiwTozNxbNry8q7Uxy7WbhJDyxbFTczgWZhKRg4w1OVXVO+aW8mGPoEUIDjwzutnodfZpo2joU72D/H1D7V8Abj70gzBLmbC9X+R8EhaSsUOCZmxyihyd1aK14FBDf80irlE8b2wl2IsfgYR2vPs/qi5JmBOX38NNfgXVXJYQwzjvBhTuTuxJ3wgA2bNiT3j23nBKXOryR21wngLzPRYoHNDAAujCsRpnWfhCHRBWCSRY9zGjDmsL/sGpqLrtxJHf24hd/hJ+SfJUhgzPblgYCxBz+o/v/o4Nb7kcZonxbWr78YjEjepDsfK4kVDjat4pZMmcxTL1wgtecGfLe8v+BT3yyLpKTKA5bkj4umQM1TUNkkxt4AMDCUPMDOXR63NJ4PEe893hwV4cXaLmtQtqWx5MriEfrL2hYPW7x94tFmI6vHks2PuBnIuLF/F/74BOgFvWZ2wL8AeW4+HDPuqDqmbR1Y40S/35r0gJ5/bSimr430PsWvrAvSqhqAm854z1iLia2KswxbsSvsaC5vN1K62TEEO9MaK1qOSZQiodX25stUdhPBXAU+R9STYMXWkJAZikurUIUEWCJZwd16H45hB9qmkGCKsU78B9rnvyi5H6vhEz7NsQ2Q+ShiyJPkEnUshBifzg8XFbiZmCn5FGvjbrLyClLIDeoS7zvgKUgaQbTr543U2zOrJLxVvKCu1vf2ZwKovQsAVjA438nTPq4U1WgKP4u/10tKPbz4wzvhntLSYEhzbqnxwYzLfiBzcfS3Yv0c1vo
Z/UJ1vb1QjaIGb/8FWSUAIo8sAN9gk3IfbOtoFUWHC3fCwNjQ9DqLuObyiXfvo/8HRUd4pRN/tO6vfPdNztbxLp3S532qdTF7h/ldDamNlx4mjN3lWZ4RyO0jlnz9+cJAkrJepQy3VOV9YocqQAfaOsB1t9aMiHA53swJW1S0kePRwI/GViblVIujWlJOEOUoNfhGSvZVkslWuyumNfxEFqmfR2yHWEiHRl/5TT/4o9OKNmndNxc74WFSfgmND+r/8vdduLIy4IQRRC4SxRjC3GCJi9/ypf3kNqLEvnu+4QMhMo6yM4MxNomqBOrz+JWacRpkMof0HZeLqNPsA6aIobz9OdNrrj1uJWMSUI8z7DrrPnLjCeqPmKnR1x0t50kEizv1mg3UrS5yv94SJUGTMk8jTQxTBgeEGSeHTLnQwaZwWmYLadcnnhC+gbCHfQ24EblHPRHlpJXm2LutKYyVsdcKzI4ED6yacYttUULoL1iSHTft4jnZKU2h29On3//L0t34Vg1w4XrX1fvfhwu+cmOGIjLdC502BO5VsJF+nzKyjjTWtQry09IQQpaGaJiMsy0RLv/+3WKtqXAVXO7q6umpWXq4tOxFHZNk6ydfhFZTyKQmAH5J5tSszKimVizeB6mvhwHkr46uKr3SfJc3AVVrIxJYrrc4G6/rHGhOXZnbOE2TzK4MTo2NotKhdj7MbLGM60+5nZeLd2NYky21Q/DoHa9jT39izxyt/AfkmNlH03Xv+aTS1epjZMbUnmZ+6S7e0rlc4wIvI4jG+2fe/u2dL/mPOwk4ks26w2U204nUD4PWvgSeS6edYysrMjonzSSEZs6laQ+7yQ8lDVa5omHTQzjbNICJgY4GGtAZx7njoNMP25kDwurnz/RJDaQPs6O9fkUDnbrIb3DHRoEiOvto6/3rsyRI5Qq3uhLojwQCmhIoH+fdYhjKZDjJ/EKSSwHD/2BxbmBzTZx0IsDXVK5SpofjQE0BU1L2uiJtkpGSYvHKRZUExOVWt2/5306xgoaeUZ71OeQdncXQo6nx7aQ4VOC8/u+MEjy55bBdDRFpi9g5gNBtAOEEmdBOD9KQ3HzJnM/o90ezAMxAAAABjbtm3btm3btm07H9u2bdu27Q7RQe5NSfNo4eUGbWEU+4flgJo+7YuIzMsfP6x/OHNAFWof3urYApiI4E/VfqY/o1RqcGetZYlow2VqXNDTaQtkT5HhISP41WQKa9fKxeVw7pVc8QtNvDnqAH0Hf9UvmGs+K6l/kxVCPyOjUucrgG3m+YasR4UWa+SXpSXCUHjUoyRy/RWYgjSEn1uL5CV3H+F5UWCjK/Ex+wiLMF7W1ILbz+kZLa/NEGf1Si8enTspFI42FRJduYYQm7SkiBOiM6HLLRclrKZtAgjA1phRbHC34BswyuOpXRj9PdmI1fkoLPeP1Nwl9+DWzOBmXjh5Td5UcjqFTkdhqQ9Z/nXVDrjO9i6L2nO6py99+S7VEWbmXOwc2rLV/UzyITvdVmA6zApxt8GmemTy1n5ho3xdUqMwp/07UsaPVHBrA0E8o82+RrGIzXF/oFRg2MTbI2guz92jLIZid+gICvQ3jXI3hqOLYeFlLouLIN6NI7JXo8aRxcg2q6Wkkf4NMg7p8U8DILwpBPGPpFqNW5F/4CdPuPrPRYtQEOq6utKkLjojd7yH95iMhSh50+KJ6kNdhoOkka/rYT7VrJWMu78UMKA5yooXwx81YDCgs+KyWnodUwwk0HagVDPmN9czEa/L/gFE+uQtfXLW8UMdTKIxRsCr1Uosrmmi3aVZsdYTcPCg1KkKGwL5Y663nZoASmUwpk9P+dTkxA3PLrfInhFA6ff2qQ7IyeHY+OfptZVEha+s9lACHXnsUjEWKl1ocEyMAIRrKLk1VORRgxL0u2ejdzQ5olQkrwRQ658PCkXm7eoNhsv9YqcfNRZawL6QJFE/FQqbpDw1b549lIBIUHdIz7UbDvXwD+1AYY6Tfr4Rt
fjJWyoOTKowwO3C2JvoRmhbk+O5mIStpzBY8b58Ub19yWykutj9aU5wCasX5eh7mUjrpKa6o0EO5HZK7czRZYJPHw4o3OEwq26UlpGfGLt8KDWTcVBd9ae5GAOxYB2459vRcmddc1Q39L5wY2V3he9UBrdbDXd4YcHya3y8I2myETfC7IpZFV2NicB71T/UmNEUeh/sDcYmfRjnC3K+ufl+w/DN9Oga9hhHILE66qm2erPxT2A3bfE8Nmdt0QEeY3nGGzya81ZOd1wk3w83FPZS2S65Mrp9OWNEoUniNUsKqf2ra3F9DtpPEHw6xFrHVPX0v0eQXjijTQ3UX2CdioM1uqb1ssSBdDvYLfIAae7R/s06U+c3DAqBTL/SluJfWQKFnM5XfucHX63NeuS318s4rSVo38o8SyPwhS52KmYpHGc3EnchZ+IuHJt3sMAAJ4HBFq5zqkEWFTIjE4W7TuTxeYV8uGlvBPpmRMVv63kos+WBYeKZGxdjd44yGm1NTw28JoU7e5Z5RsXGnn1D79Eir9oTAopiQuJ2lizmW0Da2M0cmno+T/NVTzgDHRGIn9vq24zABikyCNoK4Nubi3BqiuAUlRVdmQ5ZozP2vOvprH1H3uBU7AHDoIGwLRQM1Y81WKMso072o2AQYGCj1+tlLVkqBMf2UMkIsuuGFEL3YVaS9hlnkVaWJhmw+nO4PhXbCW/J8+v7y84uxvQRo+Nb08fd5k1O7PQEunsMr7lWO/RsfE6NblrzJY7irNAuSDdXnZZDbuXkqGlb6HBsDMnz8kWKgh3PhKauHPKxzaPHWTalL/5xWt2rW7zRuLXm8+hOpR8blp4t9CHC0ty9Q92EDNQSem554Xv6LiCM8k523/i+2H242Nlr/EkB8PV741lYtDHNxbZcGBdC/vKwGPQKxS2PD4UkeFNPDTU241tsD7Ib2aqzDk9rnXywBWN6CVp/+KNkmgcrrQOQ27KySxxSBOdzT6T2jBhMtH/0e0IIVM4+A5M5LX2kNxVRTxxnCJGd5bkZSyjqGqIKQQJpWYcRUhFOPJSoYMzB4EJb2Z0d1FNQMmEGuJG9Flrc0FQMEERdApy+QI7VMocgHu85tNHbaDFtRTyWv5ipyWkmzHzTXYYXKwE1XDrpZKtJAOj4OSPeB0su7198XZMOZUm0Mt3cEMrPA0VKMn6ZSfeIKBTPPeRbhmtnn+AlEjzxJKYvjp59k3nnaNfwD/E/gWH05bSTw0kEpDXF2rDdcsZu9GzjLB0TCHDr1EUmBmCuDJv14zopYC0kAVi16geaF7rsRpsxmWvfsqiNw9G+N78xBwRc4G08Hykbq9lOEUChWY5vKo42WOBacyOEV3Wopis6XO0+ItEOujWn1SPjWWpEFu7VKZM7COBvhEqbeIPvE8jrUaqBverYU1LqIXbmLPjFTtk0edZtANABqZG5pz/kmKlNAipk2+pBEH55wqzy/txivkww1YuNFkgcXIeY0lhXlwMtROpL8WyX5lIxkP7VMVDG3F6LKcMbvgug5uCwDuN1WkTKwhSgXYQEFf+GvLe429zz2KiB/xn5hvTsufLT6gAwmfPDxQrF8L+/wTG8nRSrS9HGpqA7ca9CQQf4GI4Oi5VTfP74reN0mwStH0SMq/d+zjE7TxVDvkSFBamw+sCEhbS7Ivdolj6xqGiWX7OpxeByXMjftrQBZEcIN4+tT2RRBHkV8UmtwfEeZzZTCirCFukLsMyG85Bs2sUUu6chJiAXXERVryKYVMkmxQITKOM8Rh4kWc+hdalifzCQA1Dsk2nhuh5Sv9iDmsFqAwmIv3jtBR/npaCFNCVeOqTVBKCwPjgIO51o9+fnICoMnYnidFrCPX94JgVoWcDmyEXgqwFdZwOKwZIcNDhMSUnJgBqrJQPKyF4PgrMEjblz3yLbtHyJpHKRqnFymehpo9EdYlOxYnziuyLS9EYeH5+jT4vABw5GqB5XseNzKL3TGGGdZozG0T3gWDsIoBXQ7
cdMiZHzKVx/eJDx4az36Gk3DKx9qAIgSNknAjLqMAxkrOTBHVcW36CxASpTxtxh9BRMBp/u8xWRjACV0eDnGk7qDjF/fchBdQfa26HOBWNo2fx4HCXlsjlxtj9dGJydDVygCPEqB+6a2G2GrSfieX4+hI6cvV7ela/phjGfuj9NkrGDFnV5yj5WMtwSaQ50aAzTH1ETdsi07MuCOiQZwhbyFG+3ntNVdOgWW22MgcOUUW/tqyCjWmn7njMydzlm+SaHNyr3UsTGR5KrjrKTPuvytvNQFhVGYLZT+TNtcpcbce71x/ULhDufM2CA6Gag84/PY3AC0U9Xb2NUfOi64TU60PvY1TJjjltW6494kAK3tb6bkTDBGtFxT1KFTErxZnl/w7hNLLar3RuKCB3icCcMhgrJBhWw2UvjN9To8jFsGcj/QLQ0ddg0sVjeBcvDrarVY5iDGBLf04dj/o2eafrl0ReLThuAVJeGK2+Ks1Ni0D0zaOWDJjDgU7blLzup//2KuJjoNJG00Mk50nzEBXWVjYuNHc98CzptoUohvjnKdtFb/b0mqDfb+Iimrrr9flTFe2CpBxGflfE4v6NhfH+oYUziIkhXhe5HxOWpU82BtC4SPcnZa2zWAOE1oC05+5GJ2rVkcPuro0GOm8+zacnHzvyGgzPLMvwRklZzBBqXR50YxhiuUWjq8TYa7eHcGDgT2JGVzYbeWKDb8yJtouLBbjp3mDRd/2g2EPxDddLWbUKchI83FCoUklcdGkrvjkGJNIo+KMREHWx0qUKs6BP3rNSKFFZCi0hIh6eyAoYAWIVPeBpj/5Dpm3/eQxe5TJ6slZCuZ5UspBXdj8Swzp0oxDRhYSFLxd33OzRfK6kegtFvJ+WjAXKSIv9Qea1MDkfzsUXs4AY+vUC3xUrMTEtGo8hpbP+T5QU12rxrny65b68CqnoUUJmIe9+MyATingxnYd5JnjIzlz5GeFU7ocgWO1eHW1W+vKqXqirnVbV83QhMlwkucK8DAnNDisX6FfGjuhIgwjQ+NdDhX1SMiXlyujy9KW2WPY01NQeBg3YKKWQRvTd2x8pJRmgcD1jKgfLwZSJTRW1ua7gsqaxz+2g25898mFNOH5E/b7WCE5bs2LpYWYTcJDfu/7nYlxcOXlMrzfh5mIh39MzSqaW/M83TimyzK4GzewDLGodDHQb7ZMq0B5n43hYIIpuVvkSowuvA9ALj7+Oc8Jrh+hHH5N3YMc9S95YZjL0KZViue4YCD7smWXK/UqBzfaS1JEo7hWo5u66/MfgXSlDDdW5BWgdGuiNh9wwRCl1pogAt8/er3r+M+ZdRBLhZJ2dWrhpCJOTtIU32V+RoJUlZtDK2f2ywBkXXlv45RHg7xmsNVT+tWMeQ94rykkpT4Z8DP2nBG/uv4JSVH1CYwlBhNcYTpnHuGumuS4kr4XhCrHgiLiijp0+xsSkGZZTr2+t6+Kw7IKUZDvNyPlz43I+a+CvcAbCc/i95Ofr2nYazadcV4SV5dVxKMyEs24GxxrOlmWbq7HNmrANRZTVlQSV1bb089DlzVCseUTds0WeRDo56Bu9eKCdns5Ev2JUxBc3KXyLrbbJVBD12oWAbUN56SaC/EJO+m1ghmN0U6cex5v7ElW0HbFsLKYA3cKq3tVs4ev0tR+SO83LLm4fFOQoc+x5DIdwIeHACtVB6P8vZVIi38rH2AjNM14n60SgeXsmrQFdBdk7qiA2gGwdC5hAyPseKGCegq8VHp0e0YIgF4GEA2OBJQRf1fDaewuCxHL+H+dO7XCKtU5ykEHcfeOuyMyVcpkCJflU8Q7ewQLnuTOPgUQ3Fr3pkw9AFV4FoMCjzfNppIQFiU3QHr5B+nk0o0A71/CaDXI2SdU+5b2L6aUNgfiqLn81W3Ir+CVlqGVYFF/8Fwp0bTuH+D1YMZdG+TnDrFIeIN4qIJr10dv5fjgnssFRz29U8h1P+u5LtqWyqSsw/jNQqrvIeAbld7VGln
YKRAvrHfakHlWOTtkRwLpe8oDJ4fLdNrMPb/kb4A0HABkN4it1EzhuEQkyliua+uiFsB8Gpt3ikZSwAGaGJw4aFB+k0LKzM061K8/AFSZ9D+UuopsViDx+2J/v2ZoWwyU7MyXJW6UO8bwZb9AXJy+uv6HNB8OsGPrYd0Jpdce7FobpwgDVl7IDsUqdYvmV0dMGOcoRNG+UJB8Zl5+8L0RP5n6reNT+CFbqqVxrQg8Omr5sUcEcKND7YoeiQBtw/rV3y/gbpmNFi8cV1d1lg/VqvPKzztLXE9K09l0X6F7HZTD8jPOxsXeCakux1aO+jZU/louq/+qZG+wrvSjw20KGwChk7fXnNi/vwWHHdCsIOHRbOO2aDmKUVUHwkfln3+I95ZoGXMOjtdrPpxMKYz1Wr18SHXTqA8tj4StiroQ4eZ9VYYEc0BWvN62Zd3bx65ijmM0pbohwUSkq7W6qpQ22zO7fWI4iPe901VnpluNjh5P5Xx+jiwhM1skf7zgcS8Cmj+hxlL/e5I9vmDYHas4hCel/O+byV0hCUxRknXzsKa3+VYsuw3+3UIlh5KRXqE5hiIv+wjHyZi7VFyBtALJpeFttkjy4GsmpbJi4/4t97yjzRi/zaMV06vuOYxWe0Kej9+/0xSWC1gl03N+6IRETtZldFfteto9UHmYeNkPq9fcuAyaclCvhHeVVNGeoLnaphszkZtqwWHEGbNVpqP/TDMydf+0xPWRL5itwn2NneU0/fZplSzJ5rU/8uDL6taD4+rAlyB5qL6NyZzlJDx7IkQzGZU0iibaRnuCY5arDsangcUdrlS9Ebyujziq8hZElpf1HwtuT3POxLjWlWRc9wxzF4oAZ4p5JeQ3eSFXaggRBDS3CWVoV8rUG8SWD8RMh7ZrcD4QosWTX6vRiyqOCOlH7ydf07zKgkWFgA46WQ4w/SbGzRTXU4Wlof/IdQ4KOFXCJuadtuokezmtQuXyfLcMhMJU+vKni3esIlAMl6A2txwtNNJGFKYTnxIkjz2ETTUehn/zg7OAO5vetLJorAVrwTvyfhKvOtLOPx0JdgaANHqh1sGbMM+5IbyhY0d6AQ9HwWuQaEhtxnVMaDyeDaqpRbN5A7eSQ7nwWtRY7j2OtPyHdSGGkJ7AIcslgP0s6uf/6CsCvqH0p8SOGB+dQ9et2Hit0bTAqVN0KyAEnvgklxYlH6tommzX8Gq9Wmz+I0f/O3HnmfROJsYX5X0PXZyp5Kbu8KHMrBqvjh9JcUfwcj29HwRsx0hvYzRGnzO+6lm9d9nCVqz8iG3qmRJ4ELSaFM4Bcb2NHIWCGpL+yOZow2WsDm7H0f+TP4lAw0Km2FpTHJcpkkHAfTD4o7rAUoFHlHoMwPLdWAbBYVoRO9ElD6yMQdu0M1sZiu/2ApPzq799oxIFhp9S2GamnTSGwWLUr4dYa1e1aiXl1IXsmjnvALgHRfEeLoaw1JFRuFuGzlpGSCMctLhLDv0+1k4Uch1qbXcXbhd8wqH04+3D3JEXe+J2rPhrPrWJeH2XUhdq6L+xghs4ZtCvEKdpPtYR5f+yAKdy3Na1SmA4I3z0BXf0eftVl5JcHGgNCm++HhzR2aYzsWC7qOP9jyjCkVpeShF/AZGQ7XnXPVpcIRP/+hQr0+fBWWDjXkk64xdjlnWHhbE8CvUaozFqG54AWbGuuFcohnnVQ5vENjcGK6amScSI8yjxvbcvuQvlpTT+DTkNocKS2WAei4JPmNldfNv51Dc2OcHCof6u+bCr/wMP5IvR0nxjLbQ4fcqiqskzdnO3efL3sNgofRupci3NRa3OupvJuhcxssf3p9eGTPIW9A6OZVNhMO7OcvGAy7y4BP1iADCZGy8ZYMRmCns7w+Tjg9eJKGE1aTlKZtCbtS3rg/Wcb5ZMGBn4g+JdUj2ocmn5kugewYOI0lxycAAbrPObPmEZqbHpOfUzApvaulEO35LXYcSYrk5l+IlaWXTndgHPvNOgjsdI0JX14V1
3zriHdjpUQdEzPYIUQXrwQVHsphT6ZKbYlx8POru03Ft6WJNqAz8Ikqml5sdJHOu6tfuHUd2f9ml2FgGm+bUGxTwwvgS3WmmOXBELnzNyYbmslScpSXoCN+fpsUZk4GGi/rpQ2zVcoIszX5cKODccgK/QjIicwPpnBqDmpKygY2hLbQV+fsp/fC85T0ShFFBAcBg6Tnqx7fO9izyCFbJaCij8hPjdD7doS2eg7d4xwzqTOgl1GU03mMJ6J632rkRtewW0Z7Fo+luGPULItwe+uySguejTZ8KF753AmD+7EM3NjsKNcmVVRQGhX01GTGA+tcmiViwf7GHUGOy2xs/3Wfdx2C+/0OcUrtwd2zH5UEkJnMOC7+cKK98KMFaj1nzjmESeJpqHV7VeD2D+tCdLgpSkBBU8vfwO4QRyAF2MBP71X4NzVTuYQT1pq4b2djpDIyzxRg0Hxp9dEDD4xOhonGxNoXhZzX30hXSA9KsbPUwU+EHQXNy8/UgO5xvvxwoRiuXGA8l6eR5ORpEPK6COirG/RkhJ4nyYzN4EAIxVl8JpuxetNHJkPo6VOPLufPvKx0stai+HDsGlQLzmGJPEHxcM+UzZj7Wylp60I841l+WAuBPJyI9gzL9XtmOFp5m7rWk+pLDkGs/eS39bIGCB9yvfztfNj1kDgbD8odjZIPvnB8iASkIKXJCIQfdk7Aj6yH14wyKRlKJ7Q9R0tRltqtdcqKcltwrpVUEQ9ac/7wuwvMbACBi5wTfwHS0LYKtJ1awFMYCvlPvvgfyreVlUoM1Yct77Mc1uu4yqlomlhniZyaVW9uNXRjvKj3LS8eQ4x5ARIosSMlqpy6Yi1iP/4iCLJvPC3sTxjT5FotUeqbGOxbZOlIkUtmKwX7vBY48eU6adHmuVT0/9TZPyu1Kzf3JWrofvtlCWKQl4NqDHhrZL2UUjOkbUrtbmN84qAEQO0NvmlFVxNecomC8sZe0H7YhoOerKpoz076bn78YietSXe77ybqZWDbyuq3nKj2EHFlWzMF5cWh54wnQO+wE7GaRVlvFJQh48VdNvj2uVy9ZvhEQuk6BgckJ+YTuV+/mdrE7SgW/ewHCI+sXZBvd/6scexN1VgLs9i6q1omkGdAq6tRlZPeJgUQD7dsEkaBUja1o+0l4hXBMWrc7Kugtsfa0a1vh6NMfnUFN4iBPgQfTfgt5at2/h7CPW2bjNSe8l/p4tr17JwUMOQwGRragmwuKaR/GVKo7bDbLIb9UNxQm31m/RuSuONAGBY6Mhzf3nNTN6/He9oa9N74fLrpxC3q6ysW4VD60CUybUc0H4cjz4e2Yn6m5IZxG8SL6n+U5LQzC1TPmuWgHfeSTn62kh0SYZXlbW8vAevcAEIr+k04ptPi84ksCSGIocitD5BqOq5aiW6USKPPewcAn5+d0oECzKndf/4FNFD8IGnSCGX9aCnjIniNrqJEIMXWOaqLZW+EIBNq3F7LCWFBbjJcTIjfZkZ2qP9M29+foJ+HF+3M7HDo0bMDLdiVLmA4TdBFvKpf6BqCQSXN1iaSr+/dogUGoeNxiqkEK4QitEL3TqPV3zCOJk4kwhbwxXMsE/oZuzULeho6Cq8CKMku29Rneulz6e+bs4OYJ8M/GJbyhNBDdavbh4XmPktmKtGjGwgsLBZFsoCqYcYTOHZZl6o4hTLjTHXCXtVN0f71eWkocNA6zxOwluIz23V1v0lsohywSzYOzzpgtzoijX49HN4O8urQuAw4v3YziIB74ekNk37g9svg5xDn6v/CzvsS3smmfdqaLjAGxCq9GD/l/gwBD5swBHAYzVF9WQZOJWUZFqMHzzkxU8fxgXZ96H44rMZ+NBIHP4d5BGqpRcudZcr59wygbRJltcRpzSAoC2DWT/U1i/Y6qITN6+RzNYgX7Bkac53lj2aiSa4zeiaBI2so/DgsDYTqKkvA74VvjXpofNnXId12syA07RwbIdZ5telXRUERYsVPh0HoB40Ck
kyR0xFx5CwPrUiUhm+vZ4Dkw/UZy+y0BHTcXziiMmiDqLjhM6hpCY6562rSKOTrZNVjY2PheFYxd4yjxJh/zwwjA8bwPjYTPTckuk/m5x9Zw9uvv77PCrZ4cXDd9rpAkXicZvH+NKgOcwXYCPNMdFwYkvKr4M4icLrIOah4TNiYX/yThGxKO1shtJVLX/PQQ8eXx07mwdPXuCjt3TC98gE9WLL5zVAusl/3x4bBhO337XCkhxHZYFLf1Gr7k9o4KfSLM3OPLMTKvSf+Vmkzbvguj0MlyJPuJ5udC0YqrjI7KVjqwuDVSRR8eSeE4w3dC0S6SjtfKxPvbWOkWXiA+BY1mFvwdVqN1ACegbAWX5gQFZjezr2VuIYjK6QRdSWCBF/BGXTg9sDFTMTqoTks8eGf7iJMvTpvtV6vK+piwHOyMCttHtLfdL0m/vo48uVyH17gGR+O1fsFCc2k+6t+wbHsbZmEmn9AJ5DmmfI4ZA/kLYb4HiQ/xFzp7CH8WBuxahtBPfoTy0/+pQV470eLYlMG+aQhmujB16sCNzU6qHDLI6O1K3NeKc9KelZ+W7FFeTgXlt73Em5YyliFavkjF8vz3U/xPH66zfMIvdXowSenGhN3g0Ct+WkngdasrUSh0l3mLb/OhdAwaEUyQKksFevnnXfBq423eH+cf4MCH3FpB5rXp4zIGYnzUPmVRJ3uCr+c6fIJFLyb+Qu+EobBBVuLoRyYGiy7veQM1Pymob5TUu7b8mQremFZF07J3IVjGm1Yn1fG/L7UJ1Gv2oJhAzU2p+Pwwg7pFgrdGumM2Z/vaRYLpOY7CXJZe9uSaSvmgc1jorsERSQfqUexX6x1nuHV/7G9LBV+zzJDCkbqL/B4XL1pPRhGfnV0Mq9UVgOS2rnPblC42kQZkuaPDrbWSnEcQo+RdLSZh1FG/qoO3d4UfDjNpLzvjd8P6Y0ErzBKhpMqKLg5PhntPFHGxHG/QBD3+CfFW1Lgd/EbdaJ+M+UZZeafvb4wwB0YiKN2/IyPQ2uEc8zWUTDRvzqWyDj81JuYuRhsiFIZzAduHXWd9TxiOcU2TkgppnJzidIY0TDiJc0q4kSKaUYfjOUkV+KvqaSAjKrl3iO5I5YcQ9W+jo4TLGqDRHMGCITBwjWVfWLBXESnKQM89E8bE3FcAQnZw97Yba8ojeCgtxxXIh1KobeOn+THgL6vX4Qps5heeOJaDzDzVcEyoyWxK8sg8v42DuHoQL7j70reF9shBRF6CVg2d3ea/vU6ReMNdTPws/HrTmFBXj7YW2gTJH7sLyt92LQ9abjsufUL7gBTL2U2ldbnJ8x16iIHzx8rsj6SVTD3lLKIsN+9lAPn+1+JsCIds78bYc81yxo4QOVGDHsVWM3xfKzv6Y1lzg0vj/X4HAJ+XOMf5/x4JodoOF3FWrVrLOLu18PwTYgy+OA/mDNcj4N5Bk5Pr8zqawH2QMDgdjQWImNJttgSpxQXqkIsasmMKU4wJA1C/m8zm1DBglID09Or8bWog97djMShUAV14v3W7YXJ2h56m3YrMLAY30DRPsKdJFcVHJv+iASTc7QbXnsfs+BqHpREXngXSc2cDkEmIbHSvGJlf6g3TDHUKfcSTCbgNsOrfIA2EfQTBORHIQx0/rgrOGJucKI9poLkpW2Y42zKIpx4Eo2azUf+ZxpMwPcNtmHDgG//xSPBlx1JGVueYjq5PxKX4HJo3sqxHNL0uHcWL/jkAzciMuU96GJbxMZ8iAe3XM3UR1LpMpn0mlq9hkfrRynopKIgXjkA+IH4JE/8ttzgZgLAYpo15CPA8t7LaGx71DsWs0CurOkOiH8JCe2eFkPM8NmxRAOl1gn6zi2JXmiyEA2cDfOpuqqadsVVEJRfKUBCwSS6flBRpaMO5Ngl3M2C87Gx1KCkJHZrZfi/WRkYTNdOtAGd5KuNDOu28KWhojSjlNh4N+qqS6WDxj1zulAdUMtbFJsopzupiTxtmtC4TrSHuo/gU
HHooBstYbJCsuc1gf6wF4/n95cxcIzgJsQzo9yq7Tcuv7RvTUMJGX0lRE10iqQ9HQGBMwK9rrxdSTEm8h/l4UPBz2FUNcN6UCHKWcTwoGrWQju1AbWSnmAbUxX2uQYQ2sA4O7S5Ts0ivXzoZ2lsgBbHMmMAOUc7artTQHNnyc9914o3yR72AKSTUcE5h5qa9+w398xwJFtqqPAsDk/vEb8P5gMG7Ns2FstoLGEqhXySrGBZfHHWUFfhA2ar8zzUo6MPfZZy4loujryI3a6l51pMz8PX8HtjR2+YX2xeuG+mv1P6NOI5c5PPrDfke1TIdzUELH5JkfgoVh3Jg5bVyKi3g2+5ctUOYcHLdQDWvHWl3Fyf9PHJ8gAZAEX50go3f+RGzMyhvqMYRlF8Yvqrhbo4MiFftDkC0A57zXE+A48Dg19IMkad+/KAuIw8FQa8SqjuQFmLCWqkA9ZchOO00PWn2p8Xgha9cujQ8W3gHUi2ORpwi1lwZzEnI2YUgcgjP+RltlPC/fvpoXv0CDu/SLaT1fvEMnajP0SL1QVlV4AaXyPjj+DuZDDphuw7Q8tSqBLjPxB4LfrxQoUWCbSp4XoSN1U8mkDvd6k/a43d8mMIvxb1AyOOhHBoH4+McxG9zicubV71CqZSxBmypxxFjoLhjwHV2XkffIQcB1L2jTiPXu/z3MaLsRRcmSF3fM2Dk7zfZbJKW+RzeXZs8+E+mw1Y5mHE3W4wUgrvEgcvom93kVUg0LidNd62qkaRJgmDm5U72GcLdR7p4mzdgwouey3rYb+aeW+UuTH2FBvv3x3E0PAvP7xCXh7iQEKATcUSJLwlT52RwD29hfUBGIWsFpdtA41oYyN0u+nButjAhnBKvMMGoebDT8PehhwvW01nlI59Ud2oZssqxA2vyfF/YeFNlbA2tKzeumk7iYarqDdPE20URxS3qDnpF6qEIKym6St55AeEA60C+5/30d5HClpqwFINAI1mV9vDTFcf6jglwf74VyFgDH6ahhALvgjArhHmp0kWGGh33wlDbfBetFSsQhyFNAfBYnQpWsLaqqmlO1Jk3XpVBOL0V/kOt/O/ymBrsRrIlwJHt7/MMN3oXrPFDkbisfDmxtRnDuwW95kw65TdH92V74/zNvp/beoIpFHDheDpXY7/PlmolojzkWaSCmHeJ7ZCFRdDQ124oOixYVsG39mL0lGtChJ1o2mn/ibvejiZk4C0qZ/Ozk6+1zCNoY7tpYNmuvutUF7ELHo7lcAIWYgB1RGmy4QGpiinxrxSzH8uCkLFh5He9ruKhATlpv1HLjQy3D/a5SI9DGco7G/k/ZSiXcrUIGkfavuyx+vZWX4EQkokWA87ospkdyiS8GrxyI+6eJQg8Wh8n/VCYB1H8HooKIs3RSi6NYiPP9PYHdTngOsSrhtGUvdy58mAJgipWNndq/uS2YcVZQ1g6OfvGGN0wS2pUTdDbsICGd2e9MJ+AM5wdci+RGIRRCqGD3USLqh7gITeOZi32xH2spPZL/vOjq+1iuhCE0tnnj2SPX8TgX92tKgsuCDokXuPj4pn9OPheMuJxjpxxDckRJLz3zBqK1+k4+0xHwLv5gdD03S3A87UAp317+oQAOX3BlkUKRuZqBcSQGu0bC7XDjatC46qAVtvhTolDrH0bv9c9+vtMe4ga5HxjMTJXpUHh+p0NfezRfybGGA24oHtrPbZyGZUAVQrsOSylbtLVVFGVEiJENCS6KibqO3d8EKIE4xVAxJQWtBUawUDWQtVo1v1Y7SdQSCHuenDHQ+Qx6Fvj8GkDhXJAakmdlmUCJISqvv16dQDhgxz46qyqNgiK11zSeQZ46MMjFcaGHL1ZPQcbvaqg726f7GmC/uyfk9vB4LgXsKsmnt+6XJBlIlZuIqJYrCQa39PUHe0ELsdR3SqvLmg9Ki6QS8pAB6PKrPiMlx0uNLqwYaAkJSFvA/pmcvHeZ7+VqKHJEfWvPH+/TULMzcQtf+jClndC
Gh4ZCevBR95oSF2FIFgML+JF2Gkw9njihxN6A3lvRYnZwNWvIFNYLSoEnPrOGvZbVPYIEpQ3dIX+x4TbaHBD6IozgM/BZp2c1KSkyETAZXmuCvzanHAuOPQi7RC/5cScOZ8UJZcJNMghUlE6u8cy813tf9PMefFawn8I+aGeCo9brnH2XbyIsFiFZjDNBK6M6G8UPsCtLINZ4rhyJWwKg8wWerf/KpdQO8nhxBuDUXM060mgynULaetld9oEzFKHLjh0CrGe51oBYmyC0hmT67pyrlB3nPGnv3xa5nFWRN34sbQKXPc42J72MP5lb2MgvMXw23MuOTjBhGsqXgnx5vcc4+L6ANwnaRUqmeu1bYkaZPJ3rniIlu+7dKX03BP4/TZCtM/H63z8I4DpPQIMcGyEmKZLdzRgFFF6dLj88w6HaGjCu1Rmlz27MchyVueKDNLeaVghm+A6tVohuKRRjC/ySs+R4Z5Ax2JrMis5UsJf41MDYPk77EV02bPBR3/s7NyiR9SBxVPw2PV83U/ZR9aD8a9fuvp+qJvCbyxdbRN6eeIAmidTnZIns1ahBkTx1hPaUXZoyA94u+YQCTRMNA4iF8ICwtIcMzioOYJVU+m4ZUbn4fBLiH/+1OSg7xlmL9t4MG5qo1cKmgHpZn+FZm3CnErgXspkOEgfl3IiIKAYF7XYxE0272Ne7nOaAbzBvXUq23gAGngOgvB9QI4CjdgQwlrbnpiPCIQwcLrMjn9foHYWBJCAsy3NnF4FF6X/pjcv+XWQkp662yoCWCrZW2HKD0S0B2pe2ROW9oJQfThjro2Bs00w0MA97P1HyP2l52Qgy6wZjXwv0a8aKzbY9BjA0FLKPtJZsyjNTtRSpSnXiKA1lLeqwVMsHjUCEaCWqykjgOP2JakjBc6SwFbZKgXwhoeYSNhyUucA1NzkKEM9pTf4OA3Mfax9RYVJRjme3Q3To3woYLrfiWXIKaGHWwX4ybwqyQA9Oa/+9Ve1LyDnJ5hWyHbOzluL3AO5TeaufncoAe1gDB/TBzYfSyWVrsp+DuRHXDMsj6G43U2VS/CQzaxsMRavW9cIAgtv8tZ3CyE03kavul4pb/GQSao5X4wGiTc05GNmvfZ2e4CivIzpgadnMD7yxUL6qWDn1Kf/UwujqGzSFjC9dXdKb1rwtsL707L5lv/L2COnk6RRsj8tXKZhHrN7pqgUnaY0MX0zDB1/aXHlBwFTAF3UDriRYEP40KAcTEuKlV+2dQ1xLyTBSeJozRZSoDlzoAdEv248pzQFLIY/+D+ojYiBvxncoLo9kfW3GiDIOkR/XrN7swrThjZwGy4759DxVQzY2O7A77OmBRnBiEgPOO2woQpdnyJPRNDRfvMESLINgMIeRgNZ+kQ6+CBqmS+nl+pXXXg1vnwYhuwXROaYTjGmkLIcDICiBg0BnJ5OntJUpWtYOub191UfMdjG9q3ZmvTigvz2QHLnAIUWUk+igmJawPSNSNg5M3xib8vUgsUID/IkFF8VuEhqAXP54pG4MWzUs/wu1JEtBs4oNhof2ULWAJbgLtwgTqpMteGHyJimKET0DJpcLg11mYAQXkjC6i8A3UA7iQephW67lmov6WfsWS58Z/9sGfZNNyAEDFSh8AJu7SUorjdkD/Knen1mcaQDEOslHXm9Fzl16pzH81D+Syx46CXnQ4xU0TXW5ng2g140cw0LlgI5s0LfNiX1zu9jYOsWZN+7UeGtGZPZgo1Ypn2OgYji6S3GZJfekS2VUoIDe/gIRFueuBC4YxuEAt/TJt6NxIDwPEWQ88Vv9xrX6a7vOCW7/DGZKTw08yILzp/gKztGVw2k4po4pdh475nql8pSbWQVw73vZGMwN8uaVHns1NVgpHmR8gu8ug6a+2FSIdcvccT5tnWlu0qozn6hAMaVTN/jwH3fXrMb5HaKu8rVdVP+m8rbY4vs8qJ525RS9jFTTkGlzDt/d+1PBgUA7u2p8tBl5CYFE5F1NQaq
9MUvDcBCP4ECtSGP0NyaHe7Jj1fC2jcvEdyA52KRhY7VXhLRjYuEAyCsEG39RYUCQeQPzBcnBPAjjIulucfGzrcyr/5bu9LzX9eTvVs7Nv42CmDIMzT384v/Ibdtb2meug4FKaWxABeJ9a0fKDL13AM6p5aSixyH2IVpXp4aEm7rENcw9T+NVt+VfHUVP61YefW00soghAUjW6Ghfxe/0Idc7BysqSR5O9QbueHy7VwEhc9inGdNREWriJzgUrYFkT9hX1iI3Zn4ynYYoDxcdmLfjz1pb8eXsPa8GWS7RKpnLQZ0bwBabSzZMRMgm4i49Lq2L+OES/0fNn5PWPNu0SwyyrSKi7utjS1m6qRl/ckKWCiQ0AKyG6RaXD//cPCqnLX0suRg9sgX06nH3LRoxgvu4gp83Mbry2RYwjW91whxg09mOZLgBfqyXkkmba7Eb2145kjXz3TfdpztR9N0cys2nhMWSGN/a1uRNpX9tXM8NNKF8M06tKNfauu0v+Gg1RKDvqnpAUrpHVot02Hm7ERGR0vSIpJiLSDeOvRfeFBOsMU+wifE/jzyOt2ObLMkVU6k8txQyeyUqeYW2R8JkQt9vscYx+mGAs+lOnDFZyiLlzRnrSCO9n0jmaI51cH178HRaW8AvWK9iMa9Jjtbleh5ZBp0afy5nSZr8+KyFni4sz928ig60FOsrSivTrysuqdgFp8MB4+GeC1BYwAZUU1yyKgBuaT2FERDo6wTv5Iwl3j7EHXH+haj3/P6SBHk0dQXA5K7Ug/FOLv2gW7CILWk4YjRTMNWZRsOJl0Dyo5e33KwWinj5t2k1Wjo/CADNCNZKm7OOozUItU9T6MeZ+D0D9nQkBY5kQrbfIF6GNhb85UDASF5kVXfuwafeobncLc+9BnvOJYtw2sW4RYtE6OFGZ9JClvExbMjaqiUF1PDgEQ6mGXJYHKtw98bCNqvJ1R0dKRsaurC+mwBNseoJxlagUIFN/hYchV2xf2MYNTokRUD7XQdYH1Gk3ocWS31sbCY6FSNfweSKkasDZLhSqGm8tYKFE0iNJ5nIK7gVb727yvU5B13jHuHroMhiODDJio4P3yuXrJe5+77BQI7h9SqE++q31YDkn9zMzdeZgL/j2adFztYbhxdXZpBH32xzcR4L9PN037A+oo8ZFLmyk4MvsNojRfOjBw9nQ1S2ib9a/uybkDoDtJR1t6NxmWbDogRUh328cLuW7XhkviG8Uf+ZrAmgz0CEzyPeGt7wBh5R+GzBHuJRrnR0WrzTJK0Ts+BAjaYITXm2qliDFuaTzwgcrQGuSbLhKF5FahYq7lILK85JLa/G87Wygpty3xU/nw27ADx5DsZhZP2QsMUawMMC0wRi5ZJsIHFPsvrZu+OztkoXS0WqGlbaf0aGuiXMW4ynBTz0ip/PSgP1esHNF5DrERzKLkuiGfF7Wt50hOXUMAqIQjUOT5WouLC9lVt19IAXei5FQDvogllkVfgnryH6+U+fAPq79e6x6t8rOY5Jz/2dMqVY5XX3lkY0fstowe0DzRNbRwtcC1yObJoZp8B9yz9nibg3dVpdRu5OMtVUQwFVYGgrIS+1FlvEP3KORSahKhBjS5ibUSg93uI4P6orLkmZi4NHbvvv0R7Mp1Gc/czm7/QhDBTvtRijO28WfrLXSZR2fHMXBgyZHGhpuOHjv7gR41miUEBxBL0stEprpd0pDSIi50la1JtvINkmbl5LwKihE47gILv2P4q/eJsgrY/aEzMdJUc7z0xXdSvMTkO/eydM4SeI0Ijp4vvucLIGu+8r+pVLB1gooC5c2ad7HBQD74OVBQn8k0vELELbjr2rWzkBs1R2ExdKO2SX0/gWLSxVriC0ETokf3x6teMFiQRuqHy3MbmR5kTBBf45w2L7WUGNo9m4wCfFSSl17H4En7YpONz5wJ5gFxuQqINJr0bSLaPwrKL9I94UVNml7gebuSDSKZP4eNso/YaCpNOEg/AScDp0dL4
5p/WBGMZGBFqOFM0QDcBJvaLMxgVWsZsBc3srAsvohlE0tqyKz/uC0WkhIXV+7GktDrh/KctTk35TArLpfjbNOimohvbOUzpX/mkcwPK7ykAQdDVZmULlJ/Szop+8SzvOf6mOtAFE8u89PE+g95/xKHrsV/FJsKuzpgkYPevMnVMtoIAUkdX181DyfCd8xKK/Hf4mZHTObsT/c4mZkPj67a7hrsx/sglhbRB7b3lAXZ9E6XpO2DzwX5g2o2EcZ9LN0BTuErt+mSV66oixODIhXwsiQoiWzu/802rJbHpdT9gkfP4D+AkH1UYa6SQWQLFNn0pUAvsgBsieZlYaqfAlSSRGHFEGhQp/vT4ny7pqZhLmlUDjfyLcQZbyoOJumU2WpCyGHXRb7oD8ev0kN+GM0ih/pipNkleSoYXiqZK/iU7xlCfRXt5nQPw87NhkYRfydj893a63UIqmonO81BjDdg6C1Gi2bSt56j2uPDXexG1oWQlmlRhAEQv90hIxyyUca4EL0d7B7J3O3k01u802CCemFHmmTXJuxTH2ZjMsNlU8lu0Lv+ug67n4C8W9ORSioJ4OxtTuboIkHmGRYXn7whFaXlFa4wmM0gCsd5EktN1d6M93k10AwMl/J3q5K/DghdEuutLEWUSLE7mCS7EMWkE/1jbYJq9n1cGYJZFj/RlQGJyoAUkdJZ7WnLMCEiUxaCn+diXAynnbudWRaKFQC/yUnyOWqog/iCW6oGMVugj6jM4DiOXyEmhDaOZtBaAuWBI+VsUMeGV/s2MrJWUC4Cv07CkpZQ8zOZNmcoAs9l4oHnM/WvH2vmPcURWFAHNFcSndLlpWCLUYkUa/UTtacK+pSx2ns08w+Ly0naYG2Gn3pDg9Mn++crfODdI+3zYbyEHITv+U61llR/Eo2zJiR6DBDSGhZR9xSVbrllYEEMTMA7MuBK6+wy4wqoXLLJ540JYKrIPuGuAe7AfJd+6iyzYRq3peaFTwfZmB/grCPHVsKzbtqf3/Ck7qCAn5CejW7kUyGXzzYjox9z2972J6e8AipysfGL5PHn/LDx0UEjK3agI435BRKHbpYZkCjtQULRr1cmsMVtKpayFEwNERbAD5/1AXAz1Oy+gUwx+jZPpmQyjuGTeCft2iPoUoMf0arHriTpD2bgibdyXyGaYBr8KkypnP9JQU60hgkghQIuYNmA6fi3dDK1M9pHtGbIbCSRvfQHVOKt4q+fimFsAP+m2eYbnoqsEblOcu1BK29nRaSl4H+hu5K+EbF67W4wj9Iae29tQgfhKtLNIbbsnqNDriDA0DB4Hb3dU3CbYibFA5xSIQKjXLqdyT0oPTE1EnskZiJ7AY3+V8aZ/7YefJWnVId5+by+QpF4H8dQHOXBG+V/dRMC/SXxLFsuSgEl4bTAYYtHIQvalsPMftWaXSnhIPVrj4YnmeotOa3mzEhsHqu3MX3vMYEIVR+leVscuyMXpZOUR5WPo08FeIL2pRhWoRRgbtijpBUxl3Y2CQ0cF1CroIvXLnybmFd39adcD3aYGJWy868YsGwMSIwhMERcILq88ERzZHMbsOhCjQFtg2mXnbb6XjL3K4SYI8IOPzXZtZRi2m7ArMkaZl6/ul8RLpmT3z4HHWfyTDvI/5Z7ubk96xHOhw0ubG9xrzP+U+1lgyjfpvI9qCqUL2wqOPGNZq+DgkZxsQK+iSF3ByKfSCGSOoXAlLyNu0cp/nxBmvuu8S8h1BHGTrNW14cwqAj5grydvEKADcXv0J2jFlhRVvQz+iZe+FFuqNmuPqMnm4NbPfanYTyqpfOySsgUsoxDfOjW1B/NP9tVqxmQZhKpv2bPq2WOVm+aVefyehbKYVjQaF+AuY9/1Sv+p2UGuPo82yGyRrzHwpva50t2lFKoE1epuiLqX4qEv4Hf4iI7rLoWXcqtHPa90IWeLDZe41vN3aPu/AOs3Ya7eyd6hGoCgT3WuGeqBWTbiOXyv2qa5l7p+/FHfSD9RcrS7
IVqhTYDC/2dArlY201SH7sxfqKMlfyURz+C37baAWrzoHUAZ7Yo/rD9hhnOXrTt9dT5EL5ov8ndJiXBIX5k4PUKpSIC5xf4xtOVcjhJ2lTZD7IZ4RJpKkb+P9fiR+ip2oU7J36qfVZHqWfHDbF1J1VzSjqry9cwYoNTY/vJIvjy+3P9syDcSA92NxmkhP07I+mmyO5k2AsxLEYmr6INBaDM18UeViXnT1QD33hm/8WHhUeBBYFeNpgW11vSlX1+ZzMtPLNQHPKRJ3JHihylx1gP0i1By/kAWOl5QPgNPkS9uuqBncZF56SXkk3SztUry0Eqk9fEjTzywjwkNJCShxHE8u0/tmb3XQ842GznYY/uZA8kQ2L/NmIJDSxSbt47bvEqBpiHyboVz/aKLTZFmlW+0t5IZC9RWoGAVL/fAo7xEQ3LJ+XmPuEGg5w3QA/JuEBAZVR/x2mG0/aNHI36/A5CXDaoTlGii/c+71tMb2uI5ECGW7gTvGHOAQ93s/TGHCedNi9hVOwJ6D8IUDBQ6LzYobrZ7k9Gua0/PXnbP6OuFSkvYUqQevv4/l5jNSWLJ0VyxeiItynehKoI0LsoFPJGM6FP31G39Lqf433ZDMTCb4WVyY9eYEfDxpHB0Yore45K2yIOChnScYcCC7uelb4lSNHHLqD9lhMtdKH+FZKVkJ/BCpuFvmBNw3Y5PoSH3ww6bOchA7nmY1yAk9PiNEGsYJhpWbGcLW8VMGKUZDoCzMubLNltQlseAn3fzKUH5y8WO/37eUyrzFLXCImBGCqq5jrzhsntctqD3B5TepBNEu/lJlftMhdshqUq0qB1HSe2L9KroJH+a3QOXKc1XD+ndZp/kbQixRD8uVcVG9RIHNN4KscI8On7smNJFZmGfjKJCU3xpYxzGz8qxerim+YW36QBcfAAexvSYwXQveRYZPG9pc3Jti6vNgbtuhFycbWSUUl6JRNX5qne8Yjsv9AkedxPBwbegaGweCwJO7ody8FBuWlcQHdQIdfk+x7lIN8xn70y+i+gLRflBZAyl0EZ/S5U8Ad3VnNzhJ68Q3YBz4CgpuILBs28rCeJHJhAOMIud2xrIpobKG3p6NBrsOEV03CRj4b1KPXF5BIZvpwbP6PsEoJeWb8hKOe+TC0U0IdKIiVZWB4H+RetKGCk57JHi4AF5oOlCsFnEKXy5ywakZKHbPzy/mRy57DDNkedTLWQqKEssG2jB8HuU9mdUVXf1p/jumHp+ojEAD5GF5EBjGTBC24SA/PoGd3IWzV9IYSIoYvQ44qAfI5aXMPamqtql7zq1lp6VVA8k/a91GPqXV8V2LJlTHD1huNLbwQ/pQJupkQwFMYvIzeeGbofdv9Qnskd93H5zsSSZ17kUBuN7MPbbqFnV3EKdJvlbDdXzzezBqkSARcqqT29iwTubAf9+rYKuIwiZdqa/yW9eWvWXYm9wayCFUmYcZ5EIVeY4m+Rx8Ytqg30FSrINuiDbnONzq5NoOXcRmPPlGvweZ0e2ewqqDmiNPuFUPB6BpyPfvAskhYTa1BYtIg5WBZGObJg6mv8hEvqQTJPLciRJ2Z2vaK5ZNQZdNlG2Phml2kocORmlr8m4R2EQo8WoKavOdOxwTT9Gun9E1CAgQDlPOJ/GvBO8rW0o3oECfQvJ3gCMHk0g56d9zVPc9Y4A+w67vn7bKItty7k/eqGrw6zuxLGy+PutD1ocLTFNO3EUxiEI1/u8kPIBJE9TVMfXxEKze/Evnkirp7LJCL39osevQEUOpJr84g6y34rQzSdupMvOCg1ST8huRVwWB6VRY8tdj8WA72qjteBazt77Fc8BCXKW/yEjgy7fFoupCbEjFp9iCY9K0FFeNH+YusfH1RpuI0q0L7QjzpSF2mY+GHJytCa1b7RcucEqE5e75hF4MfMMVe76jvnpMRyLqmM+RTWZSZERKbFnE3C40XS9bPLA6cJu0j2R06TOxuKC2WCwTGmABkDr9nTBns07qMbx
+7+FgAtiqBj9i+VYSiGTwdXuZNDy8GMgt+tbsWdEY8XT7Ka/IE7Df7Rj4awKk9XYmHQ/b/+P4cTs1xKXMLh8yYqB2fUgFCJJRJU1h8YYuo7c7waP2Nmp10eXkp2R11DHkT60eBUMEGZ7M3GZPy0108PePlbM5eOB5TJSHUdHkro23JlgqoYZomshC+U9V5Y78vbUgryGgD/PdHUxr6lZ/NQoJ1+oGpfooTHwdtMkZQlLDBijMlvAIU4ibJnuk2gq52/V7l9tezmeJJzRzkEvSNWOb5VtgmJ/XVbW8J+sGR25vg3PXqh3DUgQVbUfOGQYTbvG0iz9/ywUGauGDW8FvXLj49kp5jZERitICGShUAfs7Aqx+dUbCJ4FsZeF1umjpqa+5ZA0w7yFLhl0Mu5sR+vNYKNzMdGW/qdscyJBII2hwaTjMWRS4kwRlVU8xJvd50FGAJPbAoU2VGSbJm+kcXDeJb62ENwfi7IapDK6AY/px36a8bhHTSYZLRj8OyUUPyiGGst1uDo+hldSQY0IJhq5wFZ8jB1FE4LKkJtZuYcqxSuAIlYISWjxcj2PFa/WuyfNgHp98FFuZo5TM1xizmY6WQDlCgMw9hKJuIAyXat1rWY7Iy0GjVUqojqCGNbtbBmNql7kr/IQlZ1gTUlLbSRXhcIPk2ZSla1bqjzYON9UTQ6AFMhUnVdEgevtr42LKdZ1FjTYA/6QgcsDgkqsxauw0CKL7WutSKqsiPa3idMomNS+OWdYiOWeg1ifaZ7lMsx3TYKQfzGzAZQqbQvgIbbL3XNGg+AJwMCAP7SyYoU+D0uDc6nDQEyPMUYS6e9r9TBFdv6wiT41Fej1ELJzUYBVdSIoLo9P3WBdHhsUJXvRFqPH01q7TYjAtMe3J5UFJF5N4LZBNcQ2BO3H0wf6EV2dazCXmE64FMpN05bhmGVUdrTU67hSYTzKcS02ayt63wbN+STNO+ydFdMHdyCrgNjC6NaMLsjSPd/pNuDYSAGAADA2LZt27Zt27Zt27Zt27btj+10iA5ynP474yk1O6EwA/vqyXS2DZMngX6U9A1ODcpeLFg7JxG+ERmcxOjsuIh1KiEYLv+sBHHd7fuLkFEiPSoRTaTjcJ0Ph18eztKTBhCKOjEK2FpwafLMZkRUkw4E1njUlRze8MLJPRhQ54yI+/HoxYb7EwuAL615fR9eq8TzGmLBAo6C5u8bn98Yjyz1/polG3JnQDaHYVip1zgCoNxsC18ZhcJ3CLD7IQ1U0YC3qSCj/BlMAudJYN8gFo21rhbgrFKxVwatVs7r1re5AhoLkq1CP7er5GIU7KUMUVkAxJRMpTW64OCKcHs2OoFjZK4gANrEQLpOTHOVMZkHJX6NGFrIsaSF1wnbCnOvTzOMnp8M1NGhcRfp92MP9Clw48C6Ew8luaITV2a0CDkICSo1T7Ow7pusrCTAazeLtt5wH43EFJV6+I5kTC8y+5KJQ4sqWj+3TZe8hY5ze/Cai8s7qrpssBnnewIxsVBMJeikddlp9LaWElxuSemZz7B3juQD+n0a84e4rmf1d+EbIJJxE4rpXVBzyIXH6R6S+qUsLjjSK6ygONMX536KRwNbvs6C3XPtR0jCTvBIVOrnWCvg8iS7TZhJPv6NQxfmPdQDDAjsUime6nTFSdD3cXjFRdXhEnNjNoabdSi6EM7gCj24DxYkwwPt21BJntQuk2+r9T0dg7GrD42CWBFlgMZ3Y8JTwXSfx4vGiXoyQZt9hLdXSSIPF0sZAp6MCC599vX6ROgl0iytz4T5sFw0ImyDOqNidPBCA2zDTvHSLt6uUG7ADAIx9dvs74RHlF5IlzIZsJOVWqhve9fkx2wHQPkN/Stl735lkPKeuMCaQZ2edT3GJFQmoBakwlBk93hHZJR8hJnBEM6IFtq9DtsZm6xmaw6sLZhy0asutr4n9qNgUM+q+oITYit+jJwJZBkGyiLjeQ7F4KDMyGIjKc2bR4LE7ae1rri5cf+IR
VRwzCmbG9ykWqyZEnciIXKwIEKI7m23MxD93G1HaeDc8ojwsdcOPiEeeKyKnh4lIWiyn8SuPHMqRJoroNuAw2FkUPgJBqoJoldJIWqcEikN5ZY6vrj2md5P/QawsQ33l7IHOZgBX1toYW7qolCCux7lzTNVumqSArBJB+MnA3VBUrAb2gY88qUm9b9hbQ0lU8HW0Qv8aiwu9UKTKp1Vj6cxWbollZNzrpkEZaGlWjUc5uDgKoGxujyoC9o8reDQPXz+VEFPKwBl3iyuElNz0eAFH2keFmn7i2mLF1ihqci6LNHOvDiEzR5Dh7DHC/+DDISixSs06ynpnDutTXSmJomlFaPFKQorIomj3QhB+VnWPXHYUhTNNNL2JB2OKdGLcsoCq3WoBHfHbofMRI4PIPiQfFNbPD55EmOlg0PsIrDh3v2NEXIkk8p5SO6oBvpQPJ1TQZndtG26YikMkmxXA8N2sWfzWGilaKqH0BJsEuwrGlrnGNyWtgS4c17XApUya6GNkwWel6Lxv2p2nJXNpcAwLM2DewlIXKPmcUj0eUBNGOq0GaEK4cJbijC32cJGvIV+iREWLvBmbuGog9TicVAW5jgV+jOPTKCGGWxodTeeEaZXmM1asoXHv3vEHfP4SXjF9vExBjQ8uLsVBgXNXLWZs1LHRsIG9j6WDBJy9djWPlW0H+D9J8MQ8zlYte4pi+692QFGh0kRRaqoeZtLtK1K2hzVWAZtgvrur7ItFujLbH0Z2A5adBR9be78Y+6jdGACnIIIJbYTuu+DzB+7UhRnhpE5pRv0o6IjIte2MZtuSg8PTzq5S01ifbUe8FI4Np9xMX7PkuLXWFcB9UyuqOc/gX82CTOJSlRZJw55kJZMW6S8uFUsvevvbHtpNX2EV3AU/vE30AKHZ7RPyJ8midaii1HBWmZZlrYQFdO/tqKnLlCVD0X4DMfmCPee/ttP/aZ8S0vvGiLVJvH4h0j5AAi62cmTbjVa/VVkSdrM6ABc1WLC5qa0kCsfQDkOCqzX/qADSPA4A0ypy/qG2PLOikw3kVbkZjPYcw9pnGB7PdF34/hogbHywfqcmMyUUs4vpee+HX4uEDbr43yXDKQyI2LsKk6I0pCmKehHAIjZWJMLyv/GShYHP6BwsOst5s4dEBy7rDrqEz+WB6CNxZD/pAs8OQyPxSMNtsSR5Br4LgV0LdKZjtIUTARRBkmChXEmMOB3wGC97oK+I2YSSXRt7/tvm5L69nBwin4C5iuKPLB+oyHSQBWV2juDWpk2ZuPnnWPcocOnUBy4IgTRdpPCpWbMwHZ31zCuQfkiQ+d6662GpGmwPEKOIzFwrNNpKWKCxTVJoeFS7ILJUX0HY4O30rv+W2aXfWbXCEN75md/lmRkMaI2DORSP1HKEydAhh58ZiAOLvyqPOh+YPyT+av6EMzQlOTD7rVtAGgAUBxVpiI6ZP4LNUnJdVUTm8t0lotJthPccfkaBFkN6ueXC0HL+1IBIHaH07JrRxheMXRkJvRA0C+HDt22SzAcGdEuVlvezFLgqQ+hyuJij43XUEpNCsi46lJHa94m7/s6D90BeC3rX0zrS9znUyo1erqe+ZaNjvTyAjCxP1ZFWCp+1c7yljhp4owTu0qQik3gyAsPysLGjl51amk8e55FZgkxd0EvvehA+Ojbhxa9c7SenBiUVtm2q6efXHU0RtP63F1SJ45qeBJBhh908Lgfw80V1/k072gd5oS3qivqwGqekbEY1zNDhTeW/8n9fkNXnR2KGcn+tlslCgwK5GsLzOmDBNHenjskRVqX+YU4krIJf3WvPQkkvGolQp8u1qTE9nxjQOstsQOq0NU7WxY8V1X/UsCJ7eHfDnyo2mTrwCM2grvYjEtChBDU6pHTmvOvKYp+AUioklSo+kfubiOfvBV4bSBmjqL+X4mXP7ip06QM4DIuI48+GZeglXCEfuS+2gdr3Ut0C7CbJGYslm45CNlPityopQwGx6RQOCEDoZ4at
wP9yKd1NNb6b4KxmeMPKwf3s7OXea2Ui72P5PgWyCoOUiEjc9WaAr8jd7rmd3g7jmd2rgXlQPFQA3TUOUE1muhO7RDJfI2a+bbqFaKqxlPKbN8ZPibRGwdN6evTUr3jv4rvtQMgA95FbP39cbPRP2uzKPT6nQxY2UKjdBxg6qU/JCNpw+XcOzNQtBeZbn/HZKxFd2lx1fNqrTg0C9Z8y8kRzFzHnV0ELmSlNLrrLtOJfoP8C24Nc8gRarl1jeWvt0xkROcyvuTh8gKIuDH0CgjGPLtNCc2AhLKVWp12TebYXnwhCXBlRVueplFWSWqnkAyH8qBoCRPeQeFOsytDrINbZM+7nmlDI8WL8Nv8WWvjTK7hpu6jb3ZnKA6lLLOMEQ6Nb6YIEjcQTyn1B/l8EAMuh9WHIucrbNpDZvhfEcHzCR2+92F6hdIW7f4GxVmWqutz9pKg2JBRaiH1YEAUHj1uvZWrv/qaxHoirZeR/nHNuD3QpciJZPkxLTGTdg5qSBRbnqEr5YVbTqguMw8qGJpBbcTAmBR3fikCok2C91OapTVwMrwucexmV4Kp4iY2ExwyG1Gk0cXesMhHpBpJTl7F7M/MG1kOqxvBl2jslhk8UbHMWq6LGzttII0pTW22sH6Zp/BjAbBvRjl4oDH2W3sFo9vsVnNibYAzWUUzCbvCsi+5D/yKP32SnqtTNvD2OPSem6Rar5kgHqlazqgpNYVm3qULxQzdeBw7t5cYLLcbktMcgd/VgbhOoCq/lAiduBq3C0ngF3x/lelucpJ4Qx1bnVrHGTRUUom2VTUTaHcigMqGexHZIJp7qafmUk1Z9K9hc2ig6cHg38yBP4qRYLCDZK43omKXmoY8f+bgjtlIxyz0cdYaHf0WbN23hWhe84/Qc6TwH4y63nam6fdqYufEMSRFDkJgOr1Mz4Rfi/kE6lWJ9CRbzBNJG+FhqlOHxLXywGun8ayDsRxA4j1PTTlByhwsRL+5UJP+rog5EEp/OhS/2QIffaREHpTN0pl8oWr2GCtA3AAkZhBWq201qajnJYqb3COMwQ+bTl6Mi+Z+T/ukJroYE2mkuCTqTvCyWCb+EQjGq1RTt1t6L17M/V02IKbnzxzQIY8MW23q3Ylu8z6tD4W89+4OJHRX/XpsWvElHHxY8B1K9aEh7w4ejZkNOKWmnJahBXaaTZGwoA4YpazUgjtPB7j4teSA6ZJuSOveaQ2Z0j76E8fneMNnLKHXQe1p1XDkHZH+DrfFVad1H3SHEUlK2cTMP798puE6kf23hywzzdWn7O61OREB54IkVdwl+H/bsGDqUAWs8H48SjfERNDOXeKGXP2clV/NZd12RJNOlXyPWIYKs+8MMthqJR4VZzKCfHLXhmsZjy3qMKYLEhl/kGZ5QEK/QMDyX5ulQ/9yya4DRGKLpIBHxHPkRBGXF3cbtu/AXfMQf1koXDd7UXmWnjnQjpAEHXZSsFOSHVs7R9rqLncYoM/pDt5tUxZ9Tk2JfTyr5mDCh4BdTjgQxAIU2HkMraHD7glh1HGcnaF6nr0SL5geJrJosgJ6BhD1R7S5rT1nWvuZtLjYQBDcesJfQQcdfONPmlvmu4LvnKhCImZrVP9E/ezwcwtg9KL200NyjTfada99xhof8lN1FoW4ak832xgDXlwCo0GMVitQG9UJrZpkPTXC0Gcyu728ehNNdWrauzKZ9TiVPjXfaXXs9Eoy/yIu8ZcNsB9lwXd9ENj4NShR0Mu5gUXxBW/EzPh1vfLMNTOXLash7zbiz+DfzsTHRHJSsED+MWyRIJalpS4sNcRvHKzbnNHSMeiP0aLcWvITzsCzXYsMWIOPkWMYEPk5nE7+ZpvU6ylKrh/WwSxVcc665xaAYv1YD7HI2gD//ogYkKYwHt1W8+Sj3T1QZaj0IdT65Q/c1RagmGdUwfftXxgU7UhcyHnGuaKIo+EC42MyexUM/2otJnzglRbSrHbpajKCnIFGZmurnJ6fbSoacGzD0
PiBHr8sg9/Q0yMvcZ225q6a30OgYaa52yrYt/KhPDqUEjv0I3NaBOlEHMxqZo8zxKMaVv1aC56Cl76xa9cVzk6W0BxJGV9JlAXF1hYyHNP7CQ1wJVDmX5t7rQz6ERZOJC+kDrkneCbzIofyIac9QVR5Pr7tyVsbhtjhiCLKfmZmP2l4D26IYjxjdw5W3/ZRMBEAaK6ldj8b1/YANGm7s7MMZXq/QWICmMRZlauwuOVLyPZvuIbTxpORBFl9yGpEzjjl8k45O+dcP2Do7X8Pjcn3RNcQT+G+VkssnUSBBfn1S3eG9JMpl5HIHp1T/GEQyqz+XGXC/dhwQ4a1rLBeiU0hDLREyZNCbhAJad5HQVA2aE3UWae3qs1MhxCUeL7sJzngCSJcB7yCKRM7zWn479oDhhlZx2t8s+ufTqmwIFjMU7VGnwsbA8430nCZvgULxM2dv6h6oInSi194lYt73p5goQ4y6bCmCpHfAnzBx4QPAbQAOlwWq5tWv5IEAWxDeIKhcSZplxh1+++2J59jn5LNGWrvxMW/hOL0amvJzE0SpODqmGD4t9QXGebsE4SJGCu8DtfBcfGLovjxN5oEqHrFnQs4ys1T+HnY6X8N4c7bxNwoxZ6ckQHy2pGGY6poZn+qLzFPgrklhtFNVdSmWZWfkq3K+w2Qt25iEUpcXNMIdSz3V+OmLMhLQu4dQGZ4vPmMb+6llKe1tjvo87jmUl36DSbymDoZ2r+EiYv7m9NhURg9RyHYYfeyLGyg1pwsyR64IdBGDMNeR4s285iDiXEOdi13j1VNoRZqL7+jevGZL9vDviw2riEDHkweFQXQ5wACfsGkpi2bzjINSYGKVadnA4TqDYM2k+QS0xS6CHVV5S4oeFg8W4KDuizYiYtSAZfhwG7o0o3e46C/CE//ZQt9yS0zvckV+omk6X7bVY3pfoOfH7SUtn/Ift8dLjya6pAeGuYWozEQeJffr5siZj52ogR3zegQMqcEIieeUxUMJ1GtjMa+pr9dWUFLePqJqzMgkk8FevS52iMRzfXZLCgjbGjZyGvejqMhu7u3kZ6tyKQTVodeECEacB0gI/52gTHpPWfBWxSdWHWr7lxA3m2NmAjPDajcEcb/9FdIu0FvdIukL3dYwnJwQP2lyWoPY73gxXca0vWoWKWueKKVFxLReuWIGubiNL/2hyxLDrNSsXpeoXNCwW3JIOOlstfY2eY966H0ms838fb+JDGz79AC3AiA3Q8aGP8hA+6y2gg2IGxFRMnNHCBIp4lpqnAazJl57uvfkO8S3CvrAnlRed7lSFkZFCuYSyt0msyjWp14nlELbHJsocJq60KLUus5YlRtejz+TLuh+Xs9+PQMKY4xiXaB6dxBi6NDrXkaMC+q/+A8EigtePtnJtmsxgEz2qeWbl4EvwnXVPwEFH1rG5lvTlC0puynpWAkRu/2CIgh45mmeYFvQSp63EJXrT5N2uyl1OH846hpNUxtTQADscJGi2cyDf1en1XNA3AXyHN85SIkugHAspJa99H/fd+FwPldGSV0/2dbMk35+aLDVBfgL/0aU/neb14G27wIlM1NC0ueSYGWW3Z+UX0oNcgDm1CwgZtNQElE9tk7JxRPf7Wpc/0vLFLc0kMAemra19ORJREkaXv7qe4fO3sK2zBlvm+K6TV8oH6jEDiw8hQdiHV1eZ1wI15PMrA61jGU7z8q67GaqQ2zPZ1+SY6M7GBd0LpNjVkfRusMaB83Qsi1iPU7bdJRDON5jKCkRsD+FofZvjBeyAjoadfjnozVeylkiY/sVhqzDQk77r7Uji+9AhfDAdNd7Fa7WtqJQwgtu7z++50IjALbhm+ZzDLb6oMATobZuohfgaosQBG09EipyI7nMiHAkGEjbaaQ2FruZWm7zThTm5v1e66zvgYOotGKOm6zFKR6a9/1rUHcklejwxHda1rmDGiqfkV+D/pjhMXYqcmN9OuiOsnkQF5hC4F5kJr61SyGc9Xe0Tq+C
aBGBzYUZyaKoQU6VilmWt+e45Tgp8/XlvLuiX64FmdVrL+xEsiNSFDi1+jmHNqbwCUqOXWV4NsNl5h8pQGDQE4c8nWvXw7Q4pnL2O5s4g5mY8BZz5Z8K0QXSvVePgVzGZw5b+3E9b5V2tMNHVVj6LxoiKoIPX27Mlr1X+bZQO2Y4xnzKSiZraUBrsssT6BS/o4a5bi/SoddG5/q0Scj7SDTAinDuaEVQf8jyeXhOgpP4X+DMWKOf8Z3XdB3HjZrtvebb1xVyNFArJHrPUt8HENQupT3n3nqg7n1ewOqZvWLk9jMUlgXlvtyoQYL3cxdZTDLP/oiWCmsbm44Hc29A0lb1FZ4NFMKza1K5Ia6JI3Lv/oix2nZI9cy0tEzK9NCI4G194m5BdHpZnIeGVCS90GWf5gmeWGzvhVRbv9QBDdKn36o7IpZl2TdfNNNnclO5ORS0BE7pYW3FuIvIbZn+XsObrKcoqsjIrWIVpcDPazW1+XGM3L+mdsgUj3urUN5GaYv+M5XG5E9dxtwmzG27VQ/dKRGfgyVVBsDQtySMuxtnynKVKx4HWqJIYDnx+kAjPaTxLkFcGcEwA1Qx5xqyc+Do+AxP09PvGfUwKzmmMc/5HgF7oahLh/mucetaTQ1ABo/O71+SvXiAvcc1XnRY++2weXrNqGPpC6stjI7baXlOmCd68Yt4NVQZGErs3ZpGf2Fwzm0NamqG8j0wrMPLJMT01ZGyfgdm/hoKsBH+V+epWjz2lZiMrWElXzdJHCw5fcQfFRI1kDQzDHMPsbitUuHQnVkWOsMVUEbgAh2kPwELWEMVa8IWBVOxo9Wd8IF+tnt1lB56zSLG3NXPodZJ1ySI5DtLgEMVRqd2D7EkCkUzof+VrxeL4xn01hM9D/vL7swTNeGZO76ISjpDcoRVuPZO1UAXZnZdHkmc0ckVDED2HUEGFY9Mr41IHiyfEFaPaLlpPcq297P6JW7PCXrjKUIBNNWq8wVOK63xuMJB4yJLIL5J1hwswbzr2ansUV7EjA2EO3OJUTasOi4eUZrtyx3wUzuHudGeVLzn74wY1NsmTo3gxas6MuswAj3dhQIUVdP6v4DKAHVVExw0QSraG+9Athq/0h90Rc6jg67KVooUeTa3wMJzu0rv4D4WwXxl50rIulnqD+5IeuISRjqFsc8+Fzdwhw6PTs2cGW0yT3XBTA/Z0Bdb4wl2a+uUQKx7lkYe8wfeEtgwyKe8rPrnjs8Yiv8CKPRTLdM+XVYfevZJDTQcMXa5qlbpPH3Q72z4gAPEbkzJK4CsROPy5MYOq8kF8zVA5N4uZdJaLjR+TVuDjtzRciUDZVsuspZd4gPqU/8lQ5+AdQl3dguQRe/+kDNU6o6TogCuCkfaUuTVA2RXbSmtkhwM07M1TkYpp1yYXkGuO2RsEV+i9FX1vOZVlbVsqdp3MPR32JPNngt/1a6ereb3m7uT2BYmoFClUU8Kp1F1DkHsw1hNl9AYjYlWtzOE70yQvjI3VyufY++oRY7FCJ4mfRC6oYmD9ScV2fL315W0qcgAY0/nIiwwcmyxkIt+fYBeTTmOXHS7uWzYURCQ+Y0D5HETOE5kbDqav56eFBYymJI7S8Cw5gpcbEC82iAgZLAZ1oHO3cViK069lCKuFsQa4SUBLtZVBOeNbzIF2hnY//23zwByHSWbJTJX5pFhx6efAPQ6AnvNG41o6k6XL+gZYQOEKm3IaYbB0PGHNiSYEilvoI4zd3OAfR9CvILzIlZ6ctXnjjnQBggGfu3yhm9xR5ChqdC5HnKwrGSF9xknB8QkXUVuueD8SfY4Cc/E2ZjU1nBzVW0TxBrtOgkWwo7BkOec/IkCnfucFm4fWshsyzoATjLXnF3nPZZ4wth+ClxCU3bbrmsUmAPNs56lYlWUOI5gWGJTESOz0GHcsEjYvo8w1uRyvfyC/Xrdn6+G9nUL/IUAlwa7eHwhUb+Wu3ePPfifTb/8KaXmT8kY+AgTBjbYBSQNX+cY
YbPjtDfFQ+A0AcZelJpM8KFm6TV5tVxFJWAJ3Dh216kPXCpKNlxu71NPJaCXASrEuttO1lOsvucSzu3RKIFGiPJW/eGTLwaFmbIw1BTo1MnKMOqxnGU/Hk+W6CIkSSXW92BvUuJxG4x/ogiXHQh9lpiFltdUBnxRCyu0I8TeOqS+KvmNo28CwxB3/ZeSpi/FljvAtJQUT670TGnxRb9M2bDdxiCNd/fXaOpIIMWXq0zyvP8aYxwPQvjbjB9EIwkSWOrGgSwTb8/i1BRDir2xZFKz/iN/yZoDGbnsUXXEbnQFHeY6e7y/FPg7SKOxBZNBj6P7oWLgWaIL8MgP552pHtghBgE/ZW8bEBIomaVz3s8lZwUdPA284E7f3V6X3wZecwLjzMCkvzAfRA2jXWbnCv4ofOl4uzyxCcFcC4nR3sM6sJR6rbDDREvpMoUQgPYrMLKQDbCezMeQXm1uM3axhsjC8sWs95mCPRPDxEuhlVOJxCfPh3fQuFCwMv4WqR9Z6tUiASeBAGoYvBFYiCYaOENj1yxNhsIpcH0Rl5NOgxrrMXggejA+kgOn/XjLbtpEokF30Mu9/HRoLNC8Sr0zIyRea6BkS1QtevAOd4k4YeU3Edk4I54xyya65Lc16hRZYRpxQ+JEFML3JaHnwXORpED+ExEwfwfty0l0o6o6vBfhr5rCwTPutsc+QIaJBSdLwsC1w2q9Hyt5S/uQ0X3801LXOayb8GBT1njgF89zDPOH3vBJwF/PmJVor21DfEH9ph9b0qHYkCCfPGK+oZqOFo7MFc2ZX2z2I+jGRd4tl0YOWY/kp0y7TC8Bravhj11/mHJ5vwpJXJPLoTd/mNUt5Fr/cNiwgX/rv+EBpVwfk7uz4vkc16kdhWyWrcAWFr1mggwYZKxeuUD5/kssOX6sEMlKz6zG3pNHStxMgiY+UbWnrrfM23OTQwVT1640BRqdT6JguBcJh4mr6W9vAy57wfE1Bpw1II7DdCWHbDF0h3ZBI76pInPyeJ3B8Dtml1oWSBYX3mwReRnDIt7sjQO3zLvb5tIUNALmjgQdL1GUu7ucPFocU3Xaxftr5TnID98MPcFsu11/3tjh3MVIhXTu2WshdDJ926xFyynZOerfHh0v8gFq27PVNP156KqXVW+60cBuC6lioQz48IKjjPuItjrQ3VBEmM+b4mDRPgwAVFAHd2wE0EgZOGYlrc+P1zI0PDvb5DzdS8w0WRoAg7sOurH9eu/NbMIztjsifkMXnR0TBxj9ftJc4094qPAYuzuuksBdt7JjAoZr9wR7oYF8VR/6T/oAGcNDBF8zImf+oTCDyV7F2ovaoPWUZHSUHx0+MI75FVkrm4ZJVrQFY9jR0PGDMxvaMIUFg3f1eivfHGsWjZdpUUV3u/MbXTK8qCbSyhG9yA1OALn91R32qOV0vfrm5p5DRBpxFTO9T7XtMYyd8naj6aMpd4xM/BMFRt0ED1AQAiN5j4L6HPh1eFU6+/QWw1lMgBMLoBV2r0+nPsmWGijtVZK44HrZe7P3qmin6smjT794cgdgpCEt+J0f0BWWpfOK5xeYp8webLeAZ2VKPNH8wWSgHVnq25obx8/SfQUhcFPILFN4pPbVNe9q2d5p7AhfixXmDr3xgXt6qqxjHvkVumK4zqnbSB+UXVShEObdsmQ21qbVtXjEoD+i+ENEXRpOJrinGViFdS3n2bxH+gV8BclqGrH9jGFTidsiRuI58sT5M9LKd4ryVncFtS2+2Ww4tMsO6Mus0AqmXUeBjso9VPyIJE0U3qcniqI6CYWYwxbKp0tlCLW4Ef7tfyCK+WPMxuMdXFXL+EHR1Xc51ag/nfXjyLqm9vr3VAToQEVEd/3PBukCAQltTMo02hFoKGIXYV3DFjAYQSh9drzhjpBRQknThm9inu2aWCLzViQes2a9KV2mjR9zXhiw/3tYuZGfbBY/XthEm828QsLSTjPWhw0be3mKJNpZe/1uuR1dp5UFvjE0uXm1
6DQpYpfe2LIWl8/l8bW1ONaE0huVc5hH1hV7DJkPAUCtBJcV8KS9YnGlVwsUFEZV15P133AqAkrqdXI8W1bss6RqBWI5jvWJqfZAJzoRkjIJIwAwLBtShGBtxsI4R9pASIq3dRfWUGvCGgR49RdJa6dfK1inACSy6olLOv+CCLJV91OgvaIxFmOuI2gA78+6O9IJbFgpTLyQVpfB4M9URqfYBc0yXLl2zlnF6vKUe3jnCQbm7XCT8jqi2+8Jw0OU4re1FGw77K4AefogeDI8p5clDo82ysQqLPY1pO7QxwkDO9+N3XabtZ3UF+/v0qrTcSZURR+OfbPezpgwvgIkX9SVMw/6ZR9FrYtMN2eOMtfUqXWVAnUiHutFEciyTFDnJ/QeD80uyxOlexoXaposGHVAo87Y4fMT5biC0eVoXnwh+VuBeaL4Eq9CxZVApMQvoWuzGJ5x/Vlj8aorQ+ixMWlsxQFBEy590F+7zbRAGw11eEaSV/2oCXiokjjHk8w0Pom2RX4cP0lxR97X8EV2ZK20pRy3oCKMNW1wtoVOG9EuQyZN0Hr+fjBxtc3V7gVQA037y5/05g1m0BJ1srsErc7TMA1cXhO2AZkjnwJPaCB6FQfdub3FNvXyhgqpvVump80Nq6KfFPBFKjIjsRCj+tgGsg2b4mktkK/Y7VPRQJAex2Or5gPx+OO9UOrWnRA2ZHQ7gO8bWwGA05oi4Zg55VYfZntCbQRihNti9SLWP/SWvDVJr7AmijHMwUi0QmGAnSOdiMTvHgSqLzr/Fa+fatlKNmdsUtJi6ELq13K6WuSOIT9VrSaxL+HNULTJPeFIjfttajY1KYpRs1PteoqRncBOc9k+G5YlBkZ8cnqxdthvGekt+qu69+m0p+LbsDT3tbGHtYl3otiwhCg3Y1JegH+gKW2kD/FfrCePcSo3szw2Qu5ViCZfx2HIPLhoek4FdgMBaWmLuE2E9aoWFb5VdisEBj7ppgA+x+PePwcHwabZg8oMT5fsujtkDzHy2P/OD5Tkfyw45XY/efot1cIDrfgIkUKGJ+zUDs80zlsyuI6ZnyHLK1S7Ubv4totXxisB23x+4Lh3PrhymSDOCsUEWOZIUPEZe5dS02oAT671Fs49Ulpv9A7TphL+QSeEFUmumMdXwNfdRDD0BnAgbQTdjTKA6qbXe3AUbL7TwBIv4ndyvFQpGHDDbTfoMgV5PCBp8n15jgJJj9LaSWQOhxPTVo3848lqUFv6FTQkdnPUcs3ml+c6a7tPxueuzmcM60ifYHfYPZRZ8YIMAxYUk6XiaYkln7z/yLIOpdsAf41LMwcml/QdDRq0uPNxFCtAcCUuk51HFoRimk0GBPUvBkzvDjLpstRHmUxnwfJe+fC1/YVS2nw1LpcreqstU8Grus4gwEYbD5dU4CveMcQ6Ampa2XIrPo8NqwnrQkXpDhq9qkLRIB03w7toM/Tn/Dln1hFPlWiO3u3MdWMjEaWtCcFNgg8I9iqHJEmDcZ9tdLPBg1moU5244bBhA/PZ5DRLe8BscMYvn+TYESiQO6k6CY7vuyKTRA4y/JKHMIiYWZ2bR338n7yKnMn04EWkArUSuc99N0H3rREtCbOzxuYF8Hiu70OMBxqwxQA62BQXFn+nH7YNYKydT8JYKXUHrEgBP313a1s79d/eL7fd0idIforLKdqRKrzoy28naqT63bfpITjNCqBZkajWkAgRNeDUGpw8WF9UhiNaqj9gsjrvleWtZyk/VLKrMxYzsejZkz7aSPJWycPA6nyDm/6UNgOXLoYVfdWelu5zwTeRSqVL/J1hMJ2Zz8w8/eyF/wS2sOGb8AqH9vuN9eHNKMFBwMK3iVTDpMT5yq/hp1fBpVmtRBEccWXvINOD5P76gTTD23/fBl6pjJ3MZuWCrTS+ASMqRkXiKW3zijiMgjaoe0e+Hi6YMOT/mlFhNm7QJsSipJPlEUHvQHb5/CzcYfeRLVzC/pwy9JakV2QblrEoXnvZ
mIresXTr1l8hEQaBmy7V49elyYtq2Qw9zRZPOOhrYZ8y+vLdUqV0omgJq7rsoe/wqgecVH7fo3Edq5UuTg/Y9lCU+zIh4e3SHGILQrUoyHOrj6wYLOwxd3Hzurdz/D1Bk8rXO3t2AzpBH5DvVheKLb6uRZLka7FxJKOJ9gMqDksGDATZejVLy266fSCYkG/La2Fzzohd+6fh7/Xzysn5CQ9JYzu6ChncM0DBIOUa6keE4MDyLSYQSzdS5GjZAXH9ILUFSfInVwQw2OEqeSCKJTqJL6I6kw9UKs/EoFcCBVahN1ALeomwiB9meEFrwD+5Z5LMEF+iVT9jGiRud75uFXX+LnIg24pfyHuborO+uIuPmehQ0QvQBr+Stx8vnx9OHEnNGivfPi/MjHUoWMsmf6U+n1vaiUIvHgCA/VaK8TukiHZN8qGCAYI3ahyMmgOM5c1LQEeomhNdEv5Mff6Q9kcWmjGOUctqT6cxPZscg0aiLwa159FfITjTJblEgZFvPl0dba/ieqtaM7FbnnCnCsTGyS8yhmc0Q8AueYkK06KXpvXl85PKNlNz0c+usgmCB9mWfz4CZ1+JCwtXcswmBOOF8rvZhoUYpfyqq0Ydq3H296JN4Q1uD3p5BC4MarEvsObF9KvWWJFTHg5ywqfwupBibyE4+JOkY9Q0McmP07tBUpTVBVnMRaM/qOMY2TB9fVzcu1OVbyELHAFyO/FPFe7Zi5v3CflLM4R4whfEFkAC7fEzSUn+lF/vcsgp5xIXJVa2HDowJarEU5RfHIVQ/d1kRVZ2dMb2+hRuXEkucJcvao/7ZbKJKEK4aFhRDH98QFi7bGGovr4vCFL6/VaXuTl8jDOx7lgwTiO5nWrkYLQZaDfXRLSTZGieVaglnYRckd0Sevpl48vwJFA1pEZlVU/qAzlMlrLdGf95/1tUoAIM2nVlvwgFQAUPY+7X5Y1+im23x5a9Hd+qGFtvCzqY/Rv4LmxcHehEh+N1Wl+1poBOXk/RVwjevMiVByKD+OPsLc/sJF1gPIigxUVdhf7AMx5+hMkD0ihG/0UJcdeuNPd5Z04aLdXEDk6A1S+hsM7EdMw3lXJGZPhO7svIIiF1vdN/3IDfQvAnoeZ1x6PH0LSiBWhURZ5S6NBTINqrLV4Ag2Z1wiZe21IC+iPJi5I6MrmF+7rmOcuA/biAZ7rfokVvnngaHpNjqESEaIUPSX1eOGyYa1WKDf5EBJxd28m8BhVZN4onwBfPBfkK69qzFqgcC0HHap/HdyhVB0qlmh68mnMDkcphsFaJoP5bnh0oyE7Drv9qDh6Xmg0wuhaqFjApT9wdQywMKvDmRfpmgZPFLfYmDlH89GEsVK+WKqJ9S8rABQrlTMHzqZpsdQbC6WnW+GAwfcdviY32zk+m4q02IT4itACwfbbxbgqrjPfWGIHR0g5clqADo58Xs2Vy1RoL1SCxCl/iDEEnjAFTrkKPD3Pvwezf/QcDGCzZtbomWQnGdyH1hJe27plkelAYMSCibSG4PyLFyjKxvuboFqTdQ4hWto7KFaDV6t5/u7cZas1PRBRdIt2Q3rG8HFes0N0N0+fr0uBSh1zjbdB9yD9axTeybehbmjpiFW5rMDINDYqzC18SMRySo6UjxnsFRQQ/gTUg5S8V0gc27Z1aiXB1UqIfevSa5I+Auku1sJSRzhYD5GeqYvsS90nXAlKbodU6nDwN9WDlZyGGNfRU7kc0NrHHeRX483dqFelzEcS2Sdtgk409Ealw5qwra27rD7ClXgGsBkwVd0TTbi/fAlvB9dPHCn2ZVQUJy1SokR1SUYs9U3BuY4oEV9h0DwHz7EKornch9ngcduSwPf9yqF7VJsybfcbIYIZ011jSDsLBlIQsd5PFPGk5nkBzDg8SzsLKb4FaX3LGwfrKj+p63loJ4bkIhmH83abqNrWxJtC/wZ2+ktE7hY0P88Az/pGRnQbP3q9E5kMwHFlGUQcJ0SyakA9yBj3ztNulPHwu
nf7nbCa5U2DiUmJILHDYtkkXlro7sZqlK4VK4xBRBRLt/Bmjzb9DPI57rIzmrFpS9Hvc+SQzRLor+wbU5oBlNe446643J59ffBMfP0aqulWp5tFDVpAGUxVpwcz7eQ2S5GcbfxEBg+3QHfpkEXRoAF4RUDRxEckcHuoWOdcSXM1FY9xVEuUfmdi8p/brPHN37e/i9lkTxpu0flLa2hmM5Sb3jIWnIZs4mda1JNHHwPAzROmoWpHxbhx2wW2aAQpCuj9SRSDHsIKB4AiE8UknGeprBomtw/SP+YVinMIVA57FVhmWh0klX2yyi8RB5QXOxJ9Wp9B98WpOnaAakWpJN+8uGsxeq5RZGPkbclCqF6Zh5dg3F1RxW9Qf/AM9Vs4mUNv7EoXa+jcaIezrSxenqvLvg4/DN/fTFMizAcDNA43Ed7toewtQqSJlnLfcyY0xE0/1GhBYPAzNpDKkBqdtqJX7ABbeewghl7bdWJYfud61QMvbFrHB0XIlZ/XfztgJCfjc5JBbSKH4hacsSxdBgbXFcy86HmN9gHXb6AA0izXG0vB6FbFG2t11ynxb8k10XyJlY3iAWvNSQmYXmYrmf0VVj0EGNA4aBpmD5XPwubT+c45cIpyyyKB7y/wMmwblBzoOn8zXlyMfyRNoS2SIzmvazOoBnEMZLrc2Zia0XSGCDj+wiBqoWP+DeJdQnH7pQh919buhWTqQic3C985NJSYnAqXF0scW6HzXojd+SJdVigqemN5d856w+TQSFoHFWSvYC30DAzV9VyDQnqF7gBL+Tg3ws3akET8loBmxRjEAqor3MtbzZ1c4g1cEtVqxcjBX6LyhFJG4wdx5lNxFJIcdpy2qk4HxTBx7lQY/VT5HSk1dv7bDoASSACKq6wnUgNHxnUTbpdJDSyyd8HB2Qk/5izBcQOtrkr3ed7oJq6BFesGMaoPL5q4IOLGP7Q0UUJoYf2vp+8tTUB5LHR3h1KayfqwVF8FSWC3q+d3IqAqjo43EXrIo6xrVmuZvJbHQD8G+lPrZiQlSYiorQmwGwG1SOtRGOw1hQ9GzaNoLhwlJ13OXSoO/0Bh7acmRisEt1iN8KmvztnFaDX1dp3sjxldWo1u3hKI1FDwNBYtbJSZY+56pEfOMlmt9a/8lLFbyV+dTjy/N5FPJe0TUUr6T6ejNqQ+LBGUMFoQ7+1EjvGh2l6Bx8HzhQW13a0U5W1kyb2vWOoX/Hez+ongk+rEaoBwwBMdUG9F6wqQyL+oYlLJOwQKinnIYvqi4duDvDEgfqY+fsqBBhlLqxTa4ebU+d5Xek3DxhrWRi5cWJqQnKQ+Womo62y7hc0lADjBnnIyyVHna6lvjp+eSB0k/HmKbxNFGW+UbD55lRSYVX7huLNTheAo1jCX05K2trhQx/9wTzsuG/L3u8hQ6sDwBcABRtnYH6hKLk/Lo1lSaUWhxs6p/qL2yOw5Hja7WVOxCy/gosg9X5tvtlkp7YunYTkushm9fqtnUMPx3CDW3g7MUW+ygt6vqF03H6jKGDMUEGOZ/F/L1vyBBpS5hEuk0BuKjxigaWYmvYVEd2YspThq3NsWvwBkgcWUAaTgeG/faemXjc3XhDEs0esKRxpJF6hKhqT6rgysiSjtLTx1frkwE3SImGEvcAauZhX4C2Bessh/2rB/SSa25oVJqOtfewezAvwPPj+MzxvbdqD66eSvpjCV8UojAe58JftqAxWuPPciTmtXbEe0P8Vn8xD3J5JkeXn+gNE5s0CYAXr8TfdWMWR1R+Teyerxq5cFsFxyIprF/JerpTjnoIDo+zfFhLp3Wp9dQRf5kvOddJH8EhdTna/haJ41+lyH6nnjDZwgLCMTtsoB2RRliWvUToaDZzo22wJac40aJW7wylG8DDRVi+jc8EL9eBuOuwwoGOWbfMleN3OccIWQFHu4buR4FzqcwI/iLk/kkKM2TUOlMnp4rxTRyEvQ14avn6bcHXdlbTEgwNzadjl4M6fZ2I9fj
ZTpEkCucM8nsCB2G0GgbvwzaQouj3AvUiWujDJvX+YkPYdWNisPBSYCJK51u7X6hJWGUKYmyeRxtl5Om+uAtgyO/9jN8NNhPFXzQNSdQwLrX+aJwvTjwxRgHUN1lE8+pTeAVRe6B2bsvzFN+vGe6InbaF1HtUZZexQuVxqz9tgn0TLmHDwOzBft0NaA9PeRYxs4UZ2ZjgpdKi7abcIK3Co4W1DQ4yj/hR92pSFojXeIYUZpQSrGZSfKUmJVvHfwi/0Ip6W3mYK+ClwGYLxzw35LEyyzHTdh7yMwPM2nztX8QvY1/q2lzj0O/Yr91pxp/0bWHz4ps0Efrsn+1QaTMY3THDyj543dpmD0EkAT1ictd6/qLrcC8kVFlkUIaSAdLD0w3Oem2qUNjXUuBcNFk2ok4P92Ivf2zEQEREYAplWaXKCBnzqCjBAV6+OcQjm4Z7ENhObWkNgK7506zIC55ImKL8DNZtUv0KUtyg1YSJBZRkrYHHjW3fwNcJEmHojJFlXZ1bs+lHgBXsLMZiDXvOQ+pZPDWVJd0AOEKfVZmC5A9/44AsPga3XckQgJMzj8jIeU1k/Z4hpkH7xDngaWrXLT6lmiuZcL1hj4kezopjFzqn0w1mdPAyAPqknHR/CAChvDEhrBC4pS9xy968fyZOu5illnM5YLLLxPQhkJzgRDxX3nQQjqZK8FenM5HOJQhORjEdwmsqkxArc/IZUNwOt0IkkDTjDjuFE6rVwvEvOrwCzSqm24IJ042zSn2qHm4P9SKsnUGTuBlZv9Be8gpx/GMurI/BpmyHgNwxhkm8oG9p7TipycjY1nnt4ccy7Ci0f0jJNZzXhNAaHDxR0PgTQeLt0w+RA86RzeKInsxbPM5dizO3FmxRtLl+iBH9bpzFhclF4rBrgrrwVQx0lxFNBc8cU9w1QjG9dYh2GWFfjMMdSsWP2QwJGjGYV1+nlQb0WmK+JD810sTcis2XLUar8xmejj+bL5FvNjPElMPyL2LJ34kKJZ9p0KaT72Z7HLJTvbiP56Ew/5ibZ+otIeXkVDoxIlgtHuFNVQajWMu9bn7cASQLc4sPmigeYE3L4OuIcj3jk3BQkec6unYaNpYNFHv9tshpxK8fzdFOPTqCXMHtEqUwQLlOGxVinSG5V7VYJP0pO5FJq/lx0reJCogYgZXO9xyusSrTm0jYMMdERgnfazzp884mHE6VCvVsok4Y+4oKYY1UePg/jtX0mwFK8Jt69oG6VX4nD5pY1zhXnfNzt50CWVhQNGTZx5jYLCkvZ3kteozto3Kxa2jFkHtbx7pSkFRXd74TDjtcg2lXpagCGZ0c3doq3yqMzXKX/l3XZlRv8DAHaaufcGn9uR1jOYP70k7251NucNpI3h8/IB7ty1eM3cQpYgNugh/BkvbqViAt40GMNxAX090MdC+eARjkRVy2hMxtf2xIySNMro8O5h/7dRPj0kClJcFkKDA/YIvQ9dMjdfkcUwFZASUPdaewCPb4jhIT7K2xTe70S4GmDceK3Uk15D8Tu7dOqRi95lPCiQM+TAgldlFa3gyJgl7+P8d2N4M3hOcP8aaDgBHiY1WZz9oJHdqA+c40N/V+QI3ZfNeGQCx7pKqFZuanZtkBmbm72/0sZnchxT69/T2b8d32mmGu+EICfhl62848YzH35J8ZDnyAIA1qOgWWfiaDkazyZJ1yldAb0wgWGDyf1Txz381xyfkaV0QWpvniyZaKmUTxo3DIR6K7htkn7jvV2mmomASk+1LtlojJjpygVRG3gnKW3HHR3d7eF9edL+w+8kLNo8T+RWUzM8p10bSH1gnCUtgF3h/yXBejTz9+Oo8OrCfsh9UqJkJnnVzXeix5EKapiwiXMW0VFCCjKAXDt4KrKYsJI9ozLokn3ZxY8XIb9cd1l/hY5zzjT9Hm/Yltysaf7MO3dZETNQ4Qa8o2DhyRYVgvkiRIqm+K9R3m5UdwgFj0hFpfRwhVPJ1BkRBtoN8tHBkreqo
gMxuocnQbsxcvIq0RGw2XJ6gXmzN5szK8qsKiZbSEoJLhMTz/Z2ZODPb+CXzygATWOaHRhf1/AEyaAjo3i71Ky7m8TkwiDT9LFIBuRn83L6903qbN/f5ZWiRaz2m06UAbasVQrIF9NhgZfKIK22wEXKQxvUiCgeIai2k/MtQ+VJrzvbUfLfw2gns0oSdMSVSTF1USgVY9EuXWwCXe6riWnUBLcYd2lnEjPOgf1qRHElq/UtyAfK9VgMxqzQ7BVCG7WZ9yTr+IG1UNyTitSa+SVAy9iwyT30YlyNu/nrCiJv8BWRaaOJYv7Q3KI3R2Y2jPZdyAfzH59PX8zbZPG5yBry2meFIGUMVGlwsPpBsfoStN/CJp7FgjmjkZ+KSAzL1tRqNVkwfZXjUSfUQ9bimKrj/qVR1+kagbxP7LTwgLbRN1o/O72TfBMhoBn3WQXtCSywpXheL5afid5lmbK3U3mfUGzmz7T+E90KGZ04rupjzTWqgrOgjaMGgmSstsAOWOztuIKF9sE7YQxCLqqV7pc3UzSUjpCb3aT/GoLEKeXnicbVeur7fBIe6JfSM8qY03gLlO6UArOmYlEDWgimunprxjLfLZ91eKciCnd1xZKex1XDyQJJQvvGxQ+/7+u2ECC8GtvaEzfOknXB+3pPfJaMPZNF2AFv+Hu07swhOLM8/sI/eos2j5xBkxxI+39mNaDnu2g9kmJjfLjgpMyIxI8lDSk8sw4+cNaTw5ppKfY4JIMYkgVUqMPkqI8/aOR7ev6axfL/us7mf9UX2i2c1L0DB8fVLCasvV0RUFDys+QsuuxgHaMcx1xr5/SCnx41Wh4ZkElK6yzDJTaS24Nmyc7lPj6jlcVQyR5xmOy7Vv7NLVaLLvsBm1PW+YreQcP/EtsAemAz0E3E0z9o/FNV3hO95dsraNWdF8F2QiupGWMEYMpwivvJRWdivO5+HZIdoQddzdIf2Pp4LrnC2SYHYfdlvscpiE0BzLtyI9TQfSmyhOhQF0twhNeWBFErs9XSSnF/xb2OrgZCFGZVfFSevi7pFcm/x6ymGwkVQRvluoMuir66fA0JWwUgkmWSNuK7zhnBjKEjpE7F9yVT8MWjaHHuE+pxOYdQdW+FT77y1fdP9dNilsrc7kRacRjoJigJ0MMjjTbIBW/JIeqOdPetR+UdYBCElbOpdBMxwg9dUs+9/6tzLFd+aPiyu6EsbVDIJDNK8DirkTSJTy7VP6JsMEjFQ13VpC2BYlLdqy/w1NrEBudKWH/H5o/lkmfUMH12jSr1Q+ATGfelG7l5ZD+bB4cYtW2+icUp+mB7NBIAFJK1Htso2ZqiIsIIWpzsgEXMkU+t51yBLCalN69xxEzglT7AxdzmwDb/U3c5M8Fl/FRd6lGz27tpUACfCW9ssjnwK1V/dHBldbO83L6Qq5KrgZWD6Spb3Eg3LLpoDLW0GBragw7lIEdt8AV6JROpwxERJPMIjAsFWuhQQgnfV0HOqcoO0gnTn1nQezU743cCDa8TGzAp47D0uDGx9UNX08Y6nokAPZHOMJo7JKf0OXTBAG7P57mH2010xqXgnBuyXgDSxOoZnZ7r9KS9CeNoad/Sjc1+JFCDZRb7eqmvnwxXkoyuyqmchYu7ZCYqtga2GX8WycpoXCVWFHTkUDmGCn/fwdcrvZOfUJwkMOL55Ss+WYhUCs/dNEPKY62uLTq2D2JMZ1S1d+nv8HmQVxwF4CQYNl2ZryBwRmAzgAJ+QrVpoEgpg7QQ0UV4Ju38SSHBMwTixqbcJV+n+qi6CPMZPG4SY86M2HrUDVeiHHFkWRuc2epTuRdDySABf0DYhcerznqLeW45rC/tNJM4B0q6AYUYNhzrBN2YthZHm057iyDrgzb0lvy//p9J6sb3VSdASGkHyCHBi6EhDamu01kk50ogNQN+KJZX1ySKTd2lDZ/+gWFgWOsaXx6TkBvGj5lCOM2JpD6t2ieEaZIZr8heEohimSwwAtYJ+I2aTyr2Y
Bu/4ik4sCkxXXJ9lpyahSCn1VKLHqjtabuDLEqqt46zGtIYL73hMaH1mCAEejXT3KlWO96gk3HfRxKN3+Urt9Rm7mUSXYN+lW/5MZMlfW4wSFjCe8WtrOtzSCV8xDnlHCIzHIX/FMvLkekfJT03dwZ7qBTQazn8tTWO5aBh22GIdRweDpGSd3rqSYzwkFbabV8AWpqx1INosLY6Mq4rJCKOvbZfLn2dcsHvWVv/K1wzZ3cv8gLlacS/H6zWjSgdKCS87aXp1ohibVYs/cdeZoClJzJ74eNgTcPdX26bXO7LzPFRp+TtHATwXtKqsd4sazKBG2KXaca/eOM5NXuFT/yxmZHoOEoCpo+il7Ix7jIRFRu0J3qV54eEV9WM4O4RzKfNcyOGQmrFlfsDZXQX7ZPMpoiGDgg7mONu8YoKtg2FI0hYufelATbY5R0cbsfSTXrO2YDUoNa2AhTDzvC9DATeoaa62UQeknfvMi0uwH1uvcczghVGAMmvQ1lHUX/MxRvWuuJc9UgG/l3PuIIUmD8d+45n3tiEGxq3sDBcIPm65amtFalYoLNw4ApUbi73CBvNA4zTpQouPfXPneZJujIPTx8UM6OFjP0rKnFOE8MFg2/lje0c9VA+4MbWgzehrWfUKMnKfTvbSHqjwG7UzVXzoSzyCP/Xy1WKgbrns0M5qQxDY1ilJZmUSPrl0YKE2ArTJHKOZ7XL7hfRnzTZZcLm2sTwR+vcZ+oxedDhZvjr2q29Fksd47gtbipYoy1stG4gKD/3kui5+VLlkswrA14h+9o1JjbUVJZOEb8TxntsiVm6WSCp1GVv5Xf6u558sLH+RnrEtjHAidX9VNb7wHm80QRknK4UES2vlG723rEva3NV4BQaTIGWklac/0i3B4RaFAQAoNn2y7Zt27Zt27b1s93Ntm3btq1ZxCzkhLSKCy6cpHPPYplSe2dsMGZ15FtU4hyagh9U8WQgzFOFsylTd8+K47trQcmkV9l/ZFNny49orB0gQhi7ThkstU581c1deD7aKnAMNkaHF4VVV/0XIgw6zvi0RHynSE/O0S6Hp35p/vqzTIPRtrhyYp+ePIPjPZrcobx3AEtPvW7mQtTapnIxDL7CkAj8keOn6VvIqU7H1pnDKr5bxYq8quPwczZc9jnnEweenzCHUE6lCxrJKGtEQSIIa/tjPT9skgizROIJo4iLDb3enaol9zEg9W/mlMBOaW/r5Pdk+3YYjadiYTIyPmbJ5QdcNh2I7VoJmi6CEhzadZAbL2y5ZQViZjBSCjj5CgAxuQCvN+q81K96vsioAjR1kUue6L9QdoKP4UprPnRMXlwAHHWb+t3tSiFw25cDcO3TzSYy3Lb4KD5jtypv+MAgW1dMpiawxSdSvDIb0h/Imqx31vIv6dY3v6ur9HhJqFh92dRy4xsLPBlHoENwfNFUSFenIB3p80mvk/pyuqgMwsAYrJcOp/lLvW3SEoe+R2HUzluK1SKvwkoFsUXxa9kyKfsjEDN6dOLvy9T9Ny6go3LoN3m+2TBBged+kuaH/tUy1QiJnirVM+e5sRz3XFwGoZe0aFZeGnHX9V6brHdKUzNifD7ttogsA2Er9CGMO5N17GCUjj385kgSdHVF+uOUYGMAknsbXG4fOrltuw96RCwiP8hHrBNmZO8OeqI0NUohI+FtSeKNwkb5lSd9MWvaOuZZ/pyxyGNXtUruMx23JcJcxFV2xhwv3/lXQnsnALFRBmdRnW72zykYTFt0DL9Hgu2OeuB8zTK8WCvFqvmS+n0VcOx/sJKhlljzVhpQAl6IAq50fxeYSBG1vqwztf5bzjItGNqLW8z+yYH4x8eKxmDlJZBl2O2l6VrmVI5+gua4pbJJmGjqHveANReGReFsaDqrbo88vBPHv9M/kxVCt7idNN4jRbCl9LJJNOJC9+XNThP3Ida07h7zDKCuFTEUiUz8MsVsO1s0vlaVfAihyLOy32HMd8C6eQivptJ0MsmjFcRIK
admw1gaWxjM8a6aCNhc915n29VLfsWLQ06/1agDfBgIGpvpxM2fvLzGufILj+XR5voT2bv9PKKxcfdB3A95tv/uIxG/tGomrfjQb2H15MU1yRIA474O9st3ZS0EuWm0yTFUixBs5LaRippWD+9g/Pq/NtthHTjKz5deiaXFJswC0x7CCRk9g5X6BrzdmLnUVQOPMhAxO87HNL09R74tAsvzBjVMXjaseaFkw/MN9J5MQryq5rmwEl4DSRajxBX+964JkWHGZ52QUK7vHm2yFHEObfgfvYqPQMgElOztMC1tybrhgBDfwH0uduuQm4b4+WT59bD7V8IpxbBSEJWx2VZLshbRNbjvp4bSwD1jsQekc3d/7x4t6Sznnmpn6r4fCxqZHno+hn/1IlNIC79QNAU+KkL6LRgCL9QWizO+zUnKeKo/p1Emf3cz7pICA7PpbAcCoyxseA+n/xwfE/DvNEkoalUEQIX+RJoVy8U9L9rAxVlfs5wgxQNhlk1C5rKfJ4xgkUBYlcnKG/2+JyJVqOTIbZx1Y9cb7mVeNuZtSIGK7n3JaU7GNSw44oKjfBd6+YZ5cn1xOjN2UQVz+bjdMfJiV0z1xDvOsX+0/huvJA8MJf/OAA8UBKxUaJJDU2sRVnkzH9CcX+3XykS44IvXsV/HttUbv+WrJC1j/EygP6Z0o5HIhB24Emyd2rugrYg+uJjpqZUIZTB4DETNqHgK86HeFS2QAQqdMmkt2UXULMOX9TbPr6i5HVBdlMc+oTNyqBl6iYejqOEPMr8MVv0uCdpkLYXFwj1krAhJR0bXezPvEP0AQ90E+STlTCwsEGPA7+uKTV3UJjdoVovKbbBOcNdEg4evimKWje4dLYtmAE6fBLV09fW2AAq88ht3azMwBoXtJb+/r0xJfm7nGLNGct75vEy+rlOthalspjwqX7cItAprZDveFgMvCoFnSQa9KWZXccEceP2d/6tZYaKndqX0IcA8NpU2mMgEIvrFA4nd+fkbGNwzr8PW7jJhHI/uMI+HN7bgse1BkfegFbYvcfJInCHkDiMsrbExBXooFAhkdCNm2BtveAx9WWxCXOvD68QJ29sw84bHmE1oUkoDPMy2jrORMz0aiO+7E3Bj32aVeb3dE5Uni53Wnjn8wZD8b74BcZeFWdJHX5DXj8Y758t3jRhE25sjoWO6z45ViOpkcAw2+g5Ytzqkhd/JzPtUKS+SVp2nBZMuApFGEYPt2bEQ71lANNOKN90xJ4eEah5kC5oGUZzzVuwfmMvYlOHctdiEXHpm1WMUFB1lHUYnrrmbFSrhqvI81Yi3JqjkiBRlvKlwqgVDjey3AbOdqh3gothiiXg/KPCyGVPX/soT/Ja2vK0pJZPXDUSNIIHIkAqSjBgaWKGauKj7ko+ABrgQsFQQB3bEQPHMpEvfScqL1CxMqaCLiUVKqu3PawZNXApLhOtzRV69962+uYQCHsZAQPHTnlSd5tEEWHdhcNOXqK/CH6+7a5Zy56o8UVfgTfu6u+tk1Nli6JkypRUp1fLC9FbQjLqHwxvPF1uxhmXTZststXhsKTv3ZwQHog/ITNd7/k5eNWyqUzvz7ZxgpANDfJUqOiiKKaHx98Mm4QpFJE+kKdQr044Dl+7x7dEb4FHAwO5b/zfvJ93jMuSRz8YzqQ+1xy6GWezjzBGhF4irQVXyg3nEij3pzjYMFGDWxE0Quzh0SQZXVCLTebTc2e4uNe+ni9g3OHaColfDYcJxDXjyiB+zhECSi3yNzAw4jK3ZU0+2sgypfUfJi94J+4sqfL75BnJeSYijq0KxZJNF0kjfWmp/ybjc73gSlFKb7s45HpNFr9Kta/S+OyIocrMlQMS3FX6C04ihDTBRFxmdX9scfvva61Y3SaOoQfqIIJR5PtZ0YmDHewsw2jytgcmCwAlOymeGxQwlg0OjTKbs6yuVKqZr9KetI9zvwYU+apUh+uhazcgu8n9+JNvUuwPC88r29oLYMJ/akx1CW
C9NVb9Cw6c2nnDU3tFrKL83lgh5bWm7AJNdgHWmbzw43JxS7eJD68kb1Eg+CXfRr7hO9/C9dFlcZak2wURefd9bkIGANP3gtODoD52wYJ5pcW3861Ue7+Sjdtn2NFcnOtj+KLCu/FTb/6BG/e91Myi46jV93JE4C3Fpa7ibiTv7BlFq6O1lm9YB5PZQdrwmk/NQFWF+2o1iauCsmAbpZpT4UkAwHwRNEkTxPFfHDS4sz4KJpLl2lVDo6WHK0cqQivoD0lLrvU7EXaOb+vafFFyU6X9fcSQ2e8S4LHe5QPLdIy4cDOwVKmy/2JQ4SaC/b28hoQk88m43avGjiVpEIwKwro3cRM1ChQ6h7+Z0aiJ70KXq0C+ujCU0EWnNKnB0RDNK+QSh0h25MVU6h11mSpFiRLWTlSww8ohZHcyFyZBd6oxJ+/DOmkAuFN80FG7iSlAyIGQGt8miVSOLBSUfx057JQJkTbhzlyNrv53MmqitI5tPCAnPgVvBjP+RXAq70RNSlKOB9yLC/Uu+PGY338A28oEHA5e8ATkTwmo/ZYWYWSpE4VQtOpvhYpfUUEh+grdCTwbsNjLaHEZm3O42OpPLDa8SR/j0mqywdk+uPIZwzBb2Oplo5gh9Rrk9j7skaGjk9K+DSRDFWm1is95C1MERGdhS8NYvEEW7Rz0HkCq12ejyD8XWChZP6wJqzT3E/uPTjj+DcTz71dgbJdSCMoehIBgCmbkE8+MBoiF29uuAAi1j59zE2xm185HzJnjQyWiPu8kh+fbLMaYZsljl7y17UkrBRAfZH5zAS24KtxQN+sRwr1O6a6zF48gLKvFiBNvkA3qANfHpuwxkV9+IxlCrmLtbcBy3+H032PF808zmO0oseRTAPElfJRyv/Rqh5fGz1jmvVmFn5O56fX45CC5mBdtZUz6rGOnJNaiprS1Vixlax3yRhITgy4At1dSvehu6hEfm+AI3YgHwfOBpo9PaeELl8DnYkLtY+zYDj8KcNX5cKVeEyCSWZ2Nyuxju48IjVSCRl569ZCoBEtWKi8SWtML/A8Ebtf2HSCIE3ziMoVQkh/lFRDxYJwmyo5lU1wZzxVi9vubvWphkyPy4xn1E8+VUuWiVRz9+4LUuliOah+uoaNKymjSmzcFpa09G4y1HNwKs/dnd5RYl+4oDBmDdcrx5en2q3ykGN9Im+geJKsXrzkwJCyJPzXExmL2FWokJOD9sRlmqqAAWh9I1QMEm2MqCUZ1RgIPO9LDLfMknNZLRTbnnxAP0bCmaEj02u/rfEvuAUPLGT6h6urPkXNKTMmHcnBNVO8uZGD4ESh0j4zqy21MO2U2ZDAliTLliLZ+HvKvV3E29m7zwHlnMLyOrYsxpxfD8G0W7Jrm7kYez1gqj2xvpwBYJjQ/cCGeftlK30DUdFxgA0ZeLCwsV5HOSJw2wonJIIAmR9wjtC7JMQ0YbTk+RVczIF7V3UugKd1OqBYo72P6HRDRWQfFOacbeZosXvKxclD2eO2x+wsqxqKHVTaKbZQjopFkN/UF8MI22WrP4a+LZP0y48fFyr8H0anfFU0lgIcd+8fYRqaRqbe1xw/nek67n02TH7ULX9JRYCHICx0+Evv7cjbUlIuMdK6bdw8pJegJOZS/PANOJkL0u/i6+lx+M2+k5SWb8KkIguXrK1uyO9LX/1D2/Em6OddxUZoelfJM9R1kkbnSFcweDta74NjveoGTQPl9WWApjsDhlYjyh5eSjKN482VlUuQm7FTr5bSLuWVKQ2/a0clks0DzVRmmylYdz2A8DdT7gxkvgw056gObHAu/QHYgHb4gOWIXeU/1qLnh3nxZjB3XrPGfVj8U9vdNhFmrLC3zFdU81w6VZFppUXKE5fOQgvbEhyRAllE2K3olJvGKkDQvuffAkYtTHW3qBfMTdwwxNDSE/xBfHfRuNIhiuYjzQlxbMH4JdsaX9CYSEK59zOFsY02cSAtfBNlEK6Fd6ssEkF3/yCTluczwrE
eb/agEe5Awaoek57lipmuZ0dNqd87ICIqPrQYnpOjz5lyce4psgNF9DIrI2e+D5TnDcxbHW80h/WbUJqxaxkfwjcnpMhlr4RRpiAJ3T3WHUsXP8g2OyJdB48Fzi5VuLuZmEKp1FU13/I6pbvYZMcQQfzO7zcFHPkRuouZa9DvObXNODjABKzV5NDKMzBgqHLVsaf++QsGcrY8gp4WZzJHSYmhkZdmB1jd1TmFopjotT6WfGWtxy1Iqj8dTVy3XHfS/i8Y0XVfXu/VP/B3APwlfYaaB8EPas8EsZFjqi6ACGxpFyAdbRYnbft3F0865UXBJglb+a8rQXTxtpYyLhHgrY8T8hVlzMPyk6f4Y3oeLCdKDmNgpT611/j8b3RNImvKx/aRzbT5/U3NbUvCbm1LU7k0/JnyffNKKHsmhfNLeR+xGdXcf9J8D9TiyBLv1uQA1GtDGbhLyto7puyTTyYh1dCz0QLMCVG/3L9poMsxxBpTJKgixB3Kq/gO2kolKM9cXamxAFe6JxPIrHomiKkZU9qLZQHDx9A4epR1oRJ8eNxyfnGTb3I9e4CtRRBQXDywT2nPnpqL30OLsQ8Un0KLo52rQ4eI8mdNjH3pVXVLer6EpGwXb0c4QQgnFIYnZQfa+ZzDqZVhsPvc70M5WkKIzswMHDpakFD+fvlcC4CXm9QHIfsxGiQm8/obr7xQt9rQ1e+3AyQr88tcg3PVwkBwgBw06T2n2812hw0cD5y+coJHMyh0ztSjXOetTw73hBLZM2NDq+XeZxaObrfho7SGVSajLFg4BTxgaCZ7bP60zWUk2CMtvmW/ysEPjrr+garPzNVb1Q9u32/PGNcWdKBzLZS+AIuukZJUZF0Dk90Io8n25cldgqWIBKZNbdXSg3dQCqPalwam5utZZuBJI592qPdM1mxNXjLSzyhMRWVDBSDp+8NunV+jBl/TedKqqItMd9Ts2s9Z3FSppgrlAQbUX0CSydLFbQ2n2EatI3xAPXrg5W7dcan4jp+dufn21MlPHUE/DoZJTBogo0hNPgxOg3fU57UayxaeNp1LiDvkYPCT6X3ZUjpQc3vAGQy+Lo5x3livyqitlfmK7VLN+66CBx0sXhcWIzlo7pZz6y/swWqJGjEGeOePEI+8kVR5Vo6pg2+JtSbOwLeP9AYAaEFNGLaEnfRJXOEbzCFc4+xf/cy4XqO79xRwklpeHCwUj+2oaa3QRV6b+nAZ5DpiUG+SBXSVla3Ik6vGoqP1oS081cjsYaM2obC/y0wL74/9jaHUFRTdqOUBvH4iwEx0iVbMIooTrZguO95j9GQc3qQsGLwJYifhd8yuER+/WeIEPBb9RqQAoZdM8RSRHGVEmPTcjj6WX3SRFZdD2HjRTxsWa3m+h5lTxqjk6EXjm7GJNM0qItU74B36uKhbfYKP3yAy6KfSL51p4rlxs+cr58As2Ukx/838BIXw6sLafvM6TdFR8W+RKy12JP/lWOLrzNbQoj0cWmw7X3UiwW3U38i2nbzkAIOLOHq/kLOuuRpLOcteEub45Rt9d/UMxNAdxfgdgiT7IsuydRVgD0spweNHwR+IE0XSc4hlfkubOnEncYdUu7P5HfugetB0gPQPne9KFV7UwHF+PXR2DZ2q2oA8xBcXowMTcbKaspcSkEBjgxrUmhr+Mx2c0nCfcLZEKzoFe8PwiZ/zqXJAPzSO/oW5KUX94/fIwNC2HINEqYRWhmk6nv646AbNek6MpaakOo5NPy5Em4TMYJ8EVH7nq/CJIl/B8bUtkgWUgzhPFheEGxP9ppc6kDqcZhrlGZNiuBUbDv9Z+d0V61Q5kQ1YH8ojsjNUnJvURB/kNs0tOImQuvKEHJp1xSpZtZJK4bItpdNS149ihw9SFFMlIez8b04nenSKfqdUCArZo8TPB2O+VIStcxM54vCNDMk5KtB7vyC3FKmgn5FVhlcSc47YDLXY/WG6jCmQSfM9pKOe+JwNbM3jXWcHsqLfQwqxKLq
b8ggWOuIcxMFRmqlo9ZehtwYHqhK9ssJ/ppMl8LeXXEY3ggyJx8yLONNoWCXCxWaqyiBFORhCdAGOLI25SAbc0TCT5+ZRJwY1j2EEc5q5IN9MlF3cfu+Xc/Ml0t5GFyYcBpP2zzQjOBKNFa/kqZ/p9C88j55p+wVafRmalI9MiHzjO0DTMQcXlUSlubFmFbuxvXegveI4akAoylkH/qiLipit3eTDOHpppi9M7XfMVgY6mN0Gq3SpDio20YdmKE7PnhYSAlEMeZsk6rY+mBEeOz0CV3lE1DricHGVhoWyo+939rffPxMx3EnHeBXRCc2Lc+2F9cpHgP193yXcnDvm98DIF3Ktq9a0NhHqTXm4KTuPjTjaiVAjewMM66wVVxAt61cOl7d8bX0XYbCeCy9olfLFIGCBjzEOJIy8zJt2PVuthbLc5kOXKQfxhfhZg638z7ES5C1fx2ghbNQuS1XjYs8q2k7MgPTtiEYQv/4rmQqx2UmkMhvSe9gh7PkTpnLsY7h0nZ/fFg4I/AssHehkdX5AH2ZGe/8k3kQiagO8lsIEQJdYd0OMimm5HIatyzYzuUZu7tgtO4Ty8Pvo3VoOkFd7t2DUTpaRwMxEiU1NMX31KKo3AGeISFoGlmYM13hxRrbTM1LFYzVoX5K/hZ8xbmtddgAp8y4L4zTGp0EJTcHMV1htNatSjpPlICdy19/xLCaGf9IkND6x46Usz9wU43J03QJSMB5EWWEdbPbapfEZa+5byyfx6rm4NgZxt6aU8kGmtOKxzH3qWNfV5wf6YX82J+vqyHZ+JQSkeOlwidDYvvxcZkliWhpW2OfrA/oUlxiCqmVU52rDlbejw5vLShUP5ouJrt+iiFYZdlHvDcL8aG3ptvLp6kmahVPFTLQ8m6GCsIUolPG//khVqz2OfYBInPrOKTA7k9SJXL3hcEXiv292nbKRHjQUgfTjQpNlvqI+pYyxxE/pWZvz4pAgOOety3J+pzalW/uDQuSapFSUyRomtWWb1AswO3jQKCyPkT8mrcd3VpjIF8CEn6Kd+fPn+/aqIzicuBHwIcmNUQMaK2WCV7D9ktjpfhyNQxaNBUv9dyoRyQLCaVObTACUpojhWqmDK3KCFyNVoslNHwO3xFTPcnBI5ohlqBwZHjK5q9h5tJ3nKSLh3uta+zTrwBjotsghcF/aHD8X5KzstYEU73YMCVJNMV8YRAAUmOZyP2DdjH3qdWy2RGU7fU/A2w7lwMFI8UGyzGPPwWklr/lbz9QGVxdHn2fiJZzuR5kzY6anKx6oIUjfJwE0mntzxdIH0xZPrYv2FKr1oUaX3LR+rhx7LddbzgiOHrUOUO8O5lqZEB4aaSbfzMyUaUS5hVML6jHfuvuRozCPmjvKBIdfRc1LYiHenRVy2XEysUATEpSYTU6OSld8YGQYWaPFCabPZb/DwVfC2ZQLF2ncgzd/Fv6vLKOH5y4UG/8Bkny/eXIrrN8EpwDHsTO8vYzioJklAi0+H+/Y5ji0MgdNNsaVlsaEMcaoNDF0i6zvkNfwr7sEtq78pvH5BY7tQkK1f5g1nj1DkhYQrq8x5/1BS+gIYoum8cA+10zdMCPypnsH0r1HgvaZXzTRh/EhdSR33b0uQJ3gv+vVYtJsgx4UIrlVvyKF4VdZemyhF0Jq8iCws6RaBpi5Fys6omV0gi65/lxnn1K5xyeRsvK5O13O2eRzXIoy3sU0fJoVQ5rD6e2QJqD23ubZufHnLz8LOyzk2I+OTCG0JMrePb0D1Me67en/iC+dz9T6rNfJpDGxR7O9XLbJd2+rXymZUdaQ17+rgc2AFoVgidfN+nVKTwKPrVuzQfgNpNGT9JBR33WCrs6cL0GCqz28NkzXW4FR2Q8kaLyQisjY5J7FonYDyZrIbLwEktcoDE48trx+zN3MSMnDInSWQ5rv+XHoeRzrtIjiY+sBiWMIvBagkBkSlakNKCpSEA5lbm2TPHdirO2wvmgk7NllPabrVn1Om7i
VBc6Dohdf6Ydh8xQKhKIw669d5VmdE26cU/3000AeS/vkWVEH1KV2Ea4Fx7eEA4HDaKMdkPXmrec+eDXJTP6UUtIArvDZRQS3W+s00o2QMEW0tblqNvSFaPSdOdP+xp9sLRL/96VpobBaKX3PAI+DkOZOVooFlkiWuxHSvH/GpxMkIqiU61dI4OyNidIA/jTbzo5PIERcYy4oUIRbQNJjWe+wWXRCuTiKdB5BxeIEg3Qki7z1lOgZN/2szvloa43hxy0TJhUgIE5nudlEE3h8r4URnL+TTehyYEuDRchzHRNjBYZmOrmVegjNkoFBgzgYtXs4GOPeMGD1ItIqohEF9wW3TBEP2+rYQ+EzHftCYjH3dkHnm1k5qGcXBRL0ZgLU/UZTTI6tos2/BJo6haYf3yv1jWbTg7+pg4aeG0DZ/NId0ZDLQc3ESDczlXlP+KJzxald3KrDlp0qFh/OaQH38IdqNPTLfALEHCkH6YfnTFhORr5WAxt/5imByE9Oeeubo0PVEWJBfkm2x8EzClboryz0pv1WLKrFfyFuhRcQ4UJIqlbuSfsmd9x1VzjEDuwodljT42PdgnyAr7OWa8DzjrohyIIpQZrmEdvCpOcCwe/ZoGu8/B4RnbjL8vpHDKVfYCzDLFJ+xD4FATqviY6J4OtQxmCd32COlF09T8YQ/WNEC2vo2ljXLRWCS39yfzibK0myfC+Q8x/QDevPDK2GQtByPbO3IwAyb2xjDC75k8MeGU48FajRTUAwBp1FAHIgH4qNQT7EWUZzUxhu70lHIGtFKJOk6Ii68/rdtmIVf6rzVbksdYvmGxkymEUG97WHxCfVruBb60WDN2nrnrml5/Oa1EdMCKlAj4rPdywe112RyGRc0HH5labAwWC2hjrOxQxeJSab0ogaTX35Jq23zzCKr15734AOHOdncnx6sk+276KnhWzWRda5xIf1sG96MjuiLhupXESHuoXrssKHNCvVxY+/y3r2Q27r3Nf8kMLeM4bEEIOO6JgKwHKIzmBnImrVx8ebTNk+JcEqcEkStKXbX3vGsNmzKoELx08t62A+S3OltLUvvmkrhr+gKXSKu5OEcSGkho159sLsv2g2LsyrPr3i3ovU6/H3JxqEfAPguupEJcM6psGiOVDx3uq5qBPrV4iIMET8jPsy7pQd5VIccgSiDKSFhGgulrH+5fZElaSoELfTYmbfrFvZ8jcnqPPQSBvyMKzzifA9R5u6/bW1Q11vhFDtnT8Q00NFtby4MmUIOF8HP0GAjlWmLGDkyka5yHUIZlweE8CUr8o2Ve2FusiIsY24Tcot1SFAmHbcG6tzWhun7A8r/fB51i6lld9PJwk80EUKc1ei43NJQ8EeAkHZp71asSa6Az6QqpZoXPcW2HhuGSgBktHgRdtI3qI3+5PrwkkzdvKTjK0y4DYKMgUC5JWf5Mi3imJhv/Kxf4LuDObAZFMU4Otmu7IoCM4r/wOdB7AUWjw6LgsI9ekpF72Hl5aVqZbO2kuQJsrqfOPCpsLU0XMU/GS14rtr7zXVQBEVXwL2Uo5Ev+NnM0p1mV4EJ/e+txTNGB1cglR5ZMkwF0uvj4RDydMCka3cELIGswdeXIuySSnInPfu2WlBCL6V9RZfNSjIKvZWIQ1xtPnRcMEtZ//rLjSwGjsDnBlFMJeK6ZxsKaD80T8QXEIc9hLeX1/Vf50nZsSJZ6dn1sHojOGYQ9PhUs3VbJrHy4+exk8zgdWJ5P0Vp4AgK5h3RtTlPeOxL4qUxc+PL2iLsR2iwF5tK2/njPd4pa8EMvT5hGA71iV6xtIyMUZS4+4Mmd/M/v6Da7BHJm/dZ1YPv13ErGIHkNtGUQwWL/rFeJIOUpeKwQzeslCMZc6Cj0JjfOD2ppgJdypm21WBZ6l14nAVmjaIcoA1F1ss5vlDEtwi5wJo0XrLVRB+cIYBIPHP80zsbtsPrlF2Jl0NK5ym2rk96WHGKtcNbuDzBxMHCO4dQ911BXY8GII
NLw4vJhSBjJAcNJDg5uIMyE2O2GS7aVhz1BhGteaUbaZEefmrZHu1XuD3wL/pEuG0PnuIuUvaPI5gfyPxiilnYacSn8S4YvmcUCAQbdIzP1Xa7cYXbYEGZqrlOJjpXqVvfkH9SDywXThjBcxyhZZNPHxPGPXV36YgDaF59Wd6kFTd5/a2Y1BR/9gcapy1g8BgFC5PDkgdIhmKy6/zk/xgSPQCbHDbjFY7SSrhpZHlRMbTivmyQc8C2JJgjGWCTCmaOTQiyZxOn3FxLpAUuywxsMDr+1oK27FJm8yP3LNC/WewThaUyrrUhVmd4PIFOcbx+1auUKdbZ3/HGiV8NQeGTZ3NY6ENoVEzxPWRREZjjyMvtvLuWtD1fD+JYTqhpvsG13sUyYaIPWJ3Z/ld7EW1/JTSlt5ICE/deOzaF5BLzfuLOORUz1bjSEYYzQdH5XbCY7VwTf1JySEdFCbVv5di4NhOV8qXu40Ljr4r7jvN4e4/R9+/zRa5Xxux0Z+gb+VXXLCDx+U673rq+hull8ktPERhtV0wUvvlIAIwco4nVRhZoWMRtmIzUgwYBwTiF8miN8dihskRg3PxDu8/vcxuP5AjNAW6Yt0MF43MmBL2/LaDUjdmggwfEitGLfP5sdiG+80wzlo2Oq41oRlSIhbi4Qn9jr+2BYj6uC9sQdbce1jeeVrTiP3E0jNLBw2XQ/nYEgSY/zRhqZJgPUbGZtwiOnPwpbTf36UrsRUsBkmzzC2KyokHBPWB+b68BPELfum3KyBYsCs9rL1WHMuSiWL0a371CvdPoe1SUseEC0FDH+kswlRFMAP6sFA5LlMM/7ypojDiM/+2TQvEycnu6MvRfyPRav0CHQQNy/0aDCVP76VU369Mcg3Uz5tWdhXwJBg1vNuD1dWvutBdphbbe//ki0j5VmQCvXAi8lWuiWqiLuc19ZkQlYhfGPKVhWMGU0M0dg61NBuE9moATSMacM9bBgHsYH4l8fjjgJaFt/K5xrmX/ihcFxIEEKHcjPeIS4/5jjefSYu7JzFLEIM8ii+YbfIFwYDRDCxAg0DEkYdOBke0XWrcGRIqMGxcZdNae3rJzfIF1ffRPj0X/z4hZdMuwu1yr38yKc9QOfWQ0Lt330BQvSWmHa6EOHIWeqMVjQJMOOzgoL13uMxFtu14wa8tyJ8YOdmpT+Burrkd0FENWWoEfN1TuDE0bXuStJ683gOrJYVKYEH61VlHC+uAIjaMnhNkYHDVzQlplpYNa75NMPLufic5poEqngKl4KkUwW9HZnlaxPThI5+7tw2ElAQlseHE1PEuGuQc5aAbCM4lt3lCQWqEH+w2SQew0soda6IhWQXsDNe2CcHQE1sz0Ens0/QrejVXBBClj8KyfgbwLbjOaAU9cGP20P7P9cygjsO/YqgO/PKpVedhtBeom3pMEYcr1csU+X2P++ec7Is7fzqlLxdSqQyA5xAqzA5K4jv3effQhGIMeXwdOKU/BZTdGLIWhQfzFSvLK6uJZ6wSFpDxBSQ1VxJxqAvvMougrEhE70QoMwhnNEeHIa10fkrV8RMUbhb/uZUk6GN5KPQ/bMB0GcMpqcWHZxcWJ23/P+8QerQ6AdUmvsfS1DLwcvTvHMnP9dG76DmY8Rix/S0L+J1YeS+xki6fg3alJXasm/TgCfYxBfGTkg4mTUV3T8vMEqa13jIWhEjPXn45SXLH//aZhbkYqGTqvNbNlj6WDxbX3y85miYti3faSl+DJqYp29r+EsYwmNy8MUDMvzWiJtPVflOFZ8MaRRmLfnnVaEb8UI/kcpGN7qY6YJXrZBr8I99ReHQFUuOD+1NfU0zr72s7CDeHohSxNzCxmt+cg8/UYPUeK5skARkuqYdHH0Q5bB7m8nbHntXcoa4G6Yu+Z008eHq4XaJoh2hQrzlQqtYDk3n+BhZeRE6UFr6MTOCUgLsU/AT26MJWQaG+pNrW/wJy0gnpvpkaAJZE7+wJT71sRL/IudVhj/xDvFh
pyDT/a8cKvaffEo53hTCtUmDNeXMw0AEXKrySaYC8stSrfboutnAT+USOYebW/SQgiO8hqW+xcpCOpVxJFie8x7k/bLl7Jov5o6gRR91rLcKnFdcd0AiNwFIZU3yJ/0IFbwDltHchQJ2GjL2iCyVXbNmU8QB6tjZDSQOul8cOdQhSkhSE9ysO56lTn9fhe0YNfSzlOgj/Be66M8VPIwWRPmtBc53cTFFflBq66Eo+f5tFtAHVcdeHwjk2TeDDZVVf3pwpaj1FhXNVU+U2OP/9XSkA44dOzUC0T1uLEG6F2hgaOISwVKscfLOrZbkkGzDrFOHXwKwosv6UjwbvTTVhKN3lVaU65WDJ4stWbDm0LMM2k2d7mNQfniOaYamjpq2jrI8w1D6PZcaGIHVdEq/LfnlwGI9BCtirOOGRmvjibY43AtoAwfN4iV/+JZrMgik4Wo9C8uHK0sojJst4/7bvKDmZ5/eybRhDWU77LE41OIrIY100GyaRToHMO+56UOAnIWpElPXC+NGIm3suRp3RmE4qsQDGJJe9QDJg07/FE1LwR7UWA2+daxTnvJ0PsId63bs+NaPvxmjswBWK0C7hO9c/wAYGegAzKV7JqgVLlT1K+DaWkvC8Ob7Hz5p8CAv4GzZWqx8AXuujmqOsEw2ZPT/t0JbuR/pmuDdO18OiwXago2AENiQ/dWliyy2VaLXEw9ToyY4CuQgLLjVv5Ni+rsBr6wtBVgdJUbUyiw7fDuPrybzf5YjulyvFj8jXKPQRMCexSoCw3FzKvrJp96kkkWK/lGHsVato989ghR4or43OuLFQF/Xsdu33z18lAcy8k2Zm5qOdqZOsqNV4ODnzHkKuVGAtsEsEv6QWr/IfCnfwhqYYeucttEqHO8B9zPJIyh4zDL3alwWTF4JZa7LF6WsvSkLyA5AVcogkohG8U6mzP/FDbC8L3l2AwPfkAzW+sJdDLLuIAF1dHA0N5wUfK8bnTAYLDqYKFaD6U2C80QSdBdo5EoEOmeiTq9aGMc0v8MgsgZoa2k/JRVkZ2HYnNHHQThSYgcLC4AFLE/wZ+ekXemnFPPjVX1RTgS+A2KAiSrQEcMRugD2fglXf8pJg/DOqx8IGdADBwa3FeUd8M5YyFnIoJzwwkqdNZveFfLnNVmpxWbGQWj+ITGf9FEbHx3a5cEVVs7nhUiPAAwiR0oHPbbQyrUwTyDxvbnrdlqs7vsizV+rL5qrYy/7mJmdgHNDo0W2BsLSrRpEBQPpEwC5XzKLlD2s4FwB4eLegtcZd1AzqJ113IE1969lnu6oWivxO7E8hSOCU31qBnhkLMmlYQrpGzB1UeumJvvreZ083TAKl2clwe6sr25r4HpDEp3Hvp+MVix9Hi0CL6V+0H+URfoiFX6Fah6w0uxzOei6lOhesRut1l0KuYqaIVVWTvnPFv/kBo/mDTxNjl10jiUeyoVBeQXeKGieL/QM1i5DCsclyk3uX5rhpiO4vYqPbPjczrRY7ZUgdf4vpzJhUegHI5583trjG5+70VsoGnIo56AbrNPomKAui8pPNHyYZopTep1GgZQL32S6trRERIgk1AG0F2uwOTl/M1J6hvipOyPKhdRbvaUeFWtElavjedUyjkBtLDVPn+9537/HrHdENAacfjXrRSH/fINlYwsc3ywJb4yWZTbZ9lNoseKpxh2l3GFKGm+kfWlN599jUmH9BTliPPwERkfkNYpn6bry00TRoLNn+sMlg4ozK1Zm0Ar31wyf4WVD841mUU5inlKJblrNObaqo7HTh2pKKc6IFEeumI9vAL0/+vyoGln6F30eiY6TsETQl4v+US36zyX7UApeRek9CW5xe8E9Tpn1IcLSh+gbjwoceZGvD3v1xjKIWyW99oMqFju4OMrl0ynHg9XIz2ctuBUUt9MUN333uVQMFwbNvMmd1yavtxFOkVEvr+p6RuYBWPcARRlOpW3wl350bWDni7d7Rawz2g9BJnJhyLyfIG3iVE6O
IHsW2plpc7hVySaf8rlDH8OvHu0OuNjssZv4s0IXrK/E16/6ot+4i1Io0D0FlOyOa9Ze/7s2ePa8mGmUcudiOaLR51C+PC/Q+2QwkrKJxNybtIZmQTJ7jMBKKIj/ZjiXm9ZKs2pbr+9FQltWh/1kU1E4OpJGrp6hUNbXj+B4ZDSle+2VT4GcC7yd8rgll8XVyZU7sMiDc42zpPrh9p7A3NktoCNLvwFhxb+fcxIsuKoJDhXWTswpL1f1/XgJ1F9e+EQRHb6+aeE4HdsoAhoaqJI/BsYyet0YABDJt0AOO+eloNTk9/8u4Spw+rKEqzsZ9MwwIHNRlyKw1Osf/P7LD/AIgvx04Usw0Y6Yy8kEYm/ctJXEPgPmT0nRkKWI4WpB3Z+Ysk8FULXNpRbLLzBUDk5wVbbIN7GdD6LMAK7MA1POrMI2+oVIVZnjEAD5ViYZZfNvolKQTJuEcOTMPzUQrJKCfdAbYIU8UiP4mmq33oWEJME89M80vxpjG3Ju9gM38bjcJDXAwe4HOACfWjCv9t4qLW6H8pwAUZij8Pz+G/9i6pfPXl/Ayd3XG1AqivE4MmPG6N1LJkQLxG4kNjCYoAYqDUoGCXa59TEuuP7ZlHhoBLaaZLRYrUm6uUdo1xHUnVCSqkcI75xUtJucPotWgIPqQFhnRBhyMtcRUbp+hhZlMoiY1+qrsqyepyfFO38LRV/SpDFE0BGHdhsKGyKvYdOAFxdCrujzJQe/N8zFz8v18a2NTJ3xiDq2iWUa+tNRZdUa1RrrisxhdGh4SxEo1tO881zRavl17NZGRQJLMpKlv2xpwCCFRxgtfz4cN/pLCODADXr8zGZvhybC6ilIz7+MlVjYmn5rILa2M2KEEOO0FUPvjkeQv52jLXgIufiraimklYUO6FPCSqTAbsnhwZSavbFR569CZVyHx10Z6eHklV33bZImbTDAsfzvxXEPYiIBpQuRpYW3X45tOZ4+OaCEQ2Sw7hKUyLQgtwwlndcnkf1P5VsJJY3s1JjFWYMwsTkrbinvS0Fa9FsLvMm/5LUBsuBFGcnbrF6ITREEp9ycTMuC2EsyRc+UjrIpzdeP7hnCUfDsFqTFNT6MCN9lpZHin9C45pRlMUs0nV0elbiS7+nvRyQn/BAuAqdGcRlYcvZ+IyAkeBSHF/x2rSRvJo/TLuKvtgf25lKCVrmGSs4nhbcwYgipaD/8B8XUIxzp2Zr+MYaaDb6OriYlwURNcEQjvkUlo+8pydMMgi/WQ5gHRha+LpPLvQ0g+zqOb0rV1FxZ2wQ0NNNLVlmVpa/fn8PWlzBpl0TrOSiuPle+eVkOl3K5Q6YhO8vqrdM/bheAhnwn6Uhxntx62WATi8gJHD+G9KjXsjf2glvhiN4N2MOfKJphd+8cfiv+iWt1xVJeA6mJSX8t8ItOoWB7bHBbrLJehRPOJm2Z3rDsDAFwOYmvybyOudHy9O64243nLQ5qDmVXLJzJNHhM5EoX2XOahOkePpDekKm8mgxQgHgdOawZJzEjf+r+eDDhZF84gBtdu6+20XidePxVLtpovK+VMl2Bu8oonjS6KyGSTt2pnG3YJ78fiBdyzULXzihTeSsG7VySciKva7GflVWDG1/4me/4iyIY2Kpcrw12LHkvwKiJfrPbpFCuZstJ2CSJAdDA7ZajexmbzvpMKeacUYMGn3w40omPPGvPgQhbI2nAMnMcljizX9/E3/mFNn5af7yzuqbUjic4weE5U9VsjhVet/41DWm6d3jkELuMy3kkNhaGrf7gyIcjhDYcIUqjpUviA/nWy/MK+njtvdTxPLyWR2g0Enrj/hFDQl0mJDV175IscIIZvOrGSVgJ2lLtZXtmuB/UkOZnaNKE+ih0ecT6m5k9JJ6L4Vo8Xo9pDtQBgYUiLwE/OI6JudO87oNys9WeHNn4wncon5/+ga99B+Z6UOmvqRPW9nx6q7G6QeEV6sqRYHYUJKSquMdC8JQYKqvVjlooRayzdWWUlKvY
rcCMM9/AzYg8wDoU4lW3vh4OBiQ0hxnnCJ3bsE61+/+uBf55RnxDkZsU//HMvDwbh9Bo2SoMa+T7baUuv37n+3zq8n/LHf0eSEn7ZW7yIiAz59d1pS+hTMCni5amxOgtOIS1MxCvjDg8jU+KrpZaKXqEoPGZX/ixzqWvQSYU9vDHruwWbpWOphzC54W8BZXCOr8URSz/XufEak0sp7y6SWFR+ZGOvf0AhPVyEOqCXReSjqIAy9OQezvMM0r8czAdjDzJiAgSAai298G+oxRZ5hKECzXMSQkoQA2k6kGs47nr0XqiUq0sTuyCX0tKgNM6W+vkBeS1TFZGi8ZXtJbiQRetEuNCsO+5eunY72eIaPLJUjCIbr8QIt12cOzB8X09xxqTqsHXLaiKKjUR3UB3/dRLU68pSqzoi1YD7uvE7CtKQsISLYQD4K87MjJneILtHZPtMb8aWJXeP/SOdBDjVbnMJRafGG+3v1EvNrbH9RcGMvstMbNE7SR6POHQ8GxMiyDAjmZNGmzIzz087PFPBhCZ7gB0/izMSIUXiZcXJ9/P3LdyoIkh12tIyQbGvQu+xWns2VfxRp8cG07OyW0WlYMGiyECyyPHOuzhUEnxGkVz7WUeGXZozuvhSJyZRPwox+v2dAJV74MK5Sh9IdCwxYR3K3MMnUGA18InhRgncOfqYE2/9aIW0GS+/JvLjInmT4IvK8VJgswRiwPFn5oVRTMSujZLAGbxRMFvxCtzCFT1owI1k2jJ/qmDRkuciu2F4FSGef76PLo+uwt8CQ5MHNkXwF9YmyRNfvYGDmhJym/fVrxPN0sHhbS9J/MloUpp8JKl7NZ5k0Mc/QwfYNEuTuD838pAI3qD96vAGYM46oFYzkQEgRWg35SDiVlMne2kAd0EDg2CmrKqgbWiK7+vlC0EG9InRwQNSTMaU5lLzHVBzpb92AZW7HZT0nw77M3BuYsegWEav7R+PZd5tC6oXKrdEc7+a49RG2fgPJ5H9yxqrWAxvgiJ/erNg1tqF1GYwADZU0VZByI9gTt6dAD3vTpbcV4KrareN47o/tVsbV0vcpvCcBjvlzympNBIfrxMp0GWDMFWxukqUdsRONnDTFOBjub8DadxMbqB1jthf3ZlGoaBBIezz739fau2ZXNH7MQDNSDg34wNu0zqPmLgRBOa7jBMxKq3qosYwCLsCa/2hu6/uIxgWCw8WNEKt9Efd+B7TIMdIxYo7OHgkb1v/EMsDB9K8yTLyugp4lZp4q6x941cYhtOIvt9q2xh0UfupeAvOuAj5E+7x80iPcN0SJuxBE5quQB6BQMk0B0EnyUAs/7Ht6wD0Df2vbBE7OxLeR06Wsm6ShiQS6u9NPj9BYqcImKjElwyA8cDkWKPjRp4PrRheJvJ0NwIaZ3hJQKXzAvFwahV5tTOOAvET0S2fpLyVZM2hvGFZ0GH9xNdhE0augBT5LjKTwzfvxRbraxYxJE9S4ewVT67kHwWh6P+pFXmeh6Om0060zJI+EyWAkZVE8WxFw1dh+B4D8pF53uCirmR0ge1JerfSwlCTGHKjLb+dZttjmFLVcIVEQDVNHiwIVO7uap0M8N5bzCQmfQBMfKbjDipKBO2WYOriJnIqEzKIFZYESgVfphmugo8wkFMrqazhlpzqeDtoZW4E9tao28CLH8hSgYQxc0bfi5VuylQRxXTCpZa0C244pNdP1fU8oVj32clZq5JyIRWlAGxdFw+5iM6OTX13w09VA/NnFinOzmQ5hjrS+LPqa/hhtsHTn9AY1LmQOsCP+ooK/S7HXufO3xCC0KRUvNZygjuN0qenFoqKbgMJMAvumBz5bZW0kk0Lvl2bTYTI5FgXhg7CtbZGGOEwxolqEkUuUIEOVGYBSf8+5ZKdTyBGokOx3EgPuNZ0FgzZE6nbPdXkV9UI5F7RF0a3Eu7rYyyxA6UWXVd+OyXDBoHLPB6+g7zQ4XAr3IcMxNH5mQw0zaBtH5f7H4kPqi89rJEAkiQ
JjvRIFKGYgG8U7d6gIddVAbka2Dpt5nhmOa3a3ZSciU6dhIZitepOvlZ5nYuILyP49vE/8FkK1Parf74hjzbcr3Z4yZTtQlrm5yISjayOEO0XD63qp+xVvnq/L3Kz97VsFhCv1qrqKz1M+tqykppyuOt6UnJ5cURXBAVxB4AYA/CnEUu67MdN/PO5d5wsjEoksgVZ/VncJvhI5e+INRF8fCRodkoEi7SlGXptZMJrE0ZYaIL+KLU9dpVxo2mCV5L5gDj+As68MwTpNBYcxioql8Sez1c62VJy+r9m5IR97fZvDcZvt285ESyg2EOylFNHRkyWT33zsW1Xy081lZoFFwxuc4YCnpkS48W8x4lbguCQki2hyEU8ojmkTUjinuUSwZIK6kFwmOZCw5dwW1DfAaIoqUg11hkh6riuh2fhQin6CD5KATwhHl+VwYIQEPAlZz7W1haUzusrxCduN/GV+DeiNUQy8yGYPuHHy5+0OpF44RIibYcug7Pzgsd0MNQ6QIY1t7QY86zYbv9AZBKwJgm4imfO1XIZcN2o0qqTRpPVn5ODdEj8onDMUUutW/xiq9q3goC4oypOkzwKg39MbCNKuI2bTv87P+yS068RboPyzgYlmn8V3N2KR+fzfy8hbxTXXS6RpmTwtzZxzkCOViLIxEp3FJ+pBcjrjRkuEMCu1hnH3wA1g9M1q7Jpy/OLkek8KLyfnTFmUS2nRkhE+b/HIwJW1olHHE9UFm/574bIG6eKIe7tIsoLkU1HslfMB8ZUq3vUoWm6d1WtuRd2gxWn/y6LABZeLpV/o3EA7Cvw1Y+iRV91Va35ggtqgEnuiG7CwG+3xfpf59KNcpl2Y0mcJQuR4WaRUVUJXkGndah9uIqUPGvA/wGt8xnCOuWi02BsMR1tuqU1QYZS6rhQQvCet95sHQRwyFKLqdV0fg7JoiQyddyKTuJ0lkfGBcMkysLk2/MXz5S3Y47Ykcc+ylMjzseYxZ8yvTljBUDaBQ/pV8Q+6fk80cSxE7tVxzh/bbYfWnQN3U/7LtWog+JiI3iHb8HcaMmHZvYcnHHQpXoOBJqgyNJITgvYgdzMqyAJzQhzAniJhRA5q17ziSQQ4oNKOc78NoEXnDw9dMvEqt8wOFsh0DxGDQTKut07c3Y0LZCzkKQz0w1FRz/En3K5s9sAEofyu3f0xgJl5JPR5QG3j0OcpijlD/uXv12XlvczylTpP7bMP0ZEaZzBeP0CdFgzjFjpRXbpfmJFdiKHqoxSkP6l7eeV6onmPwldbEDsujLUyx4oOFYzGh4YHRG4/bs3A3WA5y+6F2fRh4RdwzXpZGULIjwTUz5NMlNRoYpBxPlIrjNecRlE6FPgcj9kdvkJkiDR5MKijXxNcbgV/Sv98aEvNN7ik21EDnk+UqZX7N4TCqWZENhqBKq3z44OkHjkTV7aV4taqWRuUhoy+N8VHfERADR/bqxUd8Uft8CtEB7rLWmv3HapkWhvtSPXRoly3Lk/o9oppZlfw75RkIdhafdsE0Ld5j07sODFbh8Hm3/2tZQ1rjK+BCJU2yJu3m03QNHsbhG2WWzYX/njrebwZDVmRxlDZtzt739IeVqWgsLR+FKx77YRHp+zbLARFdEO4tH8osOY+lEHyf7iHgCPUzSZm8hVA5jhNo7SPzQA4cWHd1NFyf1DJwb/X4/QJ6V5Tj3cst+X/9f6tYHsKROf34IpvgIEl3z4jT1FS6n5ewAu9jXtOuTeAdhG+EHcTTvt3B5h7GiTle03BTrZ3kkQ7DE+x9K7ZTp9jJ+vn9RjBv5PGSgwvwM3pKaxS4e6/XIDIZ/5MbKH8LgdzARVs4Pj9YdYO1xvrv7f0J9Lb4xv2LlNdKT7q2cdEGQeKlB/dQ2eHOKTwjlXmQBR9Kp82jaHSMSM6jF1NG5EVUL7sv+mYhQlqQyqLTVXoFEYfS70ka8an6z/d/d8g8XpWTFG9+3bF8IzaILkSd+zvV5r0KFcXAmBxl7pKI6Pqmz
10s6p5faMsL/y2S2oLMg0XoCQXjTN7XEgiw8KrjMP+HWnIid/O3X2PvSFPDIqPcuQsq6pV0+jgKpf/lwGZMTDux6ieviXGO2jg5+I5nmKW8/DHiUTMB1h/bYB5bQQgsNk+Pw7s6AD5262omiTF7Jv/mxZeNnBGhknLZtDPQZyucBev6NBlPrIniQuw4vyhG3l0akZ2CYw1AZWdOiOhWnTbKxO8j4a3awRP0BV8X0+tBueE2+ZwgIxPxl+t/5hHvAh45gnr6OU5OY+B0Mv+qecCtcM4ppZ+Jf07QAp4mrJC0/HIKhAhfe9TvRFJHnjvTIpCIDw1Qmt23kxmlWr0l9Z1BWWQ13klnd3FRpVDEgYPMitOEBRvjza0lCKbDBg7Xu6SoLWmLWIqQAs3ZgscgR47JSlUQc/ZL8Nc52lnE3uEZQ3w4PabG3NUgmW1k1mUw9mxEWY2XxznD0afCa40923HieGvkezS8ynBTrP7HqoSBQ6M8SIJGtpciVr11uEmganAWvQ7GFuh6MgNoCCAHKr+6D1oRn9tql35fZp8ZuunWEldiswn4Ue1MRHpvPWzdzJa9cMWVJba3j37vMiJ402BHZoc72aNQBCPknEcfhvhIx+n2Cej75aEss5+MkgPFDlWvUO4udAu4xOQK4loQkeQ7M0Ubf0rALbyTeEiwRR886NgUXw6l3f1hzRtygswLkJ1AhA5zaHX6w/9mjsEUYsVotOdCf2NOPBep83KExu+fHhEgdTy76rcw1iw2MTJsgUBU1zYBuYtpZRdRFmdXXT74H+n2YBAKAgAANNu2bdu262fbtm3btm3btm2bN8QN8ti4qu07+d5RDPX61Au24eRRUb3TWXFOtYGu52g+a2WXLG8L6xdPMHgNdpNisT34+pOPTMFzOji/cyq2spfzZ/s3/6RZpcQrm78HfoY60yOApJVShlXzkrAvUXiCaS5DJvDWJF/FPfA5jRcj2Jto2/Xz7uK0/hiyvWwpCKd1tc4Nq8wRCbQCKjRE5Ssrjd/0VyP9xymg2wU8mFgqEePAiHHt8Ed9nUZKdjfhtSQthM+3oir+nltC52Gk8RPc6g+jtOC84txFKTUpZYLJrJAZCcS0ByvIsc+XWyeRKW50M052OoP+EtxkldcVv9s95sXdvSag110H+WHwBJMOvUzcfvVaMgU9GUPpTtUEYR1c1v3Qns0C8fhHJjJC3bLz9EkO7WMHAjXLjdmLSm+0y7JAzKiN7jpIlwN608gPa08wv2m0oJWNqZfnL08hgg4zdwBXRUb642IsyAhSBGr9TwMVlsWu0WhVSW5CPz7Zts7GQhEIdCqfIoMzfFaeLxMiHlEzeujNcxPQy8fwnBTCJ6EXSzNUUKoRmilMF/ewD9FI+bPoQ3h3z1tGZkZthXfDsB6axsxC34ri7e6838mA9DerxA8LldVjltqzy5PTshqOPYBLnx3aaKaDrzzV33Hvk82WcjfxNZOZwksCulnoGXq5BDxBDzSBovZnTpPvDmLsslajLaRewaivZ5ytGtMbH2gUov7GadPwja7wl0co7fPOuZzGNjrVYQIV9qm0+9HLye7hIROqCYwdccmHF0FQceugEoWo4al1eYrC2ncIUGzS9tOb76ZjjgfV3neURWcMJVUXzopLeVySxo7uSS4lp7u07o0CKARUxH5O1PIERg6sCOTc/QHj4XbNCg3JlQmE1TLKFk/S5+kZJDGAxCp+ezFA1vJmLZjEQa/IFT+vz/YMpAwLYZalBwAdx9LxrhH6CneV78E9FLPGTexgujlVs2Ridrfxg9+B8Gkw7hUjZR+E6fu1UwCr0L3z03lgTgD88uNEbFufYYQ4hbXerzozy6jzt5nVvAeu3npfGr8gFBf7Xj+4i8ZjPROAqIU5XCxH1W6Rup0oNs9SwPTNANHTt3a0xD1TbigZr212G9aDUevdbDgZD6fLgyUwDhD2j/YsGow23mkbSrzX2oh0N6jq3YquFL1RXQaabMGjjkHYm
oa8zNX+mdDHrJv/GesKng9nz8QGYyeHRWCKukltmsDIINEMoy2UAZupZNOV4IZxDGZJwSixFrGVu7EOUkn62+Wxsf8wVMcJ94LWyWwjP8cSS9OwQXahOHv4dknHr7WQsc0SINuLq03uh9GlU3c9nA6n/oiWAZOclbDtRiFtv2aTY5g6dHR9JYD0PQStlfDyvXJOhfuQlOPw3KQDsvBsOn8zNdh4+TIu9APugZ8Z6cI5hEdnADkY5talgq9XNb4HT26/nZKPmXj1Ye4XKKw24PPojee2qBYGjzKcT4YhsnP2k92JCM8VwqWeKoKeJvHAHqKgyoNfR4uPRoL3lRdqnSW1i+9N1+pT3PhKVH35wSDlZOoM0TKX0XRWjvOIzELg2F7qjD9YCmSbeQr0CtDHfrErsfNU34Dy2NlTB0ZtEtEUI7X+UAkyoqMok2eslZZpOh/ByaGpgGmQ5HYXk8XUul3nK22KKJgTiJpvxkJGAjXn7G2OxaVpvBELynPRZnnj0YwrMbRi1aVmQm9PDhxS6I9W5PpTmPcax4wDClvMXsb3GUw8VJ0HTtZAUzkjpeVNjEsO9pcQf/HYOPrXw+wpQOzsgNpAoKxRA1B1T5SdMfu8bVavqGKXrDd21VIVe3R4vWh/LVib8UxhtOPeHxt0HsbSq+ZL2AR1aL8AuWNydN6ztQFw5BjP4rhHNwRtpPHs9uMREeewinam2RimINk8GzY7WatLEO1cDaHyK9SYRPNXgkHLBfcudKkeYR4cpGMf6LnXGpyMpJulMDokrQR07Yji2o2w+5TMTvDRE/EuNjBXZKYID2H5fR981zV4feRCmkPAxvwplSp6wpvaqhfNUbDdVOz+szlR6gF+Po6AiSyuZ8L4lxslhg0qg4ImfAnDvdzGSnDB6khZAOnUVWM/iBBLfMI6eBts4nxaY+rtDJX9q8iwV6srBd4PoNEgYouqHP9u/z4RtV9nIJRr6n7Awj9w6Yv1h3jV7hZQvCDiIQACTTx3OTF538c81soKouT5OVLycgwpG6RX00BYrf1mapT5UQgyxtm37/45pcS1KjqYcy6EjWXMj+2WNt/LWhIhPdAUwkltxft2xn3QZY1pD7EK61F8IX+VMF2uSaoFME84cFQrQb9d8y8g12bTPPZk2OtfB5QsL79A+kD47EQDbx428tzl0yN6tRpcGnugTaMO+KQuPviL59oqumnUi9uWRywluKx8cHUPTN3Yttl6xfuuzGJV7WtIyr0IKk+4cvEZKUW7PW5ErwJd3WZUIBVVKKfYvBm9WikXzDY3O1S0LiD/OhWJ84eeNykv2yvNkZSUqSw8WyOgnsrJ3GIdCklaNeSy35IaqV2U2UHgB7g/Ze7ZWc0Yz6UL5iJGgCEailU1JXo4TqnhpeFE0dbms6wc0yQ8KehURGsv00s10ETS5z1bECHKm/a0WDre+TCxOvobIURrscIJKioXQHg1gffOY6QcQxx5rEDlVOAJ4NiCZieBLhmf0mWtQFYMzZZ4gGSijN9llfyQzh1qKWAj3DAruFjBikbv5u39y1PFPIkGIDyyt504UxiTDdTaA5cE9yheOPST6+0Uh9lqglgiTPIvnHc5blp960UvD1XlNfgW/WlBjAPWPksTeXLQFoGgW1tobTp5OEHzbXNmrcIaIat+nq07Lh5wpGb25qx5KxPcooPqNDThE5hymC3ZFZAQx3RfvUvgiKMtxgTjOqHXrqLEX62OmU3g67RGyMNwYIMX3AfgtZy6O+pbHFGPY7SMQDtvKT7as7zcL8tk22EgazNVQXTB8MfAKOD5DGedpc8GzxOK+pCOdjPtKarbwkFmwyVbm3s3aihJthbfYfCwsXGxjBW/S1N13GwbkLPY8K5QB7zdYjoCmU3zlr+88Sj7Kf469E8ne0lCT+C+hsFlygSe2ZIqU+f6EDmgz7e9af/w/Y9CeX5Gf2M6xMJw+LhXbtRXB/xzXEgXRKOG4cL2aVWiGRmx9T4URyybTHKva
QF9GCmRjwKExWbhtJlk4yn7zfIPHs5BU8sqYa4Tiotv9ifnJVo8Xdx5X6HsCExc+ryfJI4kf+LFCZ0C6loQdzWmx1fAHs7q4gkeBnBHQkEWk/mLYuZ2UckpkX85q7ETf7hc02WfL1xh2I539lroO1g+a/62AWGFLU9+Gm2ytlNf/cnkmUR6YRaDhu8XzMoQLbSZV05Cw+xEPCmIplARC9qPVfbNoBDxv3ZFZ0kNKh0LfOXyfbzN3xMI890CxuR2MBHMdL7ZRyI/l0gXRVl+pJFoEDYdgGZcYGFoUgauQIol5nzuf0w87WcjwLgsUmv+lIJJbNS27AjgeVwnWg0F9L+4DT2D+s7ndNw+CUn5EQQbTo1UtaWYEZ7B2RtYxPdVCZjvVlupjDSPRrvsrpE0INYXvLSwsuyKWRZxp+HYQywbU4JE6eQLks1auWMQ2aX5pyTMUI4zNFSJTDuz7RN2P6GhjFm4pZjyXrSm2yhDo1sNRgAl8tQmPAcHL2ii56SjVBhwVIZ760maCjtjl3Z0H4Sm9PGNuWMaUkgC1CUiXTFCynAMOeIwcQXps0P7FklojES1KlrRZWr0zEF5OcRaAR8NoSKbHlrA+wJwHLoFNS1jwOGfL6Jr2Ibs1SHUmGn0VC+ft4AgA9NN5XQNBjvKoykqC3AztDTExVsv50rUc+ZIyVdgY2GEf2kbvXXq0WoSR3vi9YfSqLm1GiW2mpsAy5jl3z5a4qbu3FinND/4avwqVxkULSfsNpRV8NsmpP+0piDS3fmZZCRuAb7e4uuRMAoZXmIO0iyUtj7O4ntaxhyGjhUcz11ygpAlfqyKeno1pbz+6fD7w5jR9zLSxj+0a9k/VeYq3AMK1/8LkZ7hYduGORxIwIdlTg5q5UFmKNam7hKdf95zbPprYJt88+3PXGaVlVW5d3E0dadtCDBTjLM1uF58CrY4jaJXriaSmsEXKN/cjZBk7rruufwNG78GiZ4QyPeAZ+R0xcNuAp/wkAFylwGzOXbjdqPJe8TPoMh5xc4cmU8jig7nk34huImGSr5Wyu61trOB24DKld0rgmpDh6cz79c5Kr+SGdTQeCpduhcjnTuLAOlNUauEO2IRmQ6WvJqViDmSkRFUaQGrcrUT+jrys8hwfZdtz5Ont7pJZ9LGgrZlslk+Jq4BVxAeSQ9PHj2WNQDSexWUq9hYOKRdtg5JTAHXucHpx1uIfaXMM69svpYvNBa/SWBZe70vdjPu31fwy8hol6Vm5RnwMARIW9Z8OLUhoEuRutm/YIZLKo/73ZAXFNOe1XqIvtdh4QdlcNdTDiXPjiJPCd8u8BSpa8JLAy0O9cepUBJHcoI7k+IWoXXBlVQ1keg6PHqU6SeViX4zJZdxzAP6fWKP3iwqlHaj8WNOZpFPYn8IeD6gXA+GgYVPX/u1+sirJwSXgHJ/Zyfs6tS1ghHbs0nGFIt/Rp0N861zxk32UPLTbOONx//nut5rUjbno/h6MQ9Ngspg2+xNFZziU6EJVfwEm2Q8YITmJB+VG6kwvDb8IA0rFRnsRsbM8GtZR/PXESmef46nT7Y6sAUwcTik+T7BwkF3NEPx8XdgcTzeU78uFJDUH2BaWkqbAwn6mwPQ22/nDGMoEsCGMA0w4n6/yh1qGYuzYnC41Q85nD/gRLYohy21lpMf4Xhq0Khg6sS6NAF9aB+6l9ZNQjYFslur2lg4FiyxfvO8+K09Mm5jO1JPbNFmKMxkXbUDgn/ia8zRU66y/WklANgI9G7Py3995irHYhDTUIxKngzSIN1WMWHzI1Prxr0tVg9gg4kZMLRmfmKlaleWTyHrVmFTCV9jrNOcxhtXlkDqtLom5HgCDcu8Qr2CUAv1wKRiYzFtp+OWRr/QDfEHr4wNZU8EeSjPz14+ihpHTzyVkvRToSCFJVxxcocx46jM3YeoLY4RKbHF4RUEeNB3eMUZRxM0uLOQjZyP17co2Jdp2EBwzwZ6LtwWb6uX+4IX7IsFn/roH8tUA
dtYzs/5+xYQAlKeUZTBM46XpKhoIu7400kuIh/YP2DswWzmV1mV9suNlqlvFUdE53uph5gIjrImz2m0Q+LCuQhQmOviGeu7K45ymc1WSunlejNdSNaEFWdx4RRGiTGcg8ttj5ZOuX9CzKLvWzjlWnZ1T39epZQaupv3chsSE3M5mer/IoXP5hnumP3SnhacCEAZZHHzs/B3VWvMhDy94hJuToMZFmLqOalNdGeqQNNZSg94+zqjKgokN5+Xrkr6M5aBplbl0DGBZ9MHrpo8dQLHWBGW6zAU0enDUvBa6gGPw0C21OQo3Wk1dH0efP5tnjjHnNW5fRV3330GQNPPRvTGifXSoiOYOQqyfqMTj53cEoOZSq2yvzopW5fed0Akhci4Vx7VN0mmW3ByPB4lC9csoRS+rjLqSfbYz+2LGIpemFgoqkCfYwtQdIPUpm53+VgqfricaNliqNIiN2kPoihcdKNA8zZUqpHueKU5lOg0k38tgG7L7m0MaGlto0j7GnAXJxtrQ3YwdBsdwhAt2Cv0T5gnNgdwKVlwCDNd4ea7p/B2DXUYGB7eI1hszLK38CmpoJa8ZsGeQgsgdSsJnDddR+6c9TasAp2ePSMxF8VyTJIwC4axQP/Exkf61yGtkvKdUjn/4tYqBjUqWYpQJQZW6Y6DjHVIAOSsgS6RbsH1SzpMa5jass4PhpDhGzzNgLyaAT+4sFarpj7by8sUUiO8cEDZOh9gq/i3FIrwqww8NO+y4btYi2agp3aGKS/3wJ3ekDkoMn721KMyGfk1I36bmhIPBdFZ2WBbAKGZbCrSL/JuuzAQGAkYJKqA6iWrAlovaFToGj/le5zTtspSUzhKAuP4PXDDL590x6NjkRedRhwHng/9WHeO/AXeyvJ6nv1iukgUIyfmKAUJtgywVbd0tFIjVoIMautxU1+ot/MgEsvEuPMZkh18SwToVEyfPKZkgCj7t+bVdAbdpUYP7WXpyH354jgmrfM8l7cJHp3fvLOu52JjCTTTSKHypQ9uUxfLOxbkyFDeHHrjMVlreJAn4EA7KVuES7vKWy9F2cdHfW42V+6Y83k/AjMEXDNO1FXw4oWPrwqw1rfXwpLLIuSeSIlin7xx35Ui6GsVFJeNRfhw1OuVJsSFU12WfZG4vfCRn97KUEmH8Q9itnMPFNdMR6ycCbWThHM4vGah0a1mpZWBEwP/eQi8tbWTaOSqxmWEEUgoQCjAi3OXND45q7KS43COSwJj4w0XMdDJKL/ndo2Q3Qqv9/bk5hbMfSvbamBwLhQJWWXMO+g7cfE65ZSjEZ9WgDKZcpIv/94pPDy6SQUWRQEa08yan5jAGJzRtSO3UwpCi3BHEdhqxhNt07+OIY4CxAYmWRpBfUuPfA4zpeCerzrB1b5vGOwANR22pYo8FnTOuZVUTwCZtNcghx6EtLus+4cVq9i4ihzMZsobbZJe1j/XcPM22JoEg/OXDCJPIanv0OVYKJu/ICd2xWQ+yp/WCMGopxWehPNqBueK91kBat8qAxaTLqaB7OsI1L99KVlEJU1npW6VXwFzWA5qqZjUs9IRqp0lqlpatw7LAusg9Dw3oSgmHnM/g4kjZ1y7f/V1065iv+TGYtX2q03iNzGv2m9bnr5Mwqgq2VJajTC/4/IBaWBlzWeyS0zC5Idp/YIJr17azxYp9EM+d0b2n3mNl8Hv0SWww30+RfzDUEIE32OBCyCo0qn+cBJfR0Wtui43nALFHm9bWRnUpBqcXW14an2lIcXI+INOQqJEjW51BH8Rd7sCcP2aMFqzVz+1D5odasUTaqUD6KU+RPGSf+JqqQY3Qp3Pk71f+R9bnyhTuLvuJJ/L1VJkAJZY28VvRvGlJH13PFNAB4k5469Gkn6dcs/xbJTwjUaqOc85XtHD9j84vv3gfKOrF4Ww5Io7JWdtuuMbxPNx7dqLw/WKn53RhGMfuxd4bRNVqAYp1NR9+ozlPnC1QRsCbwWvyfw3nyEdPRQMOqglB
lcT0yzVaKfJZfB8zGNKV0smocP+2agwlwl3TWz+M01r6RxIP7FvTwAIlRn0AsHDfsHXa9pYOqTwa3CmX4ODY9OpKlPISwWhtw1G7pUAyCYqB/lZcg1IsFPQIeT4sg67tj4MJyYLCmlxLC9YRucoL1wCQXMMIhfwCBiO2NMujbBheRfVPotSs9ojr1sjiUYtTfSDmDSKPXrFKIEv9HQH9HwAgycWkjFOQRxJB6Gh7Y+5Gg7b1gng3A8tmldXWJ4tCCVVqGcPt6VbvldFHbyvxSMAvhuvVfNYEmHIyLa7rob5iIxyrEEh2PF09+IHgbcFKzBfelgeubDA3MXnKK7rulUJUw4G0DQoJenah8yVN62FtYA4UWZoVhNbL7r1NMWlPHUDCOy8kbqEN/fWMG+xxwZfexOxiXhPftj4gUUTlgSaKUtTiD7g0jLTQfnP3UTy4F4aksBTBe8bKCG8LhSLDcSAgcswdtoAd5Y/lLozpZUaTILHAktYeM/S2akJd0Gyio1IbtNQjqv+rfegeYX0eSSMlzIr1s6C2tPCDNVzJwRGiZPItFSqHFzWF4QZGHaqLoNmTP+2OQw8Qsyv/JIVjX/hE+gYI1aXhmpHZ83Vk3kvHH/xqDlwo1Z0Fy2lD6taPu2sKeAVzjxH3inKtjQUWNVg2AnvOY/KM68u4vorrVQGI5gieIATlfqnAMUqvlSKbeZoJm6EwMRC94jg/P6XH1erN8BxWc54Z6R7cxgpn4t/dxAxbOM75Zp7+AjnAtxbbRCkFUNBWe0q9Tk0zQNecTK7O4muTFlesWFQR/gOKDG9sFJ7XRlcUxtu4GH8txASmcihvu7zg2J/LTKF8d3/GtITcg5zATVfgZsPkDzzn7yQovWAeRaLJ3XU4HVSbC7s/R/9ZcGdVNlPbAPVY2FytcfV/cNbYPMMvvtKZ9Qm7b8JFV1dzKMZVGP5ddEL0Dfr9Gxtu8RkXIYpkI5oAlW0br1wKle308tmFzgnAs3c5G5aHvjH+a7z9wZNor7uWFkbbdz11tY0S2FNWbV3qHiNog0/LHR+7Cj71r5RnY3IbxjxNK1E2W4il8z0PTZYF3j+suuAK5yKwDJddin5vIgy5cE/QMMjWZ7h+Vtgzax2rzDpgggip6zTMf1BA6sZwF+I5p0v9B5RLjGMYLRKtlRisSKccKdAPYSE7ssvujvOD8Cnl1dFWzbrIdWy7WEBMMAJaQc2GZ3FX4IR68mVSBgH5rlJ0axt1fu3XksYHwW6iD3JBly409r9dKItFjkEzQY11tuWzNlG1spRSV8w2K6v32TtXL2vHFUe+NTnymDDAAeE6YCZdX+BbtkA1pWZrk3cGlCFEJDZlfisLDTNw55oafeMGaLKfbAsh4lf8mNQYEnRLR/qacaNwIQtoqfcRdzHYK7FWMgvFzlDR7/bDLNi6xvSwkAh4J/ac+LBE0XYIkAZoyaLjQiFKS3VoU/3gJTcpIwvt9yjrvhQPZShx2fJq+LP8VY2/A6nT8arKgcPRqJbeJpgwIxMfrUUWSmrsJk5NFyfpMJR79jKMmTescsJwXcF4o/PPx4ltmDsCHSpFpExSkaygfEifYcCpjXYW/h259UJSlg41Gl1QnOvuajunQk2tQLYDZ1fA7Wfa6OIdNuEoJ9Ei5ncznqCe+u1RdvPGNjObSDIxqiGWvNaXhFJ2dbREuvGrPMNBBKlQaEmVe101tiIaMq2fF0oY9B8SLgvTkv+yf8TZENmvUG97HcaZfwdfcLyM3RFK0PeNCmmE/46yKOXpGvSLKsD1UjpMQ/L5S8bCXvtSL5L37Iz7kVTVtvkLm3LHkSLwehDySUARe9cigMdA6k+A423xbzZm+NmNrqCgLqL8tcPb8ZFKKTivk39+BA0aVwU8rDYDlErNyo1P5L5nAtpY4FXIjz1On29RJSsRP7CqRKZxJyOABRVovyRTLu9VSWFlQ0vhC0yycvN13l5v9opNBHe4n/JmAR6S+bpuLpW1bGYMsjib
XxS4UvygWMLPZgriC3oj4xQqT/G/DDkzewkWbc85VwbqDR2tQWOP2mgMvk10HA7J4BtxGjL31PiJntc2KmWTKNuPWKwIGD3RRE+66uAE3X23ZO5LtK+B71Km/Vkp61v0luuT//8CJrjJP34ok+iUtAUWXHCRPpdZprcF2QJXUSrUwasIo+WPp2ECB94UadAVibyxnrdKQaRRzTyZWlkfJnMtyy6z3Ac9fmeQDfBsE4Ud3rFp4Uv093EjWTgLndJ2rB0O0PPZms78bZZGtDm1ijFm4Xt6rDwB2xCqN7Zrlo+TCSzjpoLv/ytbJP2u6kGe57t/Nlg3Z1AwuM0xDu47XonJhAZ52+dnL+JaAe4/xDy9sjghiUYaUFfkuShQcvVpQ1YojI41v/eBplihE0l3Ro2ad9BtFfQAkhRlmzXXMe6x0tQZkrIry5iu81u+MLU4v4R9HGGa+Q8hQuNaXgn7pNo6wwHd6BiZyq9pOcY509eD/6bfahUVHMbJ047TV3OQOxbXrkjz8hYyzdqQj0ub92/LGTn3ZKvYPOQjoBM1JcDdcZMGaCwLgtlWqbNxs0D/zkJumiIbuMxIbeYThmAgIqRhdZQu0w6t5X8dvTZk1EsK+X3AxLLNNtCcHCezJFWVk8aaR24xIJ1PB49UTnR8FU0vqQS8NKIaoqMw8zXWMhj/InYQOTnjg2QNwckOmCUDFMRwrj1SkzjeIWSYSUX941pDKtimixGljjkJkxSiFNRpZ/Tit2QfDoC4LPgfAnGSZFrKSewBWb+aJunzV7ifPJDGOKeFyscQdjQV5QXwZLESN7MKrh2h1df+QILSOeiKPYvE3RPSby7lGnG0evJMEQZU5svWnZbcicQTcpaUCdvLMsdRWGZcWZNwDb4GHDnMG5qWyYroTvr4niIhV6zCgUHucQdVim6XO+b2O+4vgdiP2BNeSHqSwk18Y2B9PtRyIcIvbFaVQhKpTIrMFqn5axCjbvEANAHKzzQDQ5hA55tJ4ciKwJSiFCJXJfM4GGENcZpzJSlLaIf1GrwNFt0oHKNx7PU1xKoaiXK1W2anNnO8iNxdYle9e8nRfvn25mGQbK4xiQjrhcoiKrQDy2NN10VDxvjFp9zkl10cONdaBpfukfK8Y8AB2AnPXSExgeWs5iDUb4LsmZVVQKvXZdTZNPnsxMbDvnNaa277qnB3fqcl62tMDjhmL3c6AybY7lzIgFCUv6OSo4R4LNet/tpNrS7ZTZQAm3Bb4uCwCj8ditsIqrVWfKsaUbohZ/t/dMW5JQv/meunw4QmyeEtK+al0CSPH3Rs/RJ3lSZmkdHzg2u7UhvnNq9QI3zL7U6V+Z79s1VtWxxRk/Z0/SY69Xd09dE+EED8at8uU8RqVRTzh2L5e06AfDZW846rx28ZoSlFfrKhiHDGv3Dvwvt7AgYqkMgd5vKIESM0bCgnNzs9sEHz9QeJE4IPIkln+Rfy2Uzd+tbHNrtN469Z2fyy4Q5Mun6Ks9wMnUYArROD8YTefCoJYjgrEtInG22Sv7lm2UWrYauvKBuhVib1/i6Od48ahOu45jOBxyvCWATaD8tpj6nPTwxoHNldOTbypa4o5+r39Q7bJy26cz792zYq4ps40nyY9qRqdI6kSkmMznSaDLvE+Tj8pJZAducKiSuYlK6rAv46/Ipwy3+2PCyAHNfTEA7kDLJSTLHHfUC/a6JsQll7HZH6iHlAo64ff+I1qxBIaCd08WIAJocwrUnnWVYr8Su6flnzjnBaTodtKVuH7VCjd6EzQ1ol9g5bbq9dXVP6MhtTUcMf6RcAKuZ84T9AxmzHewyEvBm7e1F4wEgC04FunY3Xe5wfBJMpsnx5ICHgokEYl8E71GMomMwUykFjBlTRcL5ynq2bTtfJafUfPpfpPRYC5f82W2E+d7LSWHlDKouJBwiuojbnnCzjlcdA7OvZzwbkJil9OkUUB2cNYJxtULUHzEWcVLipumLGvBiK0UFO+e25INOXCVVB
LiGz4Dpkf6nv3qVL7VMg5SULJ8ldc7jzcpcWaFRYrIho9czojkV5KEPpDG2LNow7dYw3T4AwdqIkI7KJFzT0jrWfR8gnKOZaMqQvAmyxnekhwPQMXacA2atLck6zPwFlzAwQSvQxyH8mKCmnnnzPBmm9kHo6mXAAqEqO70MUW0+JQ3MtnJeOJBMyTD3ZsNrmGaOMVlhsOQNh97eHcor5LqjAltR6L1Io/mgz9vS1FTkX4PMALzq8nqmrzkz0drxrAZyHsUejWPFiBAM0E1g3Te5Scawy1JEfTpKFe6YsU4fOKktatPgwXa+U5aOPfJZBcKpIhM2e8xBo7UEf3uczfR8zaLzRgYL+cdSCFxM8+vWZojsqeR6DwY9ve9YpjePow9V0PEdenCl5xaOjQtR1HjaNadWQndj+oU6t1oR4P3pDIOxhpWWd3fJRWeteHHLejquBjHFY83QgbJ2+UjUuixLs3dH5mHwdnJ16zF4450JCQ0F/PAlIe7COV5bjjVUq6+gUPuZ7mlu8vcMqFYtirupzf5Zg7S8qH5xcb1ND0HnblUdaJwQlbk0NBBpdl8jtmWtXNntz1bJI/xjKhrsr+U9yxR20HEybFU0R+tp2nc2B9I1CA22polzIolQYqKMloCGkJrGqmuvkTTCxl1sjOKCvuHn20jnhmL9mVaAXZqxzBrIVmBdsia2tiuX9hJCpHOlcEPTe56fPfw81WF1NODApamS85zmpRJrhIgIEQgKn4Xb17elU/cJO5RKc2bICtQ8ejbANZcxghSZHJTta/Dv3s8tjzsdk4Lf8LMJFcYUTC/BbCs/+erduMiND+R7ByZoC0/GC98Qm7FWKpS8n59i0znSzelgH4APc+NFxaWaewQpt1y70Rud14YYOpqC0zLtyEIO7phI01Y/9Ev3zhAyq1XNCE9hXkch9t+TZoCjCpcl6gOTbahXjzawmab0xqSKEtlHPIuEkGkfiy+O/7+wGrU47c3Wfnf2MsBZfSwlJBT/Cg8jRtV28ViB++BHB22MRNhUQqzxE7YBNDg+r2rfwFj+jozrIEwrc57Yu76mhENHkSq5IjEIpvNfECDhud1flqCLng9NF7q6vrbrjZoSNtyLZFvDIMPRl1wfCHBPp99475bGNHwqp/IeYpOUEdrxJK0qWMFgdVHESORVB6BdEGLCSF4HLYZYlHmzVNH2e3RCnGomakhgVI+hFQuzBREcMyNPPk3irt5nYA4/hrC/HW6dGDCf34pRPK7W784a3Asx3MKIyyhiSTCh/BqF0ZIijTJ4zod1v7z3C8CTu5NmVDD1xquBKDQDNo1Oe/11ApgUpM8pbSdARJlt6qxKrV2UyI2+nPX3r2i5s4pzsH0sdOQ00vdr53Q/PBS65WPpBlaXY6to7/GihXniuoKvOQFTtSiT5XA6IZMwhanWe4Nx3ejrtIelMdSgJ/HyvNfFlkOBnFSA36oriIQHsLjXtlOItBGoDoSNySEtk9hbc1RrcMvj1tEQ7G9YJQo0PID+Bk805OI30bs59zlJk5Cys71mAZzDbMmvwlSES8w74j4NXzKhvQ2Kwd+fCuy+MHmFnNF+Q/QjA0eT5GAnbpgAmVrzN+kE10cIk/cRwRSRQtVPve7vHXD6U1QgKpHowbysBAKq95/84IpF6IojWkACMi5YpIgnV4wN6ICwFV+/u671AkcJZxGpcaKLa2u5+Ma+x5wkXTDIoazllr2s8lvpKdu8a+9NOAV5tjhzuIm+h+5mIWu29anvg7/K4KfBT6zd9pwc6hyusC33o1+tL4SV5aTN/MBdmNbwvPoIpWoPANJs6Vmrl9EaA5afHBimJ36/Hi3dAO3i4gOVHB0w7HKnGLWgXf1o9VuxjNn09cEXqqyCIxJOJT9APSe4/tOF6UmmM7GNL2nVYlWAYHknKPmLCQTf49js3kGYgpY8CpqU5A2dVrJUntIVBtrKVpuPNZTAlD9+HJ/YXhdZPV1LBCWiKPqqlvwmiFfHxZtHO
BuDWovcSkpzH1DHR45mC8s/uBY7UzhJvAeqpSIdDbn7YNSLEsIalel2jH31MCutrcKrbSRQ7ZgSyy2OJMHEUl83BdoYn6UQQdnRuhdv1U1dnQcHNUuiJ0GyJlvpiqZNvGPvx3fQFtEJjBDjQZry46IL0C9pBQ1XF40S6vDhjEUgonWqgqDnaLjpPB6qm3chI65I5I7bXaKM2p2i0N9RRq8hHHOcLsiNZV662N+qBn//yQK2n2AInvI6APFLegZuBvrC4U9cva+hU2epTu+vVr2N5WHryBBK2L+dwRZU/InEs5LbdJGCWeSZReAZOGkyCOVu3U3LuZseD+sdv3jFtVCBmIDeuMzd2cBOokVg0cZNWc8zxu4tjXzEnO5TsV4jgRHEkDWB8yqE/XpVgiYGb03QlKH712Zxb+pRMEuuQdyjaMuPDxU2u2cPZB+Fejt6ROLdvOB0O3LrJZ5/7vedvhx+BHSAj7E9gPY3asFzB1BvhcL+oHvsARidqH+LkAl2wbg03mkBn3R8ukuLsfItCi3bLcO4KDf0Psd6CfJM6NlAC6nY6DIa3uow0gJfX/y3vANlYi0QCMtAYwbiHz4zZ0Y4JIk9nH+R1jRwQ419bz28AWGsqDWT3qheV+KFutdeOW8orYc+jWbUmj0wdOHAuSPNGXQVGmIbKybz7dCW3YBvo3nItX7IIheB+nwTkTxRE/ChAuV2Zum9kTBi6X7VOXLT/Gw/hbQnDcC2TJmtH1N+OoeYr91k/AlnHJjoMuY8pAgyS9PyMkBlK+OUvj4VvBOcL921QwSO/P+2aDDT69JO10aNEeNHL7B1w2UHrpQWYNsgLRX9Q6iw3uEDlQUb28bGuVOlVAz9dOJumbrgnAQ87yuaaHSEqnraEKxgWnWqjHWOLzCadSVM9ZIc2Pc5EiUZhnwSBZFvzrEIXpxvqDivYJuAEfx+qJIZa0yQORb+CmJHpk9+2Zoezxf8ytu4TkPwel/w2m1J7Nzw+lWjdd6MHxnAoZEf2XTw9unG319jS82hI3ezEUuOnm6ThlNLuaB/oFh6WCdkUuCN+3ui84rqCdNcZrhqtU1Ocrp0A4EYzz+ZawPiPSv8mDbJZ/AQNwU9qnm/Vvo/8a5ok5Pi7EwitkzTmHA/Nec/EM3wufTFtPx6JZN1lA/EGQBw+1VWvjPOtwPzSwE86LOg7h9qb8nDElf29SdCcOtidCtO8Rra9Nunch2A6QpBfXpkfwvWIrXf2CgUGFuDR8VwudDux8WXoEr8O3W4IeyVNl2RqK2WgYHfVPF/vC/iE5A0zDmmq/YZW2akkNYEPfwaCd/BeJKW9uvjgDPEtOppJuhiBH3fEQSSCJGjmkV1mSDAqRveDMF7ENLwnuAhm4F1n3iQJaaZrKAQboNUp5wy1AsZjHDw+WB0Md2FhyFnOnYcAPbKaDs/2CGJ4clYALWy2AoVQt39WsBJnqI3gdnP2rMnA5Lhx1pRFdNTBVFVIAd9nu3WAGqft9xyo6HYhYNqvyWUailnSLOAHmRAlliLeoq1XeTSXKJWCLcw3tbgZzn6iRssHP+IgR2/zBaEw6N9vn/i1K2rXNvaNPJG7W5rFyGB3LdSzqyvh4HfErL3VC4MgLg7Hel2Fm7UAWCm9K0a7ff8sdWLeI6t/3Bao++FDcUC+BBOdtxhMUAC0mF90KyJhTaqy7bMcbDoQ2s0J6XjI5F5V2mcMbMuIfnYAXr2iBsb+/Nrf2DYWwn4WKcBoEXz1V334lLAQI92AZumJnW0jibCuRxmH+pnD3EsCRP98BilP2WwS0xPT8a8Hs7caXY4oCo7KhewDP45YUtg039TuXFgGlGH0Sv3xAa3fDXwHZ7Q5IO0DsimE9hc2bXbX7g51FiwLxeCIB1znQ+g0KHuhCms17vorW0Fd//+EZNWgUyWUAp3gftRVtsBqS3lOID/zp4Y90dlM1I4Q3wQCMZnGe9LJhhHlIcr7mSn4wCTjYkGIHNcjnaZjaL2Bhd2tRWT0
pXcFqGaHAoCNEGKq9H6x3o0p5SQ85h7w6ieday7to8R+x6BO/Gb/TC60RAsIL6Y+O1Qh1fiXVvDs1Xiy7Jm39Lpe/3knX9VoweGXJ5yRWs1qD1HHPXsa+2jL1rSM+JOG4gb6VCsoJItryqWBgu9y6DHdovx4l6VoiC/2SWiLUhsWiwWskWE/eTT28wRq302GgVc4Qm5iOHqXJCsyUrWqLRGCjuhzMkuMXY/naAw5CVlM0CFCCh4OGx2PmbU/9Nykygon5DsMjU+O1Z8In7c7vnioLI7DNVcViht8bVU9DlgGovWeic1VdOZb/VJB3Gp2p03awQzf4qyDSbQxIIgA4s06f0pmxM0fiSS2BozbofyJzODzsYVMYfVIa7HdP8TDKB4b4a5isQ3KOAuA8eTGBxAj1d1bOocoIqc/xKOTpisV8C3SxlpuagZ0B7301wUglKXfMQ+2SdGGk1EYn23f1olzxYRLufIxNv8MnFH//yCts5V6xq/zv18rdOOv/LUkLmNj1VFmHx0g587xKVJ1CbXMkQL/aGaMQwdayEqvM8Yv0DQ3081MNTLNQZJOIvk1bnafoDEoG5LJCL5Rpugpbj2y3+AahsX0GddKo8PI4RRTW9ooisfNVpSufkrw0Wf86D7Bi9m1MGqiN1gFWr2ABPLUahyUSXNM5scebTZcfx4Bdp1RwS4TSW6t6POQGzU4ELO224iw464OoDTsgbZ2o0wZ3giCL5wyBhjI9hRJoudJx0xVW6MmXAvj5v2QXp0ps70aLjTdwWGYD46xc/33fHnrbot4B4ryjhsQaSvqds02zIoje5Wkl51X4/F3rrkWeul63iNKzD6lXSvPdQ0SggYUIh3PPVeyNIWl0ksM/MfwXVtZyVctgGhRHqZ9rMAsDXimRr3HJsN5mhTgFFz8YZ61yiTnrURFA4EWSrUk6vd9439t6nHoE0dY/n5M3TcN4WiVTHudTA6cJmhSZgxHXUnkKaKQgYHEnesL4rQLFeZ4G7T4W2gYw0GMzLPt9H+G+XkePIeYa97Ebhp2ibM8RuXFGoNEirkAMZL/bNqAhaD651uSpWiG4XiqCV46F2htg5Uj03yHOO/ljgi/GYDTz/07N5iUnlU9mZdweUhgrcKcIhHgBQYmk6K5nd2qQFyV9ocJU936b0hSw+83ilQwGikEWyVaUUMUMSewNU3eSLxJ43oG6v+C1lvprg8t3w2mWBCOfMIxM0oCVFgMeihbIfGh9R4JKz38MeusqPp7bbYW8x1CGnkhFKHk0c42CZwRTM9iao8Mj6MQVKgXhrYNzd4joNAAaMHsYFKyfz9yhaa3o1rZt5NIutUizx3STVB02gyXkxrUmpoOC3oJD01VcbRoj8JoSHYa3Cf6Tgvv9u5mleiOZ3m+ZS3ksr9BcB3AJ5hreqbDOiKvxQwZW94s/5JPZVt8gVHwJhKhstc0OO62Snr6RvHpY9fti+2pP2JOt0dOnLR/vIprAGlyC6F3VZCjFbYwdYYS6DrB4P4qwZo4d2SbMRnoQHjTwzdzFaFwtH+STV8XBkuAhUi5HcOOkK7qdoE7Kw5SS63ORggsO+JwmCdIm0lJsSKD3jTHuBi4IlBD/Yv/E6+ASj0+Yw0iWqfWXqE7jep1K98ZUCWVfKNqnpZ2AU2B8I0mCEcq2c5aDGz25IPMY+j+/UmgMqht/72y4W8Qg+y9tggG32AzPTflqtQz1CRFCY2pSktfrJLWir2HZK60pOHOWQ4hiAYP5dN6VhXw8Vj3jaGflctY59pHQqhsb/eOGnS3Loxyq9a5OoqCcdcc7jmIIvgP3Isq/UsXDjDkXAVYRATaPwboXyUW6QCgguz+tKh6G9elIXWt/RWgo/YwT1YMLTP+TDBX311itMYMx1Si7aQINryoMQQ/Bgxr9ZmxjcwFXM7E4VLnx63F6MkALaiaIedq2yxkQ01KrfCdNGLon/IBZgi7tVZVVz1g9JJDUcuQm5Ypaol10nejl5rIDY5DDZYi
sqrbs+957reqeZzpkgJc+eNNkoepgP9M/EZvsk+sVO0kZ5Wls+GNjbQPFBTKMU+4wsO4+jnops6k6eSPrelVhqTlyKuRqftUsFrbJ5BtOTHwPXTr3xtEyk4PbPDOIOLLyAGzQHs61cTP3LGn+rRBpuLh76TvjbNxAfr6EhKGwwExmP1cAnWe1gzT7RI9Yk1dOY3+AM28w0Go1gFxDhDShvySx+FVKn4YVIlyE/TmZtfyTrhgnE8oo5zfu26OCZ7Z57DmW6xZGxSZ1XoiGhSCcHMrlWHfqoSyy7cvh7pK2J9+s+2AsDUMGvOBqJUXHB6WoOVqelVBYC1AdYx0r3XP2zfFwc04zJB6w26VodRpAik8TtgWTOrz3WSJDyQR5cLB6J8L/suMdbszNlnV4uqi6nXYx7hlcMpHyHsqdGFYyok50Cvq9u+rWRIrtJOrbqGT93tBTtyQSeITvv/UHREa4zr9VdJoQ/9/VePGE/+Va0d5p3gFkTUBy7mHs77smd4OEQUU5i+ohIVIf8u7dyaSfKWohR0O2sjpCdb1sExRWRLgJNOrQ4Qt9xEYeaoIOfWrnESf98t0dXgUnwVAJAgx/DnHiGZ6F9Y970rDwarwMp2ZHxaleZo9rL/adx8aKEN5wtY2548NcgG+tEzsiSNPyhD3c4YHdnUfHeM72Z1FxXSCdTPf7VRwVZsyKKyZr1Lo/6BQ2hiF35iFPrti/LC3um6r4EHfAVOUnevLqYoq+gzylxaZjhM3ylrsn/q8BwnsTcuuRnbmTseHbtAATDCSHAkgqm01uUzeajnKDkfB3DaEm3M8IB+7cCXpQFW1ZDGgxIrSKZiIYE0m4T1jczVfU0VEuOisrYHPWtp2h9oKtAoDef29skgw5eNMqzw58pkjUcRutEUvOEqBCjv9WFV+awZrjeODTn0OEO5z603II+XvVm08YebHCA8l+EYjT+A2TwZHvK3O2ReRG5ldAks7/j0iE+mCZWUXBA2yZubstkCD8gLyffv+inO3qsMRPMkqt8yqzrTwaud7AQHUJoC0XsmZjbypwP0E2SPGja7B7eeNWkqhepB3UlP3KivjdD2dYAH8G/RJxXs4qApTzzW1Ofx1a7ztlOwGW62+xf3pZnv7j4MwjhCJvzwFRY5M+dUiQKAoiynnYu7hdt4pKNNZCahWW9ZjvKmK5SuySHiA9zeRJH4685eerBkfARO2KT3x+cH+NbLy6BZB5+q1xYJG+zJvYR85uiUck/cqoE4/cCKuuEzxCKrBG8awJeqT/e6aTLzmP9JQGF4L683pAeTqnc6hINW47kpUIi9c4vGd6un9kxEcfJJ7NfcMr79q9vgaZ62c5zttQx7H7poQpPj/nb4suknnKkhAY8wo+Lpv05qepCcnfgWSbqaMPrXcpbs0MiYngk/n/3bcZm7Hr07fnJ7Ozz0z6Fk8MaSAJHoQR9sk9hXA5FwkWXDMvMQxUebnGo5zYc0Sxqb4M/0qnaktqcWkblbEtMhFcVtHXxkfa9APm896ZJRfcXDD89BI+ttwnoxag/+E3M9JSRlZmcz1V85M2ywe4JJKlOClQ8gjDaBl88P01L+BwGMd+PP+6JW4ooevbytRhUIAAROujBSn8UrJ6q+VuU8VMviBK0PQHMibFEEiwRGpBmoLqbIb9avPutmRbS6UFyfHLyX2QSO9pP0K4fUorvuQTi4IEktWeR1YQw93SVOqRmvHrqovsyBimgZQy0/aibhO1XSJPDVmrPXGWQx6Ulp3t1QNPN2HRYQbLHNybYKETe9xM8lhNjJk83BlNz1k0hVf1YK1hKg3tVMEVxicPVgt8K58jN5QVU0LdTp1tS6RD8qLjqRdGdITRY7NX2A05HsdwNbH/plgLUVIFXYIsd0oJ/nsjQM7Civa4rqJ+x5hVxU6xEeM9FrPrn8CAYpp27Wg2CtfpZrusL6xRssaTKldxMJrsVkHPJ+RFPEoTYAeV3XUGkl8ZfZBqWdRM5HI1uUt9wAr
E15+RrjAFKXf/tUzSXtwl4FmCMXFL3Ji4dkLamBkkmIPW/ncWb+OyJpQANq/1YuDGiQcC+xbf617dd3oEeVBYcVgO87sByeup5EF9OwhekcENVurBK8HkBcJ+GZVf/PqyJ6b3sTyWtQpbbSmWtzMxAzPaSCm3IZrqwBUDB7rIjGNJWtHThO6Y/yhQWr7xKPKCKDLU+VjrWPrQoB/WJ18aRZN667BdU/ySR0dJq3yeU+EEyM05xHNIXUvGv6XXJixg81BIeDGSj7piOhtmGD9n27KDnJVQjBum02h77Mm0R1q9D2uJvm2t827zZMcCPN/0sA1UJzlPQdz73c/JYExGO7rJd+yOKSmIo/keusRuLm46hx0HXxp1zttAn7N/8S7gmU8mBoAKHfHVvyRfBkz858LbYRaHxY9VoNOdsMg3cJUTSFjMKSAvrLtADz2Qq9JnW4yOFGh40Xa8V9eK8tTUR2JyVbCBQuZHA4Gcq9FPaHcQd6LS4+OvMrf7SqhHrVokOFUuv68ezyRQ+HqWPUgpbcKZxaURbs26ObYPnaiXCGFbY1F25u/VstDcydUXxdS4yBZH6jyy1v0TyqPijQjlEB1OgahYQZh++bgyjyfSeIH8+t/PvQz9mNPSaz3JXdJS5EIOBTq8SeBr9SKCM/zz/x6MeQErY2uIiockH8LqXNYnabFzVBku3uoPlqLTSfHoESff2r1rkCNyI80Bash5cnzPoTJ55bxc7+iIgJRui8BfueQayxTH2KtZTlMLerdPk1K+KmrtI2iW//jeaa3BDVNKe8XC9HlXwFDHruyhZ9TrMtoQ0TfI+1FR3jV2fOEucG/WjFSWFTFxbui8gu1WrBiXXNf3dwXnTqgU2fFwR8dvdEHkZmJ+ewWREa8Sbf45EClJf1D2tR4M3ICX72BSTu6/C6sXBZiZLdCJHo1Z46pZC4ySQj/7YsDfsyzwuv4RLkHKv8sZuVCS2yDei5nTInttw7s7cmbSt0KIcdfrLBbfjqnxokqjPJsrqBD28Ka7SJ9rV0ZAsZYIt3v56P74MHtTmdn6SwTgP1kOL3v/sEY28pko4Q6ZUX1YPbQSe3XcWsMwOiX0jMZhyEuWL+GPv05X9ApnLdP/mzzAyynVWh/vfNWNfMpHD6H1WbftPxXBPdt06F2kf6yBDMDaS7dVz4egP3QakbsE9ED+uxPPezs7yD7iyDQi/Wd5dBVapT52qzITB3IlHbEU84Fdn9U0VCSaWIv1FPykWrYa5EJzGka1XAtOgfCLntqjFTYvCItFTHVwYO7sMCws+kkIY4+dl8IalMrXvhCxPEzgJQgekSQqBsCt8DyrIBZP8Q7tV0wyd+je8XLktuFmSqu7/0+TUBlZ8AKk+5+q2AA/SdOYvFAHNNZynaCw1yLT1mok/2AcTeWnAXGOmRxbWZ4kMaB9aaE9yiA8kSV1nUfTIWn8EaoLxr2D8axnlVuj+EawTtTjE/mX1lcJaVCkqffVOp6Avb+uQH2wTGT2LUV/v+r/+PBvjMwrh6YrxKRWUv1hQyHQOWp4eVwwVH+7KtJbg8zL0rdjT7LSEsqNhm4WJnm3Ae/Nz8DDiyVjlbLB41AxKUJpnrR7cpbLYr79L6MLI6uPvxpfSjRKJvOpfJrB+I3NS7+ovrAs82KE03d5DVuakgv0nJLih+R3k0/zTgTItUt2Jjoyq7AN95d573hJChiiFQKPU/vVWZuBz6UAO++knglXh0N7Nur1thQSMRtt+kClxn6EJYbfysG5bPvByhI+EFkPJRX+7oDrykvXEsoKQbRdtZ9Ru7/3fzAIyRkmyWU9gROdJmBjTlctqwjuI7hs3tMNScnMpfGCnh9hzslvdLJhKWZvNEhQf0mFKTSrtBAPvdcNTZ1m2d6l1V9VGDTJFK3qOuv4Bad3A3UQHurIbFDNYMq2noWukp0Y+NVGeuJBHHM8f43iadZSilg+3dAGlLQH9yRszuVhmSbrzk9o+p3+xhho+W84Qx4k5sI
21Web0CztxNJSSc9Ix7V9ekQ1BLSpZrMGoqyD3KMrsyiBWYxBngnTrf0Ol0P0Dtta/QDMa+qgMHMFhTHv3MAXf3njpHkkFDpo+y98Y/Ku9ndcBV9tbHYnhqHnDs23XksN90VjL13T8SD7yxYvpJrmbcW9rketzQdqAYORALk6oPGJ7ucIgvIqPzQuiE+PxZboGtQlPPixYT6gIDaKjWbj3v/RcjnhFilJNFWxk2te5QplGqDNyeDWD457GJoegdiufUW3w25so+aRwXxsYdXFjKgtdHYc2l5Y0Ww4rMEuWEVKalUvInlDlgyAoymIxp7Q9AkIEF8JYbXq7k3wg6CYs+Ahg/pcuG6O1r8ZCoVbTxnq2AjSKnsyJzTvw81TXJ6YZE6LrXsHGB3jEklGBCtx69X3vP9xoKn7gx95N0+O9PbYC2+Oc309LRR4ymf3I1bVB8NHxYBgLZQyNiKuEli0WDRf3V8G9jlCikTtljwTsnS3ge/85lh0xJBW4qIdPjlIMfec4F0yv2Yyiwl+ThXtZQlwf1XBDaKvXXXnulUAJSH6BE2OW48ZJV19PNYZsk6i3bpBPkb0vMlU6O01GwKET89qQ5Tipq+nqMA7yWJGEy3aAqiy0hyg2lV7H8UKIpcDdwuXcdXx5N1Lqccd1MulhPdhwUqAYWzKAd2KFP6Qq8/8gG7n+k2wNCLQoCANBs27Zt2+5m27Zt27Zt42fb9bJtzCJmIQca9tpt4UNLv88LLW7Rmi4nHGkvW062Ne+Eeynccr4cQ5vh6z8DNncjvijOtQ6VD79l7IxCWhPY3hTfzRxy1o98jRr5AjV10YNAjZ0VQlycx7Ni29O8leACAaS5waqL+GU0fihFppjkwayA3W/DaQFhGrd4JsSKTFlJXCBrnIYerIXy5kERhJAjhOhU4nm6H2UxVDhdEhD0Mn2IR806vFLi6+pn5tjxbw1404w8AYkUJTC5uO8Tomzm9RGnwMbuCpssWFj2gibP/bbBKkJrADs3JzMGg3nl0gbi875AWhow6EuhYHb4AOcJkKGGxLzsuQSBccTUnVKxUlwrroLtEjj3jrsJU9BQhVop3GZxVgg/JPqikgAFpBkR1aGdf3Rifcgkj+ua5mjAmNS0dd92Q1wf5Guk0FmsalBvehty+9DuhAHK8djAKJt7mm4ubLPmwe/v0oJdWfZdraBaFEZUYa57jdjLgj7XinToC/WZR5rcX4NN3PXAQsWjaxNdwvhX32nKeftwSBTYRC1MuZ42SjYGMiqon+zIANr7HWvId+LIjWFpFBUoi+wXneChuiRm37uJmlaaYs6ymApjM1jpeKvg8qslatkaXhsJTHCa0o4cNli1xxz6bsaf5BCKtkfv/cUySEf2eRX2eKXbyc1l5/D1nIExrzvgZ2CmYxJ6finIvxLBMg8gA7WRspGUU/ViQBjh8DVqW4eT/4fQyFj+kZuzEVwJT4XX5f+sXjwQhvaixEQLqfXzQY3f86SpVYFasEm4h35mhFF1tHqTYJYZYF3Iud6QhrmvgSVCPAT61+KXLU1XIaUhwnstv6GE5VB64M/e650cVMU68ezSojKOPQYYJb2M8oo2K53k2HCvBwE2U1k3JaJwMQwf2wVb2e6hQqCTdDktTeQ5L2ivwhSiIP1HHamLa8TkXzaqbaeMb6PF04J5aDVz7L62yLMKxeDR5z46XHn+cKRR+b3QhMtkoqvvyXgz/5EDmxYcj2olcNDgMvamJhsZ9pHajtryq176loHwpxOLU02MEFJm7P18QBL0Ag7X+FjXx138ECOqG4mXa8HuO0mbkoX4lHgEu6U2ikr5G6T9trcCkTx2BMFHnuMjW73p3lUBpZDsDDW+6CzYY2VAS9y5ofrJqff3SSVfpVaHIDYwnDSBbuhdOz+whZWiUiU1QZimgnaOnrWj9qEqD6ZIYBWuhQqpweRmHzgct/kDozWt65oWQp9aB0QNvprl9+LYwP6bIVPIsdw5zRgU1rIRFfJYJ
w1Ip5bqI4NUTxtcBUGzUJp4DtHEAHSd4J32MZrh/dkbyeKbsc+PFI8tbalCNuL1CUqZAUiH1ZW4MSYuFUVE4Y0tc6nApiB4ZFre0zVyp871qJ2JY3B4N0Lb/8XWJs93EqPiKXDICCkiZnTa6TCxDzWm56fxV3DIYsVYdcwKVW2e+TwO7ECRIaVUjYcpkx4xipuWwx2sH9K/j75MfXaD2rwvwHMNtHsQRb0Q8bEfr/1jrNr010nykTk3wj7njQahsQvy+csCM4XiRs/hpclVZwR/msklzYUPJs0Vad2+njbvjnFGnpmDMva81KYY0q+d25pnS4GDfGDh/Gw/QPBxd8ZxWVNAXd3+dA/w3wZ4PjUzwOZo+S4G0YPytY0OweIiQKFy7t05DEYtHd6ibdHD9UidunZsr4jkk1zdk/N3P9+Zdzqm1eq5HfICk/fKAhZhjbYc7az4PHg9O5QkY0K1x4p8g0ciFe3g1ZV78e4DgFlp/Rjfwu8KHmdxUXxBaRiB0SzLUwWQuDdDrVlzavLd7otrMFvJwG9ye8lZTEeLhbkau3zS7Dy8EwCSt940HStA2ZW2EnSNITWxKgbf34zV1ENw8zYKeS0xpO3f+8TDXZoqsNdQz8/n3Cjx9BzJnCrRD6lYkwB9JUeDpTSu2reAXQlXzdAFPsCWs8SbRewFtQMlWATwRvnSq3Bupkioe2qErlbjrzbEZ7raAEFemjbIa6inCuW5uTR2YpnxBMgNLpJNm8gCPXWKGpEFGAxs1mfku9zgbdoOcLm9hgOgYhDkY7x3Qt9Mg2CLMbRfpNei2zwDs3El9rsyuPqD3BBmTIdQAXSFEm9h7Q6uihFZ5r+lEjlMagSD+AL+5j5ktkdYN07XJQPvXRc+KeRpX6t+2bzgvrzMczKXTCCF8xjvSjrrokiDJEEwr52nd4mUqGtVekX6z9T/2mSpQ8e4rrugT0/GHcxiRG/DzhGvjXCY5kAjDSNdh9X384tXrgRHy3wJQaDWi+FUwhNOZC9j8cKUeZW4sUsE3OyFVLPOyOn1BVWRJAf+CYp2j3d6mqlTshf4xwnwz16yv4JufKEP9pkMxvIcL/3GtovK1DM42RlQ9707iv+RdPnSWG7AcQeObL0XB6w2aaC98T91Aro5GJJY+gh4c9uv75oSKB4bNYEFhCObq2BZETcGcQHCtur8Ak4AGcf06OcpggC5bM8doyzAsuHXPBpzA7emjO2MmSLK0qJNhwiaGFtZXZWvkr8yJ37oLS3eIjuH+YQkBvBJfCqCiH2gxpywbnBN2xyTBBHd37Z+uQD8vYtDp1aMb4VUsOV81kjhxCiVUOrEdNt9DB9rIq5u+k8/NglMfsmpHV55t/QWPxcD7NeqAkCpMij2eXqO5totajK/J0RDMM8L08ixoRVGwCs6sivhy1YjdrEG0NbxjMbQvCGjhQtUkJF0VOU3eZ5cwVzIzt3cvO5drPBOUnfJdSD7nqZI968Althudp5lwjE3bTfOFW/TpET+MBSTWWAE3aEmJTG+jslt1v69tjD/bgSqE5nCQyfEyfXsN/k2HkJ0Weuw4dJcnXiGjfJssy1J6dvHfjvpemBos6dXR5LJScbx44f281Zk+5IsU7iMeLwoUl9w+YtYBQjpZLou3lmEfpwkFxh9w2cb7GZWqCzm6He9qiD46cuSUq7QvQQ0/Vi+z82iHMBxti9vMyT8IEl0CcJYoPO1fvEA5h8UqCZvekF6Ob8j4FMz3UaVNKA9DYRduQibGvEpOUryzgN0p5qfJNZX4L0KqYGhXJE9ppAJwJU6RKVqbPP0OhVw2FwT/HU2fFsM+rR7AMJyCT8aS2US1x3odvDxS4aS08gRpCXp1oW5JQgbLbhI/HlrFTyTlT+MJLcQdCTBNMPeBL6J0qzXNzmbXOHYiecCEhJ5ySUFdWR13TYv7w6Pq4YMXckd1kD0q/1iXUWXWMU2wDJ58DX/ysf87yfuECqvKgvvGdmBvOxA8SEE9R7r8Yal6
ahLkoSimRo6lUWRykNeq1KYlf3nVqYPWxV1Qtj8oMYvRHCF6F+XcWgxYuivHNakzIwErTlJ9xDjjH3T9nO0Ej1UD9/QlmisRuARCpwSylCVc3R1e69eYow9Tcj2NvgP9oo3FXGEC8KRkdpA9o6YPsRN7V+aO5yOPQuJeelyiCBZYpc5c6hvCKUX1wtQtE0ATluKIgGDUI++XVTK6FeyssA6WmhgMgLO5mJp7gKf7XHEOr8L4jDhGgpkYwB0duxLfC/AL2j/mWFfql+KpeUrJGe2vBlZtAR5Ql6NRlhxmQxGXanZPGYkujs/IZO4adb22aEca/9MDe7Gnm3AJptXe9q6L9DqKBD1aBKLxyR8ThPa7X3+hHz1xu7KNfRc4WNxBU5+j6isrqCFZvcF2S7x2giA8E9zlBopjf6aaiC3XomHtp0c8dX+FH0DDK9ewQ3Z8Lf+7Yl3l/CyrobURjOpll7D8RXK8TW5XWvBInJUz08mWi1pG7K5oGXY21d9JDiFJIwHU/9umeny5b52ZJw2GooKI6+7b0DHedXhbkZvLEHAuZWzK9iOOBhmsftQoTeZUVRxvg6cqLY4d+CxB7I39Ml+kvw/PZukEe1DHK3iDkqyohgVYZLhLiBhJaibxZwczvj1tmMzl/x6DlkZzh6MCmyjhuGyKc/lJZwv55qs6vXNE9IbDdhvOuEei9Bs9jp5CjxiUjFoZHGX/EKCX7C2lPylg8OZTAyO0FmbAAO5QJfSemCTD5KnJ26blgYTud2xC30aCRe7nP5bV1cO2Y/DJWdFOFEVuvlP8Eo/yJiLj9c3sAAzjBwLvRSKzNol+AEbe/VY3ozvBxmPvDDK1Mss+dwNmGoasgzis9tD82U4PIYkzEiRhrCKO+IchIMob2WLdS4a/lO2TiWwkGJ8xtx2x7fwlOHT/zJCbUIl1EXOL7gSyebaBQlR9pUB6+fgIgn5SrnScplEoT9y63006crhUpYRpM5maR9U3t2vTtUPcU5Ic+MesL+/P0cd+p6WZBZB1Coy0mcLO31D2RUX1OeaIr6ksZ+tzPd8MT/hdzJk66Y1PBwHpUqaEKytgsdkYPeP09UGNJ3vLsJaOBI1qPa3sasC0e0iQW1Ty6T3ggt23kXmV/tJYa1xCI2nuAu9Rp3+m8P3PGe4AUZFmoHG8P8dYkkjq20JUJQIDTLFgVDg0Osh/NyiwnRW0KDk/GLYwkt+7hUjPsNilBq6m30mx4QRNqYXbKMoCPnYU0q5FFhlE+jpfPxnH5aWWUSuEbzQk2sBP3VWfVD9pXupgWBUcpLC3yXvunN/piqrEUHbm8Nyilv0QEbssRCMlRWAUhGpDQFoqT97bPd8NTk7So6ujTd+yczpcFxbW7sSQO2VsFYHZx9It661H19Tk2KzUb/TGOsiZqXI9kmyGke7GActbjO7G7VB+sj3rdDzFKBCL86JWa12/k40GX7vQ7zO2GLaWSprsb71TJXgeQOjTMPOnPRCcXgtFloay1xaOLl/jQ1pxoTDihVsjaTTmy4mU8vaSu/j/R4kMGFzqaeYCloVb0stTdhdMimExC3sJgTrkRLXSVUikOfwOYTg+2ENc0fbH0tuRfO8eF3FEvMyLE71XrjwZD8sWUpP1Ez/OyXESenfRZk7N8xU/ixVXsGmgKUGFPz9QEwNapNBhPW5WOgg6DdoOPM3MywFh9B/yZ03NmRE+dyg2OwyLllrnPM2IUu4p37oW8UAbf3m9mP9M937x2yiPVwd5iepCVmnS4UqN9n3/gFaRG5DpqvoSjSjAh6yQbQqe5pxjJo+zm/y4dB30VPMUYMhzYdupMX/jm81CfFr6E58nt47WhVxELb78QTwq9hn5CLRopGOg0pY40uV0f3+XtIGvNhUoorLMg81kmgiF52SWV1VwofAjqWeVUTH7AeESE0pZp7APaTZMtro/meRjik8z0XzR0coyfTWqI7LI/qha8/sLGdNMwID4u/3G8jKcz9uGR5pxba0eS1tC
QdoGM2wUdoIf2NNoe/ccbxpQcDMWszhyzBx9nnN3PN4iVZAtfbsPayTY4yWwI1gVTeQD+ZqBsazFFXuD5g1NP6nvwhwpKoOkJNU7OKm1DUZd/cukKDS0SRT4XJ3DJ292EK84ug0C6I57oOMElDSvnNAG5iEwxU50y+DxkCvLkbCDL3cqQ0Nf17NB8uyYtTaBZpDQr+pJiBF84oa98ZLKVFV+v6MCk9N4yTROyn6l5mUedFSfnfIalySSIdUwVikBVRyE7z8iJOxmmjcPZIQ7iGbv+DpybxSP0Nbap70beBAhJCaAN49mymmdx0yFvY1rfvctK6LHp8rjPAnaj7uv1IYHi5XWfClTcMa/KxUbojMLYxHSlWBJ1i8WFwUiJhPOyW7emrFLvkfg68PJnIZGOjiWbCuX9EcbfzYmtFULwDZaZyLYtGoST/gXdlGLfZ5h4HpIox9EYio6F9XrC8lC27G9SL8hc3jcfm758GTPzqggK/LyZsroCuKys5s0Uk2qAV4+aWLI23xPx8sOIUuVYEE+uQbifLWaMAvSW3DOeJUkbwvN+ERBYTRKEZFxqrqRPK+njJ0HAkIOHINnDREQeR1PlldKXYt1ckCFx2K2DQVx9MqTzqlVx4eP0ivAuNhxerpXvnB3JeQa3EgHhfBk41zSXP3/XG9WTPbGQBZHTng14oNm9/xFYFDSCLjz08zWKkVmZlTprr8DyOhXdUeVELIr4SoIcJ6IsMQLMp1bYoRJXcVPXDjLQAGfx2s6zv1MaXTXaVJOhSKTXSTHtHOwleXqeaG8hDBCd9cIB/kJhA4Jz/ysFSWgMi+Rvo3o4GpX1+S/BCYkJlGheTiHAq75dFEI5cIZ/FuLqRpN3S1jPo4fmjJVk3z/qWeUCbv1m6Ax17E/i13v6HdWELO2/2WHtGIr0kKtv0To7vQmfZS+B2VDjVrdveOs5RttmwBVkdaPQXWttrG3xjKbwWuqbG0Unp0ZoR53mPXQLdgNUz1+KLzw2Jzohr60Fiq3mxGGn72l34+LPTr5OmUyHNhbSCWQhcCyQv0ppR55gGfFsh9VnmFMy2njWLuKG9kQZdvuyY1Zd/NXLwta8wZCsboip7NQST2l2Amk7nwSZZAwkgBiggCQQP43JiYtAu7cFUKNH3PkyOUGbCsdYBv+pmbsNE86suWHLwrnv0btanInmhXDGcHAuWtN9iBqNnjyBOE0z0egc+WuCJc49UTSak1EQ0ZCLWcWJYQIpVvx6RtzVT+ETlb393+h2xyzS2NGE63fSsfsoMOx5LPPQA/dxd2S40OGg9Un108Doq9pGyJH0IS+aNTfUHQg6rD1Q5Beod2VYLbfB5ph4jd173Rjy+9b69Vc7ngqU8+pktbzokIGqt8tGys+AK1qRqOuaRE4N3H8Ax2Pmw90qJ/VybRBrZBn4e/rDe6Xs5fhdoLm9Sap1lXmgZP1aGNywzJ03e3vFnE0z527YYTzQzdeF6XSThrVOqB+FzRFP1VEbzGKQin5qKIKxumYDJAw6tI+uItYMtQH04+njROPl+jvutqWSk1e2SQU5/Xnr1AeXX5G+2zHOXLdDI5EpeTS85nleN4W0dWsbpV8ojOQlddGUQN0gGRsJSL1fTarD+PzDx6cgjJo4P8QCx3hyTkWsTI3jGrYTOmZ5YLjQkH6EFLAM7E9ifC9IBi1f+e+jMANoQ5B2KahnEyY0g/fihMCy0zYoJh7W8U2OhnuixwQV1cApK5v9KMuse+bdRkWIXnl3mh6831ahhh6YHnik7CXtwoGhNcfN63tdQiloDfjaxqezsRfsNH/Yz+GKtFzcL39SgRic+m+LC9Rt9FBheljq7+fETIGyYDAUgLPBQAmLZtXjntZdhtFtgyH/r9Y6Pr+bLHENgaXQ7dpgMb2K+WvwnX/gLXuFKlcmQWaXf+m7oHMkV76k1v5s1+vG9KjuKdkYzCfJGRPnT9Z9EeKAEdiOWmaaFb0BGM2zOsrBuS7kJMt/wN7XT/g
0JvHtm+1tYF2hB2fvoDAWuPv+mdCmDnAK+5A2LsTV1kQ68jKHZ/5A4ocoFiu7r1Zl7GkcNj8+OW27S4JR8pJ1J3DbMDe6MD6/0kD+hwrAkJoxisG8InF0ai5P+yV0P2LKQhRBRL3jSoCM7ROsu3QSmvmOGvkAzmY7yVX86QHcTvADe8AHY31DPaBo/6SfIs123LsnIEdmmYCiJ9Nak1+FvoStD3Nn2xTDN+6Ywp7AmjJQmS/i+af10iXJNE+4XChiikpvhGOWHkWO4IYWYJydkk4N+8Kjg12sswak30SkwblXlSqhN60tKTX9BbLkD5hPGwuzQ0eMSCDTq3Ih529mY4wbKoWIJFJw3vA905u8ddYLP/cNtI9YNQFXIimi83rMlGl0zummu5WHbYM+6f3PLcPGzctwA0tIC5yV7kdr2EVF6uLu3P52tYin0N8DAfEkz8AVGJ0GyVxX9PA2cbDCbfWko7cmS89120OIL7CpZ1dPp+C3YRrMNY96wDGDOcmkBDGxFYOqcGsCJqdXkfEEGD22xUM9XI2PJP+SHeNeKmh5/kCg4ZvTjGdzXqUzIBE0NvTIKr6o0VVx8V04eZQQ2WgaWUlqok4yDJgQ2jUDCkVNUBglNRMay25n0fgFcLkvuDZsisjE2FNAPqpgDRp3Zh7NAYCvj3GY1CbmnGJFAN4iQlJ3JnQuATm/1KK8u2/Gbp6kaRhNyEKcmGc8GXERw7oquz63sH0u+rj9URZl7PmgJX1ZUd7z+mdtXdo9Q4WFGCYyxqpyMM5c4OJUTFLlVmxUuFeQlMigCf4vAYz8FQVZFOHdnbQz4el+1uo+tbnMl9ORBlfOp/gxEv4oOlF7TM0/i+098FzvxBWFhgDh3KND2fVxLLKhtbOTz9QQVcRh+T6XyavBNEH5WDK7LrKa5DwNA2h8tNHRTPtv8YtSN0ZJay/eHbCrHQDik0btr+nRThv+azlT6jxRpXv+FQhcHIRIKKa4N0IW7560pUkq4iCPxD3bss1pJoNFAOX06+2FTPE5oXHSUPxTZOTsqehAB8i4vj24nWYz8vxZfLoQ9OX9p2qNERf7foEOJtk0kBn2yVzrihrj79Kwo/Hq7z4b36h21B+zngFsuYU7lNlhHLYYPkPn+//SoxXyGZ02aDSZBm8ZsiGjzcPnmbjgw2DXA/H9YU0pH4XOcb7WvBdf+I4mx1o2B1XdBHe0U8nVIqFkjhPpZ+c8o8F1kJrKA3SNCXgxBV8+3yMhpFTz0lhCf2BXfDmH1Lpk7ydc0N0vouvDfwK79hFbgSXRDOyGsNyjEjkShk78Lj0RcWWFh1+GiJQKUbGrpiKgZQFvlZKO5qSnPRCoy8zTfEsmcTrBi2TSE1xw2VpwRWT6044ohpRvOpWEuXgNU/lnoUxhBjyjcvfEciogkquGPWPLzSuK3vaTfei4i7h5A8Zi7TUlXxt9L5FsOa0Xr94VyUamGizGIFJ12qFG2fJHN2is7lAKDed+ehbfhLRB/Hy79aDLDCGkxGnZuUxhkmsKHtXU/OGniS3dDCYofJaTcfrZayKzzGs6LWIZKE+H8SyVuTukF3H4G2Oicb/P4zPR6XLGHrbAcL7HxLY861axwS67tWKga1BtG0wuJWFA2A5o18w6A7qv5Ysoyd+y2p+Dgz87xRPtTK0ybkMTeGX6qhRWlt0CYA430U/Yv5aJxcPf3PlvlJia1fqK10leSRGxP1Owz2iuMjNvRorINQsdzoltooo0VvQZEoL+/1ebNt0rMCpTqzjCsq6ILwnmzaKoDJHw77YAr14q/FvQbIrYT3ApY/dcQYb0MyXrhFTM3Bu+Dyg+arRjIlvdvk92hQIF4RKPCuSqLG5+6EQ71WDvp6/z9h14baR7r8AFG4qT2SN41zwxuwbSMt2TVJ7XNc/7WlT83AqUUYD8AnRRBsmCi8Oo15U/vhj+kJftWyg/3qPzMtFiL+mSODD7Owt/ztss2JM4a8eIsJQ7fmLSsulfMGCoIlj
+Fl84zqlHP9WsrmaVqKuObL0Bw9StXcMDpQfdeSrgxqFNYYaD+aWQm5wK5ousPUolRfWFGBSK9VP07shzvD9cesyQVvQSJDd0dv1BMiUDmo7uqqG1COuLL2E1TDMZ+qZb8kmTq1MGLU5LdNc5LYq/Nzr8LEZa37BfSqmQr20zwetz3Z9PzUL+xxERxBQYknF/ZxcTiLgqowmE7eSY3Y0EHYNLoKdNg5Lm98SlsM91hRHiDYulEfpG4FVLoXKQSz+pDF8uhrtqVBOPdd9pNOkix+qjYSaTvfRT13LeVMq3QUymeSRhC1FWIwiZudfoe09IQgGWpQX0BNRhU5jhF3LuKiJ5TMbRRnFpDtsc+Ke+rWB94UlqHKps1rlENzKZvZo0VkzassUFNp3pYDhbjWhic1uc0NshXQBj0cZwGCPIrqeLzDPL1wve9uTYzXSR82xbIy3Z3POCeokPO6dzhzA/fUw+nkq6w4EP6xi1aAdKiU5UMskMjagdHqUhUs5ToME39J+vaqiYwIkRex+XMRJGlEIThCj28iTdiEYCPGtqY1IfNCloyC4BHPgYc6cPegFWNrEMDFkfMtmJ5Zhyj4/Fs8CwSAjHoIk6W6ynkbmeaXxu9WpjnzpkIJr3K552IBHu9QACAM1j/rX6Rwbl37VR12W2PixOO3S/QPGXmbTo6BsEE7KryzOsvH8BD9WG82cbjKfBf7yADNkCSqbsl4g7OWA9nKA34g4hg5hgoiDuTBvmKvV/VGBQ6j3j5hYhNCqN2pLlIoDGWQeMLSDbLctJRwXWB7XJVLo9XtrhILFTce0MtqPNr8ECUdUbWSZU2gVRjSxOSFrIYXMF3hxmBl5oBcPB/sZbUypu80N75BmrLX3uz+reqBS8Z1aL9lkwrPugyRDI8yTcDJU4EjJLtqv7mZ5NmriawDwuJBuuy4QxB9ub1VCfHcenyT14/F/1Yg0HtSUw0E4lFn8Cz6k2HOSvLtnnRBMOwQhm7NtmL65K8+oYR5qCiwBEKNymUhoLrW8XHMQLzjlNH0J3lQdfwR+AU9XQt53ohJ2dSrjqQdswyFA+zVyyNDcreWHRcQcdCIQvzB6g6D+ms6Yc4ZDguvD0h4l7JP/oejCVotR6bF/4Ac8q6+8Yh3LNpav6DAa680TB7Lhhws544AXvs4HhchLbrvTU8c7/ALfTU6xGOtHIt0ziyAJuPIlYUP8VqCaFytzsjejJKRikL6OjTTuwumUJYis+Xcsf5KNU+ZHaRqVyO7qLzm4ptORm4vVJamQjU/OmZjkD2ixt4s1ZmC4Ltvj8zhI++bPKVHe7Bs6wuOb7SPQJjqHW2KKHsZUFdcceSPimoVb8YA5wi0YnMF+Eokx9ybA4lVbwaMztHONdOKaCebiLkUnZs/+myOW5F/zLO23DZRrlLRhCd0cKXpfFTDDciXtgv6qQlO52YPcl8ZgVFN1rVj3HFyfRDFnfFs2uHZG7jb6VWIV6V8Qy/f6rTaRbEXAxGlpxCHk6blzo0LCJRliEuVDZKEn1IYWxNN9W430D5yw9IW9FGHjb1wKg64uMx68KETEGp8VGYn1x0FBxsqxG3SGNhhtYpLOcAWObDYnPwmo5q0PreTUM4DVoyzEJwfifKPJCJpDjYACuSZcK5PvcNsAorKweZ2FPJv+kV1ZdFzEoU+HFi0FSk1GevHThZnZJSyyVeeZYOeu6a+cmckStnixukdwsjhPha7t9nzJ/Qx4Hpf/oXPtCm3TXsy40qPqXd37jFabLS6/iizo6guY+QkVW5yQ+cihKZR93vZt+tToqNGz3AE0+9OVIqRkXETnGiCqVv98ihak1av+AmHmy954p2zjf1r6Md323A2oSKoBNznHsIBcSagSnjc1HaNwVCg/m3WyJt3wKjtvOoN829UIebwicpHwWHdTTLsLEAdqsOY1Yv1iteQUZR4WxAmgWdI7BLHdumRrb3bGUPh2jljF2k06BEITlcCxBHhUlXkpaPq3+NLbdoLI
+44b+DAy3Bb5Kd1/qLbN3+Jd7cltwBH2J36l0L6wTt6XcRUOZA3fNGs/aZrmCjKoaBSfr/g2ZfLidWa/7r1z15JNtO8XEYst70daTjWqlOqP0ldRYEwzR1vpJi+56N3oP7bt1zxLl6YnMPHXHAAYenLDJwnUIBjjWdBG5IdgXBUECW5rrQJjZeDjysZkzMUMu6Tne52UenrshDDMzfkt+i+ygXbjWabXSPFpJWx8SkPDwnyrB9mvNWaekLtFhYzFt9/hQIiinJWjgbCe0sXkFCKrzYCFiSmq9SGMaXxh1HeIOmEeg9sePAtcrQEp95SmiHkDwcWAxqCbetXpsAq8Ls6WyZW+dcF65FqWa6ACW15MSifd4PkNwwbpgirNVpfOhGwND0i2+ejCZ3AmyBGRzLUip15QweOEMaKSFrK8fe0wS6K+9zwMnDDVpmyBP5f4fW/R8WyNKSzAY6ipGGxrcFZiCrr/mDyLIX4w7Tg5SprZKNidSKHZ+hUmsgQm93gc9qk+XUaZFQVFhMTdthEENFYKt7FziursUB1lz+Iu+z75dxNXq2PiNzOgudcB9Q65HYeuM8+oZ0yUevn8TtbauxnhW3nqfOLhGYQx7+iPkrjI7XPF6JjmYl7btbqpodilTiyA1KLJl1WRHLANA/IUXmQ7hDt8/ry0+7ReUXrJLAzlZjM/T7TJWT17BUNPz19/eDYVP5htKm7YEHdH8wr7kF6zUlXdtIRk3bQ8y2geNiHS7Lsac8EOdQP+Y5gEykKJB1CMX3oATsLPfb9hRhzj3gH/rXaGF+Oq/UB1dsB069BhDyRLc+YoKXQE7O7ldn7NynO9BDfQdY4bckFrZgL8tsbJFKgSu/RqYpyUzY3CaR7+xPeBtfzTzXvFiWEWArETIGilmJrO56V56MQougzxuFljnNBiXbjENwYN5zJq5TG1aJC3qDkXRG409nTnWXvfJowsGaQv7Lz1G24Ux6pcYQ331JEYuAm76oeCvzXo4EZwRYgZF5ZbjJdusRhnAHsW0yPKe8uXUDgfNEmNTlhOhmgI3Cfh8asTB94liF8vn4NKxwEnUNgD3E5yhoAxKgT8X7VJF4ShW3YKlHsUcNaC4j1r3YKoV4W9/8HkxtwI9VeOPHDyavSY1+irJFLcTiWnaZY5JLSKHO0RfUVFU0xpP+QMrAF6pqanUggeLUVUPudWnauWaMN8aKOsBXtSeyLpNbH1kNTR2ZPCGXb4T2N43+dQsNOSlzAPs4btSCR7Qo20nr7V4RJ/6dYduHfiSymZyVzUKOgPMawsx/UB8L0OxW9apkapK0VxQ+7QjA4NwmCTbooYAB0UGvgeaTDHd54yq5IIFt9HIi1sB6FtN9kJx+eheY1s2k8gGVT4hmlNhusBKqZeV81b6YmR+yGVWkpDml9gbb0D6Au1fu+0ihkqAdQf6P0xiVEujy/4LIv2NZXHaiCjVy2Wq69HYkNNlqoWhwL2FkngRxIC3oNpxdB79eDYnaK5U2GZGyRN5DKbE3n+3myeGZwVqMwJK1jgP32miZESPN8LUkos7FvElfwq+tteboux0gPaOCpChCwMGSc9NoLvTJ4iMNkJBy4J+6VPDB1P0kDK7CV9rBX6Lp19vV5OwozkDKa2q1zgY51PPRiJKti+u94qioLx4YlJzl36t73DZcnYTzL3Ex1FIOygfSfQQkJIV7H8CaHKs1TNRpxgVVp56XJrsfsybAPuVIMI19IMVmgnvHriebj1pohvOE+OKA7lSDm9L4JgFez11XWd9Ad9OdkD3mU27D8nqbEnl02Qvd0QgKEGRXhPIBkRrUqVMWywAV/21GkfcPt6ZgSdblLzE73OF6K2X1QUYikLP+Re8grdWstn/ALtlh/x+GfnH5yHYEiSdBXV0MdayhocHSFOi+SzjhfTbCJdcfXkJs0od3GdQ8qdQr6id6z1Odo8Fmu4/vAww5lijr4AjI8zr5/utwEhNnRddYDNSWK0S5vs1ObMdzOfMsYy
igqhmn4en+CogQzdpFf4jE5KP6HnP5NefJqDHonA+mIlSqUC01kfqybjDtj1/wz+bebrSy75OiTsZASbzJNrqaBw2MdWF57cbC9tHM4l1GS5Xdegas9R+xcQlhHImO7B63PfmErRdDvV8JfpuKUmgmdaQzh1iFoytpNNBzgk+jN3W+bnGVoANuxd2f/lxRJd0dzfcaSw3myBhd2mzFLRIBbhCRout6qYhWVH4AX5ETu7EtiSZF1nuTytpZRydaYPsw7RqDA9AtFaHj5GUkRm5+NOuapJonahFK2VTYoL+5OK8UksJFJBQfOzENn6QoDWNIcqSoGA0XeONGk+dr+W8NEn2KDXKcROjJSaJdpdM2zzsIOHBOjkxwPzUDPqSur0d4b8BxnwiEKYJcW3y4q0cYXLL1069aWChBSHKBBcfoHbo1M5FPgbGwVZtSaiL6J61/js6MWIcXnmqsprMfJxF9z3GK3rWm9W3l21vYH9bKi5RskGz/8R8MpUu5BmzTlDV4+j/d7LRlvHvCMeWqGh6buF/5zOvIHNqRRg1YKSX9CWWtAtfp4hzjymty32ZTUhuHfpU8cRHxe5D0OhH+TlKL1biVBJe8i8h9WUjFa2MfSUn3hIq/GkxK9B1By2cUHhUpXVJK/m2KKavuBJb9nHALpmLkBPMv87doaSmBM4XeKMTFbhEg9wKcTGLHqOTVk+MoESFzBs73sKt2hIt1HJZ7WK9yLhvFuEDT2AQ9HbCKiCcMp5+EwAHw4aQz0hi4DwSHNjCHwpJlIdLtszNLfJc9rVXeG2LYhKAqtabWiPJ+ajZmewMr055fI4jBzGQODjcazryvlEbt31Y7JlLeqZKAlhR5xREeMwUBzu6Y/bbaI8nqbLwBbwqOSqQduI6wacT8oV9Hhldh2tty8cMY6l5mju1cEWRZibi9IfSM8zporFzKV81QYY1vrWOsLGKF74BkTEtXPxl+IPLQbXnJWOI2AktiOvwcAczqPlvldx1Sn9Q2dPSBWRR4Ho3hqWCEklkgSfeURHpXkrpvlt6K9voM3oP1jKRJJ3NjUaBTkXNQSE8FNYpBlob+9prznNXPrFh9+unSO6dx9hDC9T8VJGV2yVyi8IjvlkA/37HR3vaZsFaidk1cKgkPlg4ZqLeaEiGRaioNXXlzZhm2V6IC9Y/A4Fvb4eI+BwZDuI6Q5Q8s4/cnZHT5pqm2M7hmVXjoqKpWqSdZKpQdzoyBaAaVpeWzMH5y42FluhLyJiSFQuWMvfErF0Rd8iFAJtiWL48C3EHRgfEwlTxv7xbFakEoi7HFmE+Dv94e4njHlXgMiKtFQZtiEXb050Sd1yNkbpSQOCUvRsyxT0dCKO5HMdnlqzZzllzkImbqZI+L4ew7bGON6yF1sfRK3RhKkqQZJZg+sQ9s9KHyuh1LXbGGamM58gqC3udOVAwNrlxYQnjo3el4E9IPIe4cbSycQTqg8p8lK+cSOmRvFV1sK9LN2VJncQbVcbFHK+kdoKzKPcOCXMgknNrz0MekFUkXpmNdC7/r2EDxV515K0TTXjcmtVWrftesK0tG1mZ7S0Msrf0NYVGWB4tV4auxM79Zv7ayjORAO35sFsgF6aqJm7iFoqTVzc0F2zlwxeIPTnre2fGIMZgtX/UiazAJD39IKu2xbuy7xuCYxMN+oB5aCD09LugS6UYlvTCmjvhFGh4RheDUHcpHHqNq6EU6bRnYeFMbSL2xSjr//lZjfw/DpP7+T38g9xmOcoKmrKa4A5aMEowvQkV6SpK+/6eogs4aKV5CSg6rkBKYXX/PHu/XP/1sXbCoXJ1BXMUfZSYNhSJoj4YDSg+UW5qanUO5Fsen9D8iBDaf7NfeqsyXzHJ3fAranj18qn3CjsLy92WCdaJBjo3QsQCvokZ/Q6iWXRIP+OQDy7bf5C6hb5dILP3rJCDCLHnL1F8L49M2PtPu9FxqMePXx3Bl2WKxlF8j6AygkayczSizpTUOwlBGnnSoH0XKpC
RzGwZ2er7fkDcbjlMmoewzHayd5G9v4tHFdEH2vX5quN72YCSrwg4TZsvOKWY3V0NXUH+LJ7EMK7tiynMk/F7a6sP4vVA2588vr24oDe+6bSjhFP0CskCQMeNt4Mic59B0sZPtbt9/fNL68AWXGM1PEcfvxEARrfQJsLX4CMRGFYWDb9Rq9do/bfKMNm/WKCLjdCR6+ZEpDI0Mw2o3t3lcNVz+iJ4VjY/GjxNw3H4l4rAtoCc4fRX/9oLH5ykQDrVTVd9h0K0x16upYf+rsUJ+mL/b9OX7noUek7H7fBWIWeNesrqxVz6RBdnGFdY+OeuYX2m+4tS5UtdUPZGO5Nq/Y+fqvJ+OK+CakzQb9iq7AaGNtnfEBnKQl9BocOL7Cw36ebaVTrXfXOE4vln/g7eMj8KHitht5GWZGVNTZxiM/UKQG+Eshxz+j+W6cIlTiU1JDBg0Wkk6h/yMXOjQ/6y0YRHaOEvob294guTF2B9Z2UnYfWYg1rz4fDzS5hUBbockZsP5sD/Xf9b7lkGpiZgIOpnInsyiagrpnsZhSW8j25fiPHh/J58VnhmgkDQ4IITyBGDP+g8HtHOu4j9nu0G3+9cAozCk9NKFG9OOFPAxHG/toKEz95BGrKV8D024J0D2X71vik/5m01njCyau+FRMjQMc52PCWtAcPPZmxOtPuPnOBZSyqQBHoREEX0p+FKkP1ikxnrcaIOiMo+zuvqCuQZBg+Nt6aSYVjPCC3x50DP1HPO2pxPpyrP/nxUqiRohXMiIlWhyXCjEC8XtLXpj8cfCy7DDI35BcJQ5F1axh/9tYAFcFiEKvqyYGc3DJ04WORCCI/D8MRX9lWJujSGGC77lVTXAmPLuJSaB5Mt/yQmUEV+jmAlQYfLi7bhVsJjOp8gnK/dF8yqoUlc75Ly+yvavGt658lDzHAzXoVNTts3DqKeKdeO8txDUUyUp4aSxCDYVHq9+vhhsT0KA5PfQHILWavYDY8vs9DqWEl244kj1iFTP4EYmVugJ29XKbl3fGpRs3YTntXSTw/7Ab/YEvy887JLTbUt2yhWKRyRYwY48ys/n1F0Kt9YkTp2j+blZMDqhWyWFtnLOvAS40ggPfHUyZONCXxt2W9xUE1tyJM4MR+WuqRjB5n5KF4xP2Qjl0D4tbfFfytzDrSmf11aDp8i+BrWlnJnxc0Y8eA/qjWk2Y53EMObOjTpf7E6OuaiQXGXW8Gp/fq0OQrOk8IqpkHDtW3oP+BtF2+slmVvgqnfS8koHufFi7NUcnKnQSvM7wLQbO/2KbsJsZnExNStsUXIm4zpWnG898VVX6mGxSnw0VvNNHeLj4sACPxdO/BtVlgyHEPSwpvM70rFAnG9BKDGBKvrT/1fNv42AG+Olt3EhoQfXlABOQj+YAl8T4gbI+Wbr8xd/v8wNcfL2inx4jiFP+CUGGQjhD3Vzc6Vr1xsKeMh3FBhdPk5pzun4mKvjitvUHSNOr3GGFuX4zg8L+WxLao7Mf7AWyHMlQu2ycttp3qNpK+8bTjKP1KsHS0yyHVKWI6n7gnSFz1FoY+CnvOIdg6mMbjbaBTCLQm8KO6BcSbg1rRfZmmY7dsTODUvpjjydKVL6fa/IpmiQXsYxcOf8kFXwnFhQFPNMG5S8OsNVzECmjmclPC86JlkBkIagKG4bYcx1jI6o7EYwF2Q0kdIRTjhQE0ZzUR9NI2oMDYQYrfkNUbl7N/UeM28758k+1Wvqpp+/PxTh7EJDs6R8Tpdpr9qwkFfrbHM+/RTeXhRwv3z23QjSK98jS9QiWNtYbFZdIJM2ve1iBzPzQmYLxjJYY1Js4VgX2Lr1KVU92hMMOYZFxJomQ8ugJishj5KArjCKw7izH7kR4e6iX18Gm9zZEioQbrgW0yHZSHpYX/22iRA2MU5y1hzI1NmDBZ+/oMdmoayf8x7+jqeJkFFbMdnjg2Bnbpezr9my5sYhX/QObAvH7zPASjzL6n9BIJuJx/ujSZMJFyaR3g
YLV9qPEoJ/JDoJJHCLRMQZy7EssNbVLjAn7cd70hi+eNeWklNiijWvmJUeP4JcosWFw190gWMZALh13A8eeIIQKAgYL/jBh3Cvuii+FfRi3fs0JBb48DDrZ4R7eSKzfLVz0qDe1/wgy+jUx8NElSHqyyyXOUb7WGvsTUJYvouNVx3J6YRMSKKzRGXcmKNsupq4sRu9B/K6GfJredYDgUTckT4Qb4ZUawiZGD8JrfCy0CgSbxO9b3fClhKy13IzkDwuHyZJRU+EQlTBINceAMlewySsq9raQmUNFU2hBD47xn+BTPChFg5geIckLG4rUk6bc3wJ1kkmq5aXeE3m6MYkTR1aK9hEuLeVM5HPH2Cr+FvFMvhhsKYNZ6snIuzjJS6fGJPjrm6fP72D7yzmq2fZ/wTVW6VkjHXvXKegqxcbdpzyuB65oZrsiaOndqs73OWnNwJ/t6ExpR+kJ1K7rdhHxqX63DHTFtSvXWyJ3SFym31+U+DVbfXT8ufaxfzvx1Zz9vCMBqLjC4yH2evRKMPZz8xI2vSVCNyiXiNlVCApzdGMefA4Q1/szr83HPCseWcp0k4jMa4qT11hEnyYy8EM9Inp/5bOwRGomOC5SnVLQwYpUdbR4kuWsF99x6zpHhGfasrYNB9pmfsdV88rZq0Xxq+MYaRFPsJSCEe3eKqmMmBG/fVukyeD1QbN1RyOWE5WhcCoyLetNeCfowd9lPbL7s1ePPg4RgU5J/ori7Jr4a/4dipJvE98LGIrBFntm9JcaUREsQISU8QoOxBPndantyGFhx1nx7p6scy6falLuWjU1K0aAXaeyU1xvlAmnij/OA0bF5YgRNOoGUSn/lFZBFsj+hapeSQlr0eo+cwaGXU5V0MV/aGQRdGTe5GFgqsaQ+fRwUURcGWxN7XaXdPnYVEB2+dbFzQSYxHWGuHMK+IQdBv2CtpDOPcMFBvYZ6B62CUP/xl6VUCvBStYUHCus7vtvv15ALF3t04pR3VejRRGgNsV1Z7LsBWwtNZawk+nbZASoOJ5i2e6y0HtuJDar3wnWmVd6/auJqwYdWanxMGZlX4ffa14GKiBqC9IjxZ7srTgdA9klDDnCazgx8nB+O4H4faOUh9OWPbNoBtCr1CLxinQ5moZcuIqCOL8IiMQaS5iK9pbgu3uG2B0G96EUPD2LJFLDzssvnsQgKOV2tXmiVDLHrmNFFzdOgkLmzM3hkCArWrGMIgvZSMdTbZ7P/qgGnhQstIrNaRYZO/O2OxGNzFB4oLthmflXrohIWXH/OQEwldwJuvRaiYbCF/fz+egCPAQSf0H/oXMeX41ZcSOBE01JNM+f8Rh6EdBPO5/5FhbArSalaPBvoow2P/KbYO5IAZ2uz1qDJBFpU5o627hQXIUpB6xqJN/aGg2kkIlybVO+ZrFTSZR3+ovjjW2kRgKA6BT1M/yltiCe6zl0vUuyB/OcM+YUqxwTpTNxyDI3Oq7ulj7K7BVT0Rw1/01xsgKQB4xPfVcFBZ2deuXhRc6F4wuDOfGDWyHbnTM0l2CtOtDh3mfFh6onR4f1DDOA4JEG28caQZufGBI4ys+NIYE3xCxfA1MvcBxp3u9z0HmPL9Z7iEhHpUOUItfjtSzdM7kILyynKSO7QJI0zd2wsYM15lwPx5qRhBB4Foaj/APjx48M9xhNL0yKoHcefe8mKpAR6TWyWVweYcKS7shCngKsFbrsM98olHoPhxuESZtnqQfSKGmt8OKoaXJFgHL9tIM24mV8UcAGJdhOwCyTaa+JNOPXxNRqvyzP625NXAo1Me94sr3WK6qVrOgxnSB3Qum0fIm67dHxP5jiWk/dS8edjZ/d+nnIaRqXqxRgUho5y2DpX0tXTiUb/DlKxw6gTeqeWMeRfary2jb/LpdGWtzxjOmGyLpeS0ZJSCifEwHc9oVdWDd7KKcIIwGbELsFe6eHWVyK+ZewU6OJiHS3mqLTJyRcZVvvFPaqr/FSzyCUDOa9//qZ9OJfw
11ohtSvDUW01/DUcVGlF5G0bPef+ZIAq5A80haHxJXzaVb7P2a+BPUd8u6K3V3eq/HQOycOCntraCHPEfIbu0qbv8Roc3SMOqwTy0yzMY0gZ9kwHkVQC0/M1RscXBTEXIA+uljWK/bHRqBqiUKCofeZURdAnHn7yQ8L5WMSFKIsz7crb68DRUs2WTZzYZcwBTz2P5qR55ZbF0eMdLlJyYhPOA1fZQ07KF5Gv+UYhkD7WNyKbL3xKjSDhMO+4vQlTh13Kpe8cOCSa52XPMURzpaFN1Lhnl+oJ0mzTQX5dCoIRhdOmv1tYgrLPHiYG7dbHrGwqO23n6FNIB+/e9BsB5Kg4Xq7xn6nEBi6T0Tjc0arL4TtPC9IJfBHaBv38x/c63a2Wi78XcAi8Ds1z0dXfZ1S9QgL/LeACJv3i4PNW3k/L+lD6LkwJGYlElqwZBvYAv+WIc9DkkfCi1mPOq/9w/rq5NXrQ5pyfDfaYMoO7/uwTXM8mIKOvVJ94H6XAsFK4ZcoissybedaxNBhzwox8ElY+9Yuie+g/QcNtJiDTUVuVi4kvbrwQeMUVS3T980UUIjbkWqzcklzCjRy/dpO/9R3O9cmJnSONrTK4CSmCLxU12Wbw5+XcZO5oLEol+1r1hVaDXAmcuyf0MbIQFa5d5zNlUPCjYKogs/GYFf5RGvEpM7IBQcnOHOlG6Qvp+SN3Mnrz4JHiPsfuHhtFHZ2uR9K7T5orBQSElEmQAKE3avsn2h5F59zQ2Orj2srmNrS9FUu/cgjLUNBgxk4JXF0q73sjkAcNzHilcCmLVReZH7G+18BVnReg/5voWO+d13PoOOqVnzC4LasPuALRjpWZnjRt3lFWIzmIypAn1n9PLG4y6KGhcSZ1QlK9xGg+LiPKTfDtgokZ4G1yVnLryTCjW4kZX6Hcnblqhy5IJXD+1HEXdSxmTJOLmL21LNRwDpo56pXHLxEqOL0i2DOPQtzn6sY6YuWCcThI/7E2r3Z76WxvsKi0P+QZN/lmEmTlPg0rCW2R3Um1890kMeUXiPybUffmlMkhm1/CstRJePYVPBUZLTZYy2qLbciglst+w86MdhERc4xMCFqtXniQEQ7L1xM0ZwMDanF9kln/bQbJfT8j2CpIGSk8TRWPcKtODg+U4lhN1vSH+iW4J2f1565k3zF/kvZEz6r2LysbSVFDmV8fEWI6r8hTA9iJ7/FSi0Yfp4Li3y/wFpBfBO/E84Qnf8kjMY5JupvvH7sD8TPBEXPONZQerE9XENGxJfYH6LqwdTGImhyzJoap3cN7gBI3s528nKcuo95CIdJ8diY/jwS/6//T7DsWnrlNWBN5h6qyyTEYDiPBXg6C5Sw4/qIyeWPDm6Z0xTQd0Nwh+/hJw639xPr9VzS/aYpw/488g14KRH+BctL1/94KgRpfjdcnQVsNmhtZGrWnRn715+G+r3LPTq6z7J8axXBL7KfkT4iseeW39sj0uP4PW9I2CS2G4NWexJwceH3MygVkMRUsZo8ihNC/YTqtihvqvVjHXXeLHFBfx6X0MMgdmHuNKaCl6kgNM1wBH1Ps2w8yvc5umGxEt6ouJmq5oetOoRxrhnlE+pqV8TR8B9E5uErnG9Pa1C3E1EMrYUJc0q3jKsw76vIOFOTW8rUbqp1FLAKO098bs3BPUlTUJIdc9KcvibjBldLZBU9hNeXCaFQMAmUUkhlDJcgoXKeufRW1Hxo030CMtCYeO0wOFAqDN7n4ITbg/Qp9xaszWXYrcsc9S2hym98xappy0qMkwr0YIPeMWyxQc35S8WyUbTUfi5iqN/th9IH4FahsjBQ/G3BJodDUL9Yv0lG/0EAYnHI4BJXj1Rbjj2O3uVncAB/ENtxqKGNXnN99MwItQ7/PFNEDCMTaJEC/ODb/3oDRfo5uSisaIVCktXsxYhhkrw39ym9LCT+mHA2ZyY8X3EMCJ+haFVNt2Y1Wos9LRBw1ZTaTny8ufuD3tdc/DijTnEaHmG/A
yhRf+B8SJh585EQyKjmvilxEdxJv9ZYXa/eN6tb8TfHmEbyOZg5Gj+/MtHpMXqc9tFZrxIkdSiK66lKeJYJg9IEhhxst3ujvS+nUgZt1YU9LopS7VqCAEPnIJYqQRN6l5lXqruJKAr7zTnYcKiJ94q1DfcSQ5vKWr24H10wibKCSp5IJCAEzD0wddJONJNVtO5TgS97e2+YU5CWQZD9t+lQ95KMUnqCl1a5qmMhal7xs0SEzhlbbFHa2p/MFrB/ZynJSnzFH27qllvjXm8yLFdtn0vYat6L5s9CNUODmK2ObyCr4DdB+t5h766Ukd2mmvAOB/iQgrRX4G+3R5rEyps9r3JAhvT3XtAwE8e334Mr51fY3r/AqIcnMfKNyKR7MrfLPBxOcZOXNc60SYk6CagbGNOz2dWAXY6GDtgqoJh0VpImvM+zMUOC/EgpyFS+3px6QzhXjhG6nDMhhqMslppr3B0GscIJbW+Saoed+Bkn2djPgs+eJhEFOG0biVMtNS82lG6gE40z36C6iw8fdTzQ7d4T6W7zuHX8s1kP7aViKyu5AMP/Fnnm0Txv31GJx755WQQq0f9Huj0g1KIgAADNtm+2/bNt27Zt27Zt27b9sm0bs4hZyBE2RnN4w6UKW5YbDXwM+kcqefz0BexWH7qm/5H/AYhhgyaQODcFtcDnMwv7UVAmwUfGNzbmWriTx8UwLFu4BGmntBk/k5tUXJwMouVQt79y7tml0bnYmlPEfVwPj+O9CZ/hNeatK1pRFlQgaC5RnXlteAB3e1oe3j/ofsO8iMZmFzHtGNEaM8bqZgnKa0dGgjBzWI3eJtNHLgbuNC7qxkWbt99fpk8oPO837nX5x2/3qGAxOOCHpBFNigRWNQQ33Zum2jWrOnEMRpW2T+mimVwvtOC987eSWPSLX5rf64OCgkllmlO6K2YaTd4oBos6wKPWVCsONcuTvoOYI3BYuAu2arhh/BczdFtuffQKRyOgc5ZVWf6nsttVsAw/YImJJg+L93KWoY0VJaa2TuKIaHUlqsz29m1GZtpkxdmksT4u86YeuxwuwFoRYdUYQ00hnl8dJMDno8uIUc9uei3DaXN+gBhboiMsHaSOdFMt2G0lqwgPbYZz6NNuLqLWtdN8LqCYWW0zIzfmg4N8QV/rLcYNkDzRWwJGkHc2LFcZ3aliqE5wLfKibqDwrhbV4v44e9TlpaB9WcX8RNPnX+MKKUTqOHVbzmib787PWlYVIx0hjbjcZmL6WiN/Ieev0hiZet5IRkzmTRUc4heOaQlzhQiYX550b5xsqUjXMWm2SafTBS6O+Mod9sR2YYOWf6ANr0QLB2gFbAf3yc1PrnRQOen0ISPhkgPzZDyaYCrX458LvEOA4s1k2hzVNerbbkllEXHsRA45JFTsVYBQkieY3y6ncoLe6ptr/JGOYpU0h7nEy7P1rum0+3nUGORTQX97hm8gJAHkCpUE1wQSNVl6ScQlY4mpsSQpKjYb5g/rsGjOuavfBWkyC30vfQc8BfOJFRJv8SkpwDB0eKHXH3vWXn1jSfIQAEkJXD5Owbcp0YFqH/yO1x+DQ8h8E3OJZTW93DgLAdwenNUSxSGuvTMflY8Adc1h4a3+uGSSg/cg7dPSDdrhxoA/qkm/QvpB/ebo+hKw63ubEzw8UsqUSnITDQZYOkkii2RpmZM++zhiIsNd2oDrKXwLlGrOZW50lhPe2hprLo7Jbs6nzIgQQC5kTfj7zeyC/FZBwsTRYjiYhl5cC4HafVpjlEtmoXJpXc17H3hI2g7JZpwNgEiN8mtAzW/h95EmilvpOSEYSIQN1nTWclIw8hk7eWCVYJCnhT33T+PcISfX+LD+BUdhPv7HDAEAa+EY2BTHXpQHXTzzhTcExSlnyaAFeCY5N25cuGngovdJnAuGY/PlVYUw4xvqDATg5Vs2lpS4MB+FvZCBsFQxel6Iz88TMqfLIQ5oG8cRca9nXGOm5EAaNocCDRc1bpKqcKw2L
7++EQo+6Nodr8x0SzNOt1uYO+/jjwkiVW+xMUQoYwdpFzqlcv8eknDP1jO4jLGCmHYbZdvn0aq7D49UEuzxBB6zhLpagPKK8ZK3tgNTTqLK5k57oHwLG4KPeS2oEv2nsh0A1VjXWrn9ZZT383t0IrWDTo604aTJBpS5ZjRoMHF8be92rP+MhQV76pNW8wD77SSwjgitPE/eCyJEnZbgVIZzLmDwOkaQUCeWQWEboYmQhSllqJ13pxReoS7nBalB7vPKJRBQKJd25PD8RHQNzKCV2ilLKh2rmK4GqcNIQKkSR6v4LTlwtAGPnRZKd0cE7l4v6S/jf+doKfkY54J1U/ajP2i0SLkL4NmFqgspxWzfV0q4XITB0ZOdEzV5a7us+aefI81ntY+4AunG4Z0P+9KGMJISEXYZstJDFwJwQrKxEdweIAhbsawR1Gu3iFcC+cqNHsiTQIiU8fl2ccPlPvOtb8TVEKJk4/sPdzSty1fTuFWwnH58qjdJgIUFm5OvPvK2N8VoYwhrnJF2uSgyamueIeajfP21IKCGHRzhzI4OQjFQhfa/eThi3Rx87X4r7U4ufrdg6QN5aSdKF6f7C6ESwby1h8MKfb9LSo0vnbxL3M6Kl2YmGUagUnhWSJGFO4Tofgt6Wa6Wt3uaeqqvCJGD+FHFL8pe8tTRCx0Li/0ZPyWr+0R0v9wvYzCskqUAUuQX98HRhfgeUS+xXqF4WeqmFNfLOyNKN00+yXscr2o75e9eORL/Ft5QvPEIX2SFnVqP1r+mxscM+ZCO/Qfx/EP6mzp+Bn0r3ZDKXTxNJotrbyUO+P1mI52pARYibBM6XYb16CDUVnhs/l4WwECLMZwEKxHTdKg1PbGbGMSgkbOUyaYdpqtclT/wE+qRnBRsH4pd2qLC0qQavTTI9iS1wSQQOTe4l5CuY2c1skZb05a+BLRzglNS3XfhstkzsfPN6Dkg3SV5tGE/f//uE+gV0ZsyQ03L8mW8qR/bU6G5rogjfsNjg+qhcC9DUlmjTSzxW16hlTHB6ijv13sTRbfkTKh92u257fm3qvd5R2tiIDYzjKVMlKZqJKyZIMv9Nm4qCmebIAZ65wONeVxmjUeAnGCXEKOEnbjxOH7Z0CXzXQKOUUHYo8UXqZmGc5NB0lbzuVGMUk3tXutf9p0S2j7wqm4csVBYbPRy+rRWZd4G4zH78SmRouOm16mRSdMst62Sww23E7sTosOh8Jwhw4AjwxZU2eR1yJUSTfZvqX2VEjvc8LpoR/EaXLvTHIeR4tMxnwG8/Hm3tF7k77vDI2gQmf+xK0rAP/m9sUyeMCHoAaDV8Bc31p40ta5MK3uCoW1n3l+laQhyLXm8staHD8atT84j00xnXHPq2mjFFZcEOGbBvmP5CyjmF4Ircfjrohc7UCLqTDHlzHs563vtNRumj/PzxvuXDx0nMHVYUs61DGUNHrkv9F2lWu57eNZokkI25kQyDutjfTkc6pkQL6OTy4/X1WclOzQ9/+u7JulEKnc/COEuYpp3zMJ6R9UHQZ5hB3aWLnvJB33VTAuVAeXqsFCs25iQd7Ih+KoGqjCMBR5hkVebahYwRAVYtujnkqD1tpoZno3pFFv8Qj1FyLrRI2MF8lcJ3SWd6u+6zwkzzUsDjTKT3WL1xMIYnd2VmBdN3hGBzpUoxhpWT7R8WOrtcfAK3WD7ZvG2PBeGsWFloMX1E5aHvwnCsbWmR0BuSmfqC740tJul9JX1gGUcJazj9GM1pitX2RJdTTVrrs0MrcP9eUu6UKvPQ6wbgoPmnDdo0wGbII32X5aM0uSDjNI299hBzXKL35y9TeQPNR9lZku4oBDSf6Be5js4lZzfunXNYsy1SP69wRub6MUjf/jx1sW3ismHDnmRw8IL6mKTDptBupgBKxW3P2IHkeT7Z0tdoQRxY64kRKycQGju3jXCYAdQfRKycY+evdwZHhKUcpXbYwZT9EZNuJmDsioLStLU6cmz6qK9iZFdOSneg
5b8PVeu9y6vmOzWSKvqrIp5AZlr0yRf2LlXSTN8UQytZWFaY7zX1wV0aHY4PROwjm/pgWclqF4jXlm5QLkk2ew2C588nS+7YjFdWsiI3zuWj3ei4W6Jb0mfeNXdF4Vh5O8yNF4n3Y7fGxT2yQvr4SvORIEVhz2aykl8NrwbBhftStreqScJlun2Z4lqmEGudmQYY82qSVcwQl0AHti6jOqjEbo0tHi8kJsaI2wnegmSNE3oS5YBIxRwBBeDo90speCaMRKbZyfErNFRa9RckYwlyZ009ttTplbxyfMqABx0+lA/1c9qDIYP3dDLd01QfMd5Cl2SHcfPE4ek1dCd/ZRkeR3vm12jbqw6MPIJiJm0Z8Kqne+lRpysOWwRIdPpeDppmKDlCZ77W9LLTd/n+9XP0veJj8qYmyoz+BjfGS7lcJGm17AOgu+gYProftTmpa6OIAWgKbk6HvvxI9iNCQ0tWLMbXvpPlgxby77vn8QimlzEK59z9Z/T1CC+7/a8J+BM5JQiUrAOa7E9fT/j+ASCZ+tJ92lMFU4D4H+pYsyLHg4+7zt0xS0H/c7DSBTsAxz136kfNhl+LEqjMBjlCXhXbaVSh44F49LzUP0Ja+YG9/OFtkBa8aen42yoPUQbgzTm5MpXnlGke4xLvEGmvU8K29G2RN5sHB1GOefkOB+vwUlPsgW6A6drx12vDldCIJqBMoHCtF3/YpUHxGXI2VtcrFtTc5mtHmhBvwFn+UnOdAtLldeBA/wsehMsppgE5jWyo4LC/ieA4TeHqswZPF8KmxCmZbMXU41OkoSjda9WcEVPHmb1XbR9M02I2AcSLpeHzNBLbRFWbKMBGxNpMflcYB98zyzPDcA25EoYCd9p3TvQIyN10TO0d5sdtJmdQhD5N3teTz0FJuZz5KGEX9xTJJwGEJoLwcLyYSkHtZCt79QKoX3wOc92tQJVYPGW0tgW/P6mrmcuAlllBRvXURy4oye0/loGYuQA2m5rkNCg+ohTeMHxjz6IsdpvUR55XKgprWg4xaOD3wyF4N9ym9h86tmuiwPFdb6KJkIDDzDWKLtmrGVPDNsLOBq/P1mKb5WQ14zb/pP9r6uNO+lXKPhbRqiiUkuDvVz+7uLYxd+dVC0W1n3Fh2E7+RQrRqVHiOhsM4yNkvPk8ts7FZ4I22fNl3FoHK+KDThAjwlvDcwOsyJdKeWfHbZy1L2L7BBtikRnwxnKBGRDJ70lTNG+xcy5X3df1XNvWg4yWnepvDlcbGZoQEpa4roGXLc45CVL/7GacJdb1XL4On/XGFKOk2qntBNiv88/0VOUYYFbZMV/Z9hl3V8FW9Jvr4PdbJRhYY+0NBUxF0PH7U6T7bagBZjgWeWd6eXBLMFqBhQMExiSVpVnSA8JfRikXHKI6A4UMHRJ25VQ85nF0wP2Ox/tIPHkfJrcSJNBqzjveZOOr8XPBifcz9MMVk8kgUTS9Bk4PXhVwre3HQ/ibo1KGZxnlKyaEqn4uvNIn+PzDGutATLvcHqOsRCdMsEnZP9NIjyRyLO1E9/MD7iU3j1BW2aNvqeQpiEpznybAXZMzpE+cxSk397n8mZEdP1MaJjMEG6f2Uyd+H7rgpkUinPODs9KhttP7ziwIgJC2UgTcIRr4aAEdctNBEy/LX/RgofPFa0cyHSQtcx3bhkTEliKnhbbp7bIK3qqJtiufNUNIzPvwm/lhhK3o/9jVSkafLV39hRZQx4CUuyCJDEzrjncpBrcd+M5cUxaOMg8KxvLGdPt7rmehM/3/E9ErhHl66nVwCKsEmkycGQqDvFQmll+DzsQsxqlU/1Ylzfwv+NsXQQ2eUHoHy+/DggzBCkgWjvmS/YJtlmOOFaHL2900ex7nEIvk7RV63nTA0iTsypaqP/E6D6hY+jk4ImLrhDvVVQNT+pYpE8zUeBRRLesdysVEPULvM8yyEz+eI5ZS/xHXM4ty+5CFH6KCtwLDmbKbfeSIPPA12KO01wHlVid26jZO
74hx9w3Z8fZOvhqnGmX+Auhgu1rSSywsqtbCzz4SIj9PuWqae2ANulw6Uv9p7z8453QySChexl7g9lSlj8LJVVeydBE3/mEVjEZl6FUC66CZXm8ztW0YkGNqsETqtYoZ1hAh/6bDdrfvfNGDSXz8qGcyUfSURjQvGxEXTlhS7aydhIU/nMpA3+NMltAHdbJJsTiKJm9MKGHIulmcU9N8/aqBBKMIru0fw3+8eYV6ohNWKQiSqject4tD7sGZzgyl0gyzq3npLQWSzBuiwQlguAcf2oBQlxYX/PVZ274N/DbF15IL1is8isFM1xiZQjbS9v8vr9rrP/4lMtO4r7BoXBEmIT7InsZsvC9H8ezx8BO6LqBkxeS1XYZeNKvCu6KvXOpkVZf7S/3x7CcYUDxgRGsY5Ik9ckxYFUu4VB6xvCY991z3Hy2k4qbTZVmTyO+AFnjRtVts1N8jJQIZSDUqGPPb5N0NZy3250pIDGCKX9qSVtHFGd/n2VJTA8357jYUzZBI7/CZ/hoPqNtqfGfyEpmIW9J7XRDBnjdiflrjmNlkMR5nsnHvuY+eu8+ttEOFnrchVTcmVFdoZXfJMrdjHLQlS0xzPBuRWgFAm8hbaB4UxwgeIlI8A9+1/RaxjpWNasBgmpBSiI8nOVwxrg/KR/3tPdI9TleRzSwyzr/8ZvH4U7bCGJRGR3G0iUg7Y19oEfLElt2kteYWtvYbrVYSqO/J4PJEjtxrfU+XftLK6hlsAcY8vRE3Uskz7UfY4O+rQsxp2UlzctvP+fBQOvOgvaTaZ5TC/f6rS1yCrRDwDkp+GIYiAsYyPSSn3vRDyLJYOCiVoDZufBRlKrDLICVn1J09fwlrDrjOxUTcMg1KwV81kIkriJqHYRz/wav/zh305a7YTXR3YrPxsrFtgrTOFiWLjD3BqmEMoecyT9IGAES5qKzc5kCZOzYslf07GUb3B45tkivKx6mP4maiq6/TVSDM6KAKHC6qelTOKfwo1NapKJit99EBcWUmhcHY1oq3fOh0hQIHkNXKq2CRuOdkgiVjPfQetRPJakokvYLG0rhAquzwSm3/V8z4yRepo1HboeKqFM0iSfMzckm65wXC7+LjiD8UbyMcg91t2B33SCzU+MFigctO29PCnnskKBZcehWHzvSo37l9c1F6u6QhW0CXU0w/tHjHsog6LANkKUEnskObx8otIaOH0O2DkKDKTO8SMdQV6myynBQf1oUhlmQ+YhkpJgkxGs2srw9hAuNASue63UAJXTvtqgZtj5lM8DcZq+1MXdtKW0xKCYCM8sYbkrT6HwI2VfIRQyL2rhlfRgEWVDcxF2J4O7cFHFYgoGqNjklfdBGtluFRgRNoJOxVSHjJHv1HgoV/w7wVg1WMcs6tG/9DazAVj0bjNz9ie7y15KB2JH8Xh/yAnjWsi8WIZcsS154LfIAiO1Lc+gsgFHVLL++qFBPgZ6NR5r6m1s/5HYAqPaOA3RWWTqxjSIvb95uxh40rorv4eFX++yb3srpqWT7zdbJFyCcDrKF5VwjCvRkFx/ScDmpbi1FUJ0MrsWylQxNBOHzA5+UKKg981vq+0g1py5tFAlKLtvVIwPgs7zqknrQAmAL82d5+k5zUjs57SWD/l5WzjYHqq8dlW+1PM26K/OVlXvUDvf5WQmGNjtypxQMMsrfeaJcdzgbRwiCXw5+eZ/oxodZKrfw/nETlxJmSI15ikJO0CiAErXnAedK3AWassZe/GN3is/KZgpqebEjS/TFH9Af+yz4DsWE1afbZiclF+CpHFEwyX2Ptl+mQ8Qk2p7G9fVD1wm+dPEbTd3KExiO6oNmxLFrgkqjE6+aUvmmL8Us0OO5kp8pCNm2fRV/iEQ4Z7r2fo+1K33ALjjKBagfMSIdHyLv8YaIsSMIxbB0Rpaw7Ww7yFlgwtMKYMaoiYXgNx2a0MJIRIZX3DQ+CmqQ4YqJTYFt85mwpbFwsH34OaboyyuSePVy3fZBvPxOhMjGO
NRUP7p7ta/aV97z4y4lREU5Co1reQbKN0leg+B1AYEkEq87YurToqkh0LbMEZWCcMTToZM/xcFLd9u1YQZ9QKN7C+oZCXtf9VrpAzad/TZNSrXmDyFMBb3y5M9GxbpuHhttkh4sPnKEXutBb26pEGY4wUbu/s7VvZ22yQ6isO5BdajBz+HXmiMY/Zsb3cqsjhVj1AS2XAV2SEfUR9AGU9Z4LfTRqa4cTGziLivxsEHqBpGzbdc4l2yeMulhHsRs5a/13hmhM+aY9qbtU50XZTDEjtlxjYrdPqUXjjYt8YXTmq6AjyqB0oRNFRaATpn+ZR2FYySsUn2ck0l5EmJYDpPPBmnmVP/SGsl8To5LOMsVQ97O6tFtQ3HckJqKa4etDRsVfZ/VHjJWAxoRdnqRFTZhGltgJLRO8ft/t9Ska8eBPB4RmRZRJC1ZFPkyQWNeOoeBoYSDwZ3wkLHjqCXVz4XgHlc3fE39MUC6yOvCVwWiGSpFYu840L04xyj9oV5I3imwO6IF3YEmR//BdMsS8OvXgbUo8wHxyg13AJVnk2VLzjWIDAkBKCwpCNFlRV2dnMjy23jknjXjcJ8eIaJk13OxUMShYnmEJDH4LJjJjVxaXU9EGBcguUl7H6t6AUDlz0Zo5NX4+1V+/6Fw4OofPMwjuA6Wv3kX9Vfk0hNdrDoTbIZjGaZIFmkxlXC9+Ago7cGuSNx29pLxiIERMGoRAoVEjRlOd7LWOPpvqH2Wv5f+jbYx/jci7AecytxJrCeEWawZe3w2KG4VGqhtVaKxwVsH0rwM6cRTyBbgYnYORh5qLICSoRa8A4mfrkxqhObMAVWc9qSLoIXKAbLBLwuKwvwKI3HHpe42XXLO7vRempRERWvih9XZtCZ/5Ummqis6TfFfETsTrsp2jGzyvjDs7SR8sTdiryaLZbBmKFBUOIfeWoP8e6YnfWvSx62NdfIn6bb/GFV2/uapVBQBRR5NlDa421eq5etAkIZbSGtzfJHCkRH16WFIKVjR3lo98fiMfWk7+nod4zT/vzk2GfY1pV/asBe4yOCjBH1O5TmxtEyYB9GVIYfC1F8NqLu6tY1nzlwM4p6seqJ3YgTZGo//rvqqYa/jNsfnR7J/YIOVjRCBGS+ORZReK+j/Vr5zAQ6PjFUWhmi1pd5C7DuovMOtW3GD93Cz0FUgPC4whm03elRsotndOWk4MnGC/zEwtcKPtJE4h3d/Su99dE4blmpjm/ySW94LPNzs59IVua89jCcM4pja1yr0pyJ5PmS7agSKlumqAFpAyE9Y97SlRXYFgTt5HUEJmEBWbfuuFpgFHeEt2dsnm4MSfLuLWwcVDQw1z+YICm9ykfcjC+iQR+bVV+UL8B5Jq4QUjeleS//TnIbaNtcffGC9NcdB6PTrc6GDmCtd7HlhvMmWXmcJKjCP5+T4caSSgwhMOk4bwFmb6yRLKv9XZmcXjWOi25rRSjYlhjfS2xp8kzXdE72TpbkbHlIZfSnRXt0Io+2gqoUrc6KKuyvn7QkzkkWVIvOlg2S/yfz9+3fRWuQ1Mb++Uc5bqF/Ggrifi+XlFTwdoQPLeW//NxN1y2P3J9rcscvJN87HovhK0kmvR1uiasZCVtHUyc0GLWhK1hDlfxZ2Yc3F4xstIsZOHq+s2qutdsujMfOmzmLhv7VAcco3Nqog+sBUt+dYAjOiJxedpk98nTClp73UL21VUY6I3gcmmKy3YBPpfDyEu6ei04kTXBLvAOIhUFoATCwlnTMr0lNtWKyFEj9zHIhrTofPzN8XfEAqTfkfxTDfFzG2ujk6sa5VvxZuVWyp8MB6NNEJ5k2+PjEQW81xbKSy1vV1UlG+Agr7O88pmHDA1hDVff/gC6/v/GX2Kq+8W0ozbyF5CrO45WfJN29c1HU5nouktEslEIvWhQ1VRtnJv1qI7/hRV1APwKcHpmALyyy5NwuCNwOdq316DSJFhye4tQgd6olZelzi/QzeWg2+yYWbFCuz7mJ78
NgZ3OfYOCSxzcGMJQ5HgV+yv5Y+XU6g7MLyX74wL/P1rEW3DFDi0GqyXM+LZUcYzORbrcDovM/WlScNoS0Y9hHg0ZHmL4PSS6V6fEqSjmFs+3HUvWpbctnJ4JJzQ67KaXQN8QpYjq4ryHRqReXRkM6qZ6WJxLp2UkYCWA1vjuAIOQvg9wEwbm4KwuDR5ZpObwpsWxKbvreEFdg0To8/fPYAt81BosVdsPXGd1UnlbHN2RwNF0O6kBUPO4DYRRnwhz5FeuLjFDsNSN1HsrCUIR/0e4WuTvpnWW5zOqVs+FtpS1cA3AgcZplhmvssWNhA/USTUnU+dungm7ew+sRBZgbvGwy+qcSa7/MjSDnsUKWmeemIUYmRYIMQCAWjyJ8fTsvtMp6cn3OLF6+vyBnxIwtFV3Vfesrpfvw7tlhPVstGAvL0gWyebI5Z7zrGzxQKRsOOin2pfNiekkKn+TfGRTtnbywbqDcDCFwCCjc0c9+AqKLIhJNwxWSSHdU+Gvk0VELa6dXMS8Sxz/RCXFBxbeZpcpm8bjtZHvqPpK9LWsGQSgH/p+GVz73Efs6QQ/KBFLpsO+Ujcm3FH/Lbz/ptVlTXowUkW+KUT1cloJKAhNbm3xNDbFzMBpFewNUseStcJg1QiJLucuZGGMhGdKxyk6jG57drFIOYwXn2Wpno/UjwVMYSuYoWVYzoMyyyEATIS+k4gqbepTpdmaxhyeFHLJ7Cf9/L3EWs64eR3NkM6Pfh6hy6S8uJyjdibp1CYJEnod4pskgQVE2earDl7JUGFnSNKiwolVf47IEn4qGf3gzCcl+ijJGG+TNu+Mf8K+2+Yw1jeS16HXEid2522VfO6rzd8LbAA530mqYatiQ2sBfkaV4TwbDZC8AXd0PUSYmXlUwjSPHTwmIWIDtScUPzRvMyqhqBVFQ5zRV+qfZd1Cy8J070PTSaYzMGqG0wSXHBWa5VCi+US3oc1WAmt8HgEjBEnrW0Fv9BwNCLsybfHYHdb71EjmzTREjvjI4ffQ2q3YpyvldnALsVxtl+9rZBL0UULZFA2U4TAgi9Pl9BeQimTse7gzCKHtjtrSR34KbmYwAPLPaN2Dsnlgu+fIaM0gyY0F0eo5h2Q6bi4dnDEJJQtp32roKJQ7FpcywcAzvYnCxQ+MsHB+JVSt0C0MT4ryWij9RmjvFotsYJXnfgRQc97BBqTJw4XoR5niv5+KnZy5W/v660BkZon00J5a3N46wF0Z22TaVhQqx7R74+9tXmIKwZp6z4GKzGmfx0ZMc1sC8qyZcM+hk38VJDc1+PkYvRSd9M9bK+bCHxPmQhhEOsPaQHmkWdnEDCwSo2I2T6D09vh0hKyVucFnGQe6nrd7sH3w4waRmbb9VXOuDmpO/nfnDLEZI3n6R2CxVJJHXo5lhksaU1BmnHcF9NSw2Bstu3Sd2G6dxMpuG1KikBa7cseRN32rTa6iY2huvxp97ekT6hv7pepNSWM3/6NJsEJi+LZzpkBC3OoUoFt/e6cabGVWgHPXXaEj1+ShqdEii+hwaDPQ4i9xrxAoksSxlK0WerYc+wb22/SUsoFfaODr8MkJZjR/aUIjhohz1m1GPafg5GiQbqBRhOxGCzwvk9IS0sT06yzr07n7r3kJkIRJIz5dlWMrKr//g7QXZxCXGub2Hclu1vKBlgkB12rKbdhT8MvZsNWBFVFukKZyCvUHDf54e2rsrSEBEMWe/+8pCm5NRGNKSFWjFs+70ehGBLG5HGqTeZfk1qAns0NhCu4WHYqbY+Y7iHjthULjesOcNygiFOi17OKL4pnLM0nZcRpoJVlFnK6FoFfdVOPaGcIjfyuMPnPZLcTNCnmt0f3GXRZLE8XzE21dw+xBMWc8oi19+fRIk3eYpqmpwbhhobH628cujSWlRcgsit4my7mlvdNVBiec4Rd6cLVqcdQTx39ZOv8GRY/3GXtets1yx1w2pLumANVtCLuaTnwEGYYMesVqzGG0Re239aivWR7
JZVIx3ak9kvRAoN8IR25J1aRyugPEOPyxspKoWdNCNdSVjcdzWPbNhfSn7aQo2pkUw2UrAqDfHzyKM84vRFf6k9In0s2xt/4fKrl0SY76XWPrxwKbQLBHiQEoUYI372gW1HQ/HU/2h9j/WEjYSo5zSj11ObaaMjDaCPzInYQ5ZHhbe9RY0LiJUuB2csSpsby0NWjxa/TChdxgjh3odGBgDjlQ0twZUnv9gxZYTKa2KAqoAMSTnWp8KFUMahpS6TOlE1bxuewlwF4fIHwCpPedByyxKlITQsTYKA3Jd+lSliku10ya+tF6Q93glLLbrIKjPJxyGGTwNnn9e7qwVugDvAtf9oA+2nGoulzCSvMlXYBS7luXJS9jdNcAXXmHslqSbTLMQykHsFOI7EGNrWYwHBa/gJRdD85xLAvCY9fLdmUJmmJqrfJqGsne6/Wqxv5kOiL0pRnV1RSTLomNDAd4XeZmurSS3MNoqnPqOWA932e9VTjGWvoiK6ISwwSqoOF/WUKINBlDACBafrXY193OVNbYasYv+V1XJxGU4Lqv6eh3zfuKVHBIYiEpVzEvavGLV3xrY7BECfMbqx7mnJyJgkxErGk1HR6PovYvdt4rRDXKGd8mwhsyISdYnJ827CTbKcG2BDSGsWXv+Eln8ijGxMYY9EHaFYon8bUBmRy361ZzAH9FohQtTSHHwO5R9KuG+AuSVu7rKd9Wds82hlbhPaKNqoIONYqWnucAPkRVdBH39wm+zohN4fsV+B8E8qQIC9uLnNd4eWJygqzjeS/sXD+LRxhmT44vtdGUgTt75N2zLTDGbYGU0K07yhKTg99rLiLMV2gbvQt8TbCxa1ZGZo/waJAGRR6L51eb35Pdm3JD8hNvgkMU91cpBFtVyzMplu7eS0dPq/uGXbN4Vr+jrd9sm/r0lQfYk6UIkvjqBNzjA+U7ztT4lVw+6vf9j45BtlO4bhEzKitJqj60nRDEsL5LtFY58CBJVJXL6Cv8WyJiADhGb2azGKA+YsEM/hvFi8J8SGzxvWw+SFv0V1HMPOElJqFofZLyj/vMBYyog98Dwi803x3ziMgLTGTT4ciAwDhvSInJyk2fWsOZqLvZb+tVR1zOd9DZpRVOcyXZfatI+qGE7UTkNz6rc0KoX0vJslxPfdg088QR/XFwlMinur8e8LndcU2ByCfUpzsNAbzOFOMXT56sAb8eh5BkSUE8RSYez7+PXRYH8CBnMSfeqYBjEwNgXyBeD9+G08TBWVEZtVUwuv2c9ObSI15kQbfHv1tRSVLfMUVVN4Jv6xkX9/ol5qa0/h1Thezxx00CNTxh//dr+W2AqBZP2I7KWOu5iqYzWv/0lsHI8bVVJ+tJjTqqxtRyeUJlxOp/vYWVnc2vbwAX61ksqyeoSHuzE+FpO0t6BL5/WF7kx1NWHOWA7exMqVZIHvXNepOiupQRUDvnTfsOKFCjob+vQFt/PJ+PbYeqpZJ4SVv3dh+LZcrQXFFufzSFUE+3JPLRgdj+LPIQQPwmwt6qdNIIuTePgxDv/VqEmEPylRECXK/MwFpRRPax043m+k/L1ok7Xj5mXV7D3/ysw4LtYrw44eXJ4N3nVWorI+WlPyE1OzKu0UyVCadHUznOMnFnrdjusA9hDefiDnaI7dB5gw8/gzZQo51A9b/5KztrO3VNUlWUBkwfY6FYeHfIwsUHYDSZI5oiFd6ZgcLusuZdm6BBQ1+xF/o1QZH56CVxXysfxeom3wb6Wm3Qt1t9R4L88EeN99GZmGJKqKYtNS433n2JrfYlUNvAyWa7A750lBMyQ4vOtODvl1hRcylAWx63IAF8jUnY1lPXg97WD1eLnQlwMcMUK1EkfKZyQyUE3jVkOdRJvKeh2GGJFEjIgXsZxpV7nqYw93UOCxMus7JWzqLjdt1P9FadbPOCIvShRgxu+UwH2wJ/Kgcbv9LJ0eki85kR2VpGk0cYwguJ+y7oZnoYDwwwxaSia6J3Yfr0ToTmnlK
4fP+nensDKw2l/XcV5xyzQ7F2BjQq+luxZ+So3nnso2d4NP50HsCoqNA9XrEc+i2QUnpBa+uvDJL+Dn4LitjHOVAukLZQ7mb/HvAwc23uNSUFQTjHXblVrCSzEmDa0iL0WrilUBpJkHgPqXSZYAt6GLcdYuXl5OIkgfMLzyOsPu7+wF/PQ3MaGNESUxPI0QXVf5Mbg5RvcPHuLSEuYMYIKuLD5qExvQbwu8UAeYnYIc2IXi9RwioZwpxPGuH9YtpSm47BLzCj0G2sj5PxShPaP0sF8/o4Kbsc++C24XaAKfSPuOzRVpR3i/fuYBqnmg0KlhkVrognaciVjBWEn+gJFV9suZsHKNS44qS/OiwO3OiIE4jAxJMgLuAExIESbCxeNHutQtrzZJG3OkT8xSwg61f59bVu/Y+pc5GXyTMu/Mzamb+kM0y4Zbcj/kbIeBl4KZJk8Dkd7gDWwlUGQ3UmvyXZw8jwGSgphHoDb8KY12Sa1ZSEc81LGNtI0OFYI9jj3Mul1rovx9+pXamXxsmMIYOSnur5VxXGPmdMBiGtZ7B8I+UHAMXJv/Woc4c5+9MKxtOoEWonSz9Ir7L1LeDTda7Pl7izdwum88CeSkGof0coIlZT3lfwmKJwpMqQbakeZ/NH1N7s67Oi0IWDUipWY6tOp5iuU3Mh59JNyRyLJt7BBEIVnhpctUPrx7fit9k4MerRJHMcZZHvyHxt9lROJDCErtOeBzvBJeM6vjnQKVDnc7tueqim+V/xhISAQrlaS68uIUXm0goheGy/2VUbpzxdYRhi2J3tXQNebsjAfiTHCDJn+/L/CI5bcCcSjW4+trOPVvxKao+pCctUgoXccAMdIlAgOBrDINmb2GywLuFJdeeHYmAWKFe1+ELxEx+gnFnqrkapEysgh1fLxe2yyT+HW1VhgvluyqvnM0Fi5S3Vln2LFfpjOB1lAQlQrhTF2ZZaFo1KX0duEymeDQVspKY4ePBMW7WlnThWmgwHKVONVFMYfOYyedRQcFGUQ23AApWLWtTd82RJ0pR2SOCYXFudnc5zVi6FVSQwuDSiL4rLudmBSLzHyx2byda6aQjL2Wd/T/0LF0LbJIMuMgGhdIOd4mm952jbi1B4tXfzxcYtKkojtQOaL3VxctUMQXoJAqxD7VCKh6RBBI4BDctl+kMSo7JzAZEaueCszffo7oWgre1om3wtlT9NJVfvZ932Zhwyiv85/uCmO56WbH7S8XLWUko30EkcGqJhD47aracdWN0ltU6QVM2SV+iUoD6nqOr4JB/pxheYUK4APQnq0CfulWu8YKxASWmSrl8fYV8oSVM2xiPFW0kPvXm+UOGl4lq/cP8MUlJYEtFMvCe5Ly+EjY9664mh/zpBwHxK74gHzpuk61j5QWXvou4YF5pWVvxacffo4RNPvKDNt5c6BR8UWdgTOTvfb50l7pp8fYpdsCSzWoBdGYimDPb6No6QMXjM7iItHBNh6Ns3p1WwcZ+3hpG+tLfIOyAnlse9hDgJM+Vm35DBYq2fWshVNSv8efOvXOxuJ18bjXAzAPeyzQf6gb7MrM3yEST0SMCa/4k28loetrDDogfxNMI5puk2gANcu/4yW7efuRJSUHwxx6VKzmCVMG7Hstugmjz3xXBHrN5IRu/M/kmu3GZ9dO5WN5yZasJ4OUrNrM5C87JLZ8aEgD0EdOhQor8suirZmpPP5AYsPszm9Q1yUBDuKgtZIPCDc7RN8d+GR7HjOHitBAysN/WOGDQVuciInMFB6RHtJ6UvPxDItPpd/ijxPkDIDtVKxUZiJ1YZNUbVLcYAJo+X8rm8ZbAOq61oHac7qJ1TXcmrWE7tqHzjcUuJUEvQshqYq9uq1shqU/SyhNOIm1os36TfLLXnYW0BC8LqZVzwB8Jj4i7Qgn8K2EXT6WXGmt+X2dY3UGlh0Qs5NvQI1vVZ6h1vlbTIY6cxnwKA/V6ckKpHPi1cTQWts6gvMv0CkwNx4tjUSPe+mWt
SgJ3ZYZ8iXB6I6nN167XrjWHyGDKf6N6X8bmCCXZbwUgoiDWRwu06nZakinYxaSYGwFXHBVPSV0jUF3tLBCyK9jmhpeTVwjnmy1RB235W80fkwGsct7830Yh3epejZ+o8zxIVv/2fE+5GI6VacZTTzM7JYMS1zapjhnlYtjEqWqwVlfdETdL8MrCoB1b7TMMEoVFrVxScJSddKaw/olYbBTV5YEbZXYjOvrEFQTyoXlqvSOlnVI+9QomMjB4hm/rqNeEVQpvYxkDAR4h47VQ3k9NCI0J+8K0u2GGlIluy2ZsG1AfiMtnxVHDCc+dmWu0TOFmqhHrYd45fxWl578ZZDSGkpq3l5JnSkEKYpkWOinFAhz5kSeibmQghLXaGlqvwhWlcj9C/TARvpNIuv6kEOKyhO/rYFtdJgtsN15MLO/nq8d96LgMs8ivdYQuDwSZ7VxVBeodl5g6Wnh16p/8u9XWFbFQjWZzPyduRvv5LBx3Yx+FZ21R1+XFHmTTgsls20j2Mzw/DXjTPoVB904Oi47eq9rf6DCQvKYrMaF+aRaZqSoMTOv2GfV4oV88lUKymGvIqiZmVBd4SjQ1WzYESNFu2XSmmudXAAkPIRKqcSQIcrGq4B9FPmZHKeRC7ZFHQFvgBmaDsm/fQkIx9fl04b2GsS83ukJjPlRlfHKUAihDxWsenCQMVAC7G2RunPqMU37gr7l4FLFdvhibqDM3ZzE8ftPXpgbfXtLU4YYSAq8AI2smHNcxa4JI+QVXMqFEZ1nB6lQtx8IKy7pEoZJap47pqIVlwAXXp/mgX7R/7A2DgI2lZ+5Hhuu30TqCyzoQv4RKTtdXEPQFh9XEYh/q+KJHC0WOhmUyNNMY8lWJ1K5IlOS2vp+elMrEEO012orgOFCXRfF2FtcussYVbPT6SDPsxWA+7tCr6JaqXHTswN/bPAfyI+AaOHY7J9jHGzphaW6VLutfJLDk5LdGSmQe3Uo/YH9gugDKySyX1JPXD4T5a++FdtgZxYujKEoBNsRSn9JmrsEP/MEKgLAKxK7XqZcOZgo1vbsExWG60ORsMDkHUyxP3cd1chj/n4/wU7mkZPFe65ONiwfKHZ4k577npBOfxh5x5ftWfb87esZjaannJM4hKhhOowKf0LuOU7ZEueHwMqXPpaMlyTGH+KR6dUR+Kkcts/zbvGxhJGcsEMfZvKvaF90wPx30hymx/jB6rfGXxHtyrmbSEiauHLDJKd6zKXIhzv58KZ+KvU+kEWICCWHw/rzPTwXj7v8/tMbsr1I70FkEY/9bkXsUTcfTp6pSmf7vh2vMLxS/lDOLZd/THeTl5MLFACTLXnjTF6mWsjC0D9kCIBerdVsJf2vhnRVbHw5hQ7NR1YylBk0nvoRMO+FSKSRcvTv3hpXRp/aKDjlnQMQ0pb6nIawZXB+RmHaOoREdzn7VqhWNik7ygdGd24/sR5jVeVEKFTtNubi+Mg2TxR+q6UGWA2c8a3RHm1EiG6n/BCGkj/P7NdznYTL8fBmik3hYRZ3me6Z4FLmZ/+ykLo0+rWQiD0cRY3xBMbyqytDjNLevH7InEMATgPopicrhV5K9PjHkN7QzCu8gToQhQ9aTIsUHiSH0vIoP9CRFPkRScjs2gUTzqatAuvfrSbHbhD9968ERKtqFAWNawZCxcEBi+Ii6vgGR0mEUQ2jgJjtQYd9YE1xXXT7Is3CoD3jLV8AYkjX557XmlElaz9LLAjxOYWkI8d+R8KBifmnBsN2MsEgi1kSNB9GnK2lZZLWtuN91iIR9IL67z+Sd0RKsSCp0EMD/kdnMd26nn7Tqoo09GmJGzUBkJssUp5iaNwX0HYeANVcoMnjkvzgoy424y2NECGjduVEXFjZrb+qKmfeCevZsv9x8z2gsy3R016mCkD6nW4+9iA9uVOcxu5VgyhJessZkfwTky3twfDrJ+HitRqOywXyO4CIRe4q0I1Mp+RJsHRhfIt4hD6NgYlJsXsnWWtBlEzNu
dgdnwHbyg4fCH4H9HfVjCBsUhsTFP4Nzn/LOtiZztIsS/f3bi2JUJvc+OtL847Q65W8BRZkJS1GJnxhD9yyuM9Yo7pZqYZOZSNOwV8I0E5Qo+gQU6kzwb5WHTBhvulut7YFL3rnb693SqiPIGRdpNhYASPMC2Vcc4HujihVUkIuBHyRkUDwkWdYLzAGyEgOkrAb/1j8dOgCzhWMXLm1mH3E7sL7oliYR416xogFygsp/+EUd7YNrVLE4mO+OM+tveufyI3U0AcBfaqAgZCmKR3IxJmY0GyOWNDhLbLBIlvcPuUo8b9iuOCTf90zVlE7Zj1Vv8/DkVBKahwxDn0ze42swIqkPxNXya3QJwdm/+2kKYQRgK8zMfvwJbgJ03m5ES3uXVz7zBMazjghZPlqw+smNe4/+JPD1qkBnnma3s3alRJ39aQuSxd3eJ8hEjxUEgOvRUBiY/OMD8wHF0HltBY4Q7/cDyljvXMylg00+lS1PYopk+kvtr74Q1y1THFySXQ+aMSGj7vl1Nv/7H3CZS+TtD240syJnu1qZsMl9ap9m4ftmpIGS5xbDP2tsyY0Uh1/XBr+wpRqyb0fRaXNvhSDzxhQ2r7ITDwlhPaWsKN49w2DwjYXmFdeuTiD0lMj7u/1ErcQ/YjHQ2JRRoaaUSIDTYkQddHso7AHbxLLu81jcXzRj/lW9WJfFZihyxVmpX454bkAhzTb0ysarkdp46ihqYJmhTdtXBzSoeGJuuTwsfOpuOgZH2RdqUKARyS7hwNFqS9N9sPXBz0tLNGGoZ0VRFF4p6hBa7iTbBNZkdc2/ve6hXst9kn6Gau7SYD/hyt57rkNAy/L7IPv2tNdYUaXszDBPJDTkzFPqb/vLz90o1GZ/mImxbWI2NHW6m2EwrPxUz7eQ0Hjyw06n4MEeK/OUIYbo3ecWQRB3qwsWRt2X3oVjl3jqzTUpPAckKcPOEN9EAm9zo+8nt9WbYz5W7hR9DYhHeTCIPpC3SCaLni/HttcfHNySp52jhySyBfD2yhheXslsDVHqsgJipCjaIOPwBkWFUNaCV4G8dyq1GUCMaSMDlxQl5wvZ8m+B7KhDP0gAY3j31DMl557njsndNykZY8Hz5QWsftSv/KpuiwuFwYIvJvO6yNe07WesE8E+SOBMGxO+6KQ5R4Lgcu2jf/EjCPVz1X/2IC63bbVZPDJkOSygdumL1mDDqw1hjGFmETFzFG5n0iG22Ix8yjnbxe58lf9ypuexbG5rIeMo30jspH3cqW7xPfZ7/Wf+UVomE86p5WvWOc77ChBUkbZ51OlbOHsschh0bBOxAo2Cj5VfH1HA0KblJy3/xLFe/nheP7BFtvDLZ8tJ+lSZR3vdv1nqGe0c3DhDp0NHNqlWDo/HmtbhYGD6EDqTZpGABGNsfC26nVfMzGfD73bHK3L69Lm9AeUQ/9xzSbtETpsIDVSiztVPeHwz5oFLiGfHffbxc+hQcTj0s8951jEZ7dYfBo5m+70NiX+uV0ucLlkvfGnTEV1FK+oS5wdYJ9umxXF85BNYmvJltTUJx31pSdutihKw6w2u30riYInn2Qg3CYVtaMstptwdB5DKotTjs95c3K/23uL1KjLXk+nsOV8jfmZUNi7VH6sMphuOwmYoCufMQ5shx5GNXmtL8wA8rHV9IZiQlv427TgKTgooLzyYxguFljhuDLyWmdrnJSo8TD/4QkKCy8Qs5GX3V6jndBgQEcjoGS0nEsK+5g6974JiNlrpJx5mvwMU/L0P1OFoKtFEQ1Boml+j/JpWmXn/Zedf0Hq+s2UeptPDAOJfrH/jYoxxia7P727crkT69VxYm8ShjLA8HzRlrMZlC2GA8HKr/Aaqnq3dP6cSv7i+GTgZgTnsYWIPDD2X37jY8OhPFYxJTOZ/xnNhsBypcElnDYO++GTsKUyxRWFOU54Ju1qq33sx2RcdNTXMVKftpL9/vDlGKNV8116UrXuycg5xtmWypqaJ4HYaMiG6ULft
XFwMOnI04PDySxuZwvx5UOhaB2wm6vl5/+ITEFri8d2hw7fEPxWbd5MG4kqzXw+WJKe5lEbdVNJkCD20laloER47TVJL5oBdusRB0F+G5tsza/8Z44947cHHeSg+SHROdIymrF0CnYpA96juFcgtUv2Z/ViFAxHrcRKpHKGB+lJpK8oQi/2gruJ2npWyT26aiYj4a6NURf5dvvRgBHalzxpzTYr7BhdF3juN98QuV35nJ2+hkSMUBgVrlBEw9JVVI337RgoOd9d+uo/hl71yGuX1fmecCH6avQesmaGnl9isvzO6q7aCqaAuDQITjvQVsX9LXLC84QVDCnBbWagYcsI+zbkd6+cuKk59cG7XxolWrOVpe3jJs+++uxiFo6gtMXHpmmgQqke0R+YUvUYdklllzgtCNrw0ZS4+etahZvQt8Vur0gOUmGCgcdjR5HG7W/YZj0xeFtGlA3R7l2VMCjGCIw20IhkTWryzBrWtTMTjUZltANAt/Bv+dm7jioaTXCkMu/SkiYzNAEqgkYnhap9O0pIaoa8NTxahTW9WQhD75wtw2z3dRr7+8Qi3tfgYo6w/jakTsbVYcMZH6BN/wFEWOqjlaGhenf8HW3e2rkq46Y8z6XW5cuZDlxtE9bjGJhx5r1/I/lxOWjBfjQrcu6NsoX07a+Mi1GXg+lB4KG3LdKDUYLSMMGVL3o9zMNuvj6zOr4nd9HPWtCcAKh+duqj5TtrmBfYDRaaOwWCVzxE4XX7MeOYcaXzBWoLe4u44Y6qiTMCM7qNL8ftiOt//SP80UDQy8rbiSFLa6YMQKQt3Fp9tQZmiBkc79pNnquWZWFm3tu/unL8V5w90ltgWfOvICCjr4SB9w9AkcTbbCFRrnin7ZCaMFJGvnoJx9TSiNankl4fEFh0SzvbEhVt3T8vNuBga8iH2K2S4QY66eceXSUmPTPa666s3JbvBMlz1GXDoeGRNFww+jFdZq7rA14ocmHIWlNOrwahQ+qXqGseKLjSN80iauvb1KI9PTaq2VS2WBO1/I/tz+zcBSWQfVMcAn9lKY2biM1I+2pb8H9OgVv/GRHMqOr286Rzu0F7Qvkf5FoFY0XyrrZgq4FyUbfUSafyiI7Q58QzLGqGUtoe/cgPYf6//r8Y2NJ7b9ZBGLVL4gRPzVX4sTAIFP/HEn1nwLtn5MDAwdnhA/fL/PgFCgv6k2wFMRuTdettoBAbFQ4eGwwf06+Q4uAk58/zXvLpfEkwD/bPpYXTFEiGpXUdWQgU41G6tI4JlrxsAuwQ+hZHj+fbcw98T6KwLOvT+C2ktP82gku02S4YtRgK/QoZ3RqVMYgQRjZY08q81inUyByYAgg1APl7UDwwepv0EWywkK4JeHMYjLw+UetilCDLatrykx+92ncc5az0NUzqFkMjbjiSzk9ei7N2N+TrSz7TElMnp+5fnaSJIKSisXGz6Y10EBPexgkHfJyKNNwpe8iiRJUYulHKh1IldHpIKAHN4j5HycZBCy/btbZIor2/crlkxwKCdfl6JZc2VPVcfxiqW9Ghf8GrqjqGt4xRUWds1+0JsOvxYGmNLx7YbDFccIFy1UKG22xfM43ycjQh0YdKvo6mtr3fHmwUTKvAizbbDE+S+MYtTuZRosFLYJSExFY7Y1RjTkgKsK0e0/LvmYIAU2FuXiRXPD2ncN0XgZElkGlRcBVCsrkC862OqGyaAKsuq9yt/b/nEHbuKvUGL47bJYvw2s0h2G9wsJJmEl0IHXgh6zDX76YJXXsNCl1PHOGDX5GyLVCAqpiXDcuTKH/HtX9K2WQSu5BjOouuPLhGTSy2mHSo0B3xostqflMkTqdHiFU+9BAIq0URiFE3c8NzGD2IWaN3j3rIpFeTjKvFx8xwHUshJN8Zhb3jsGA7c9m+sOfmBF9G6MvXQUosoqpHFMceofqt041OIIN2iJr646jQlTBWK6r+2mveQ8ZH2tKIuVvMVZxPUO2NuSVYPcOJkAp6JqZO69vdn
MxVJB3n1IwHLN0HxWPsdM/h9UYdCSqwVtJyUIGFsIq24vAjKw+0+tj+9M6y4xe0LVXN+2gXA0x3aUsrMygbIriYDpix1jp6x1rMmfmSsulJVLf2f51cUsdzgYM9fhKtH3DqCsDyrzlCiozmyiSYlFc6gejAbjuUaWo2fTu4O8NLTWdIbgLhKutfK0bCkTdCUGYxoyN+lqQmgrR/pfjiVnBND+9l/Fso1h9CDVEjuBlC1mytlLWn6zmrRKlyUK4sVmrFocjGLF8HPdF1QmlkSiTvFDwH6NBvLGCRyW8QcjYVRXfRoEgXKadn+kXhk88rD43kwHEGuLSBTp3bQ9QzzZqPOhecjwTR0SkRrA0zUIBRIq846mX9Y06WykeGVaySC42opRMAERf7zf3NA7r0v77RLlVYb9ft5nYdSYqE2tDmyLVONjH+XHAU89hDqJn/YJEZ/f+qvcb4+BvkVhAUvNgIi4MM63+mN4Sc/ke6PSDUoiAAAM22bdu2bdt62bZt18+2bZs327Y1i5iFHJIO7rrPPvTgKPksfHmpafsHJUUuZuVhoidbR2WC1GnUuMP9JgBHFR8ObdWz/wj66jmkaNYuJ2hllL3SfdenH2e9mNWqfqp7Jz/j+LufqYfHoB8wtS+sdI3d7q9xAMqGAevcP6VPLVIa7Pdv7yI0hLg9C4FRfWmHCoKYvOunAIvTCPDJNS6YanMMmvJh3XgMXihx+LfnyE4QX1+yv/I6TSdW6uS3s5g3Dmgai+zUn8ZbpYx6H+UjLkWfpiPyYG3rGCzZnWdReod+0UHbkFvMRu13/ONXAw4SW7VNfi2J+GFxYivGS3NKDUOBt9Y4Ut/adRGBz9n6upMImUQ107oZ2EL8bL3r/b2w5PcwmYERDt76oK0Uo9c7/1gW6+tGSjLlmYMGaZC16Lgkc47NLxl3aXwextyhcSMIHQbj95gprlf9iWyNBqCiAl9QlLP25Y61uc87LvyGEdgv+rGm2MEPYkteYJ1twWiEM5jDqN1LTv8fpu4mkdksoSpFystKxnm4k4uanNVh+ty8QYT08+usK+Yzb6kfrTFU/EfwKTpLljDVGzDLvplqcwF21smWhnfwmEyktk3BYY/RXyqaIevVssdqL+f3ktf7YQw7XKQ84hUfF7bOf2VmpOdnw1ynJjocDshJ6hH3VAB5um+dSJIBtB5QsnM+utnmrrC9vhoJ6aJM9+aQORVqCd0KTlmF/p+2oVt0zoZKFlSMu44DWLSNm8aKrFTwNDzq1XbtXJrcxOAH3eXcWt6SunMYFuez/EdTLR3HXGvs8VTXoCRfy2yTkC53bJKXUKDteF/ZjWHetuqR462XDNMyzna2Md2wapU2c33iVkYyQ4SKiCenLYhhrZ5NygMKBda+WvyGSSqbanyATqMuM/pLqzwEeLhUpiFWllt5FWenILH9ArvRGHlw6M/5ZnYBNTJ2gD7IYcUttctnhsaL85an1iRPN0oQr2s97FpuBYnj4IU1SvbfmZFkslyBQA+bC4QCwquZHvSaxtLAHjDj7ql3lt6qfeYHg5/wSG5qc9V7mR5bTtwPDWaKUXeUBXPBDtwe/+n0J0LtmUOSSFEqOWAMvIrOdYKDJmGohyIJDrlFSJAY2W5vfXWXVYGusWX3V+nn86bEg3jkTtHKl+ZpH13IoJyxZO7fPoPgbloixm8CCnKpuBK9c+p/bkRIt6KyN3REdb0pawiR3lHYSvro6Bben07z2Mu13qS1mYdts/+26zqpBTwxMDhL7pohcBkXmukVUjqK9t25+D+q6niaSiZSaBzK6sHsEQ+GhntPPTB1lSQ7cFQ3gUzo3kjGo/zpVhDeJ4U8nmXx0aVEDPj9UXRPJRx9aqmwGm7Kt4GVE3XdoGi2P5uDy3BiWAg+9CNzboXqbqHosnlHuYmcCcYn3kMcL4Vk6U/h7HLx3KIcUeGPjNL1cS95L+emlLTLBkNzoes7C6J/Z/jk3jyNfjxHqWHcwe8Y/Sg5ZkGG2
zLQ2DkNaqw8Zveu/Nhymo6dDECZMwnG+3jCHzWKEyPuQatnpDlrtJO1vrMHyG6HYitLqETVc6dMPnzvsRMnvTrXXds4yrq9NAWRjQGQiPevktZwKreGYhzw7AenqXl6EerO0wbKK1AxSNG2UIRGsuzKK7/kiD2k0jBOlGARC4Ivz21c0blK3hMgAVGUj1vzGBgitiOENbjZC+xygVWmSc62uKTxv794ztbZuAk2cTcHufyaUb3z54kjn/4UB9FKwF8o7T6lWaRNRjTX08cOgol3rpCMThlCca/rYUggnYN2+ie1vdqKxIjxh5ZJEhP0KN2NuLinATOOCbfUZodTGUGIJPwNXg8/M+b9nJdGdEI26mYvOIxcQKU5gvshFhENSkx0+70rWRXiCWMtAX9vKSCqcS93uPGSyejPIMhV1y1S5U6plrHq9+gHk5KtAFPJ+52LLK1i9iXQPBrlQuWmuypBDwT25EDWFm0ssyUQ5IhRPTjLmWqLIopn2GD+k/SL2jILTfMThUZGQvAH82fMPzN+PfyP56gd7K+VI8dqwH1gL3rwypZCbN87qi9o588HmkDua9dSztytToKfv0mUSySZafaou7YBTCJMI7qygAlGHYxZPZ/hdvwO7bAPQAnVA639223aIJMWDavzc49XzGggJbLIK00LOxVAh+UmuVXki5h7T62E9zx8UfLkGub1xuo+UgKsFYYYZYOTdw5PH53wa5hRJ7R+BDPsqfJ+kM0gjQPZNcvHHOeL+zxm4Yqo2eEleXcA2tVVt6n656k/uarG2XXf2MNsH1KRcJvb4MFppLLv1PW9y08VINvSfgjRmzY9ZlPW1QCte24xuQ9GAZb8cgYW2573HTL4ETuwKuRCzp8hJWWcqHGoroqUedzEwkEhsSaJ8uLXMI36awfkaPq0AUKa4Z4tM+PnmTD1S+fGtsagzbDNzRsx9xscJ6NNXX2i+gRoMnz57VnhsC2QsmJ89ciWuk/29p+H3Qe7yJl8FEKOE0cf7OoTSqf7feboE04St0S/81SB5r14loJsyGvWSstPNUyILgMajpkJRyOsM0wORLgnyPcFIpZheG5lrK4OlROXFlNad0QlxvjIBSlReR2pJNWcLxVAoUt95JnpgjiGzifZgBJApFY9c1OsLvHczTDY1Skxv6lc7H4lw88CzcxR+Us9otM/ZOe1+JOiVcOyl3ZhOybFLcz8NyXyz9b9gU8UueeVC8PEbYu1ob3Nc2DF6DgTsje0KmCCfKD91B8Z0plcLI19u7zA/fF9g3BQc81ie9khQd1RXX4rQ3JqjJtMMXyqliU676UJTCDl0A5H0ewrt5xfLHZk2fnyfQRN2wPPPNbjdxCQmGU3JsWCHyi6m3Hh9D3Y5X7sFN3cYgE2MXN26/fr6GicVpD7CqfwGb9kxlSqBBXzF4IvjPBUjammGu8izejMS+IkUyHkcInHPy1krTuqLGXc2XGj+EBkD7bZCxn/AaGamGmGW0y3Zw4XUnUtSQKuafKtYmqZEQDL1v9a9SZApzbNmpRghpClYvCxX+envkN6A6C39vEEsDR4pYFTHp0Wxau8iApH1WmYwKoEc/TeEoPiTTrXN6CV5LJyhmXhYoLKwKH8wcJFFM72nTRtj0ixxSuJba/iGqRaNkIuSujZeRQtpz4vlT7tch7lcLSbCYxZyI4SkH099w5Z+eSjHq20S60fnDZ9LPEKlxHK8cyMU1uB9i/eZZg7hBxjT8mqJjxtoKrKVDl+H53lkLxB4FNQvShZX+47/EvHUtdnDg2hlgg5uwZWZ8rVh6ksI++QxrSMKtZMLd54gktMHZQSt2ZIwVP9k1YjRljNpWhvpe7f/PN4LST8Zt8jO3vlpiVcm5CAzz2oHzE6nQWNFekKMnn7jJAHTP8QvVlNULnjb3bCsB56cUL0cQIFCYGv82Csm3a2GQg9471X9kYd2ctJOrHPlzjDScBCK+/aocIonEZw1XIMQHleIt/WFFI+yYbEj+pjF
BLhbId2m8lXMmxd0/NW1tuAyMvY9Ux4f2LQmLl+o+3xYWQ4JQxa/uO9gV8G8r7B5rgjClJaQT8w9hyMykCQ7S3TYaNWKQmeUlscaKtdoGybdCrvRRNrDzfZ43tv2TrMrNbGfF2B1Eho/cmHlQ9ToeL8Vl+Rx9kR+443YGht8eP59NqKYiRGQLaC5fYXfljhLOVl/2wW1EuB+LPXgzBbNbJ71it+9eb9pOEU7dtQy/DTlhOZNULLQtsFXTqOy3f6/OqTNBf2QEX/ESs5gMeEcJvXYTF5XSXbVtbZCyKM3OJTy4bSGxRFdOJMguhhSPijWTsLLjlRj7owU3LbFLWvCc5EmSTSGI5uAj0WGfJCcfY8oQ1LcEDBrDhG5Qayixq+QzyaVRYueSn3ZpyLTKkj8zm2psqn0W7pm8cuVBjSpBRyZdEJWgMooj9I2nYY0pcuxHVhHLRZ+SaIeflPmXY8oNYxCH/5u279Y7YDIReC15dKWOcDY+u1N16XHoR+EXZmLYQzzM/wzgsid3hOA83nGIHUc2Cg0P9M9RJE2RHQ0IoeDsjXnoovlHghTuvbnCOedOtevFMpzzX997EVxveBgMRzjCp+klFamQPToPrbkH2hv7TVjCt0aVnaRHyf2uDuZmnzrOMBMzKLPoDz3ykQ5bsQKHj905vDFuCCDxAKCPRznjnmbVu8cKA6WV4HSLiNjKLKPcGwKBNkuToBV9mnUQjKI2VzKeVwGok5vQX02FQOjoeDBrtqsbbOK491DOJgBqlCaRD+giLwcb7/I0KSYNfajSgl3JngJc1l4v/bdn9GZQtSwT94/sooDoorkptCghIQBuF/wAQe73XwJfahPuoNhxJkYmnF9/7iRnr86aDfanpTuRxJUdcVvqCFZ8rb9qjZpYjIapLTBOulYQqyFUapYlpR91czWe+6sOGo5XtdBnGvfZBYCFSjK1SSVnw9XWkA197Jtds2P6YbnQnvwdd34rxeOVlK7lFaCdZfJuW15N8EG42iihbg3ei1uPiZpBllLSeBUHyKLKW0ieeAidcJOu3WSBIaGQ6r+YtCh+gzvG1fI91qOaV1O/ONK9oHUbCvGR1PxD4NsZHoFvrmtk6sVF/KPK6emletm5e+VNo9Tmt+aWWMI7INUXEjEyPQhvFa5a88VPpRx0RTampnThwWJGomPbRhvrhnbERP331QXydXBFGAP4WVZLA6P/ro+cLIA/BTH9JWCGdB1nuI1lFYYCeH/fQMPEDqHg0RU/kAixSwecIPCZOVxyThG+w1tAfXEiYNBjBa2IJLPVdWdF9IMwJaldX3qV7Ui4RNh1lphTPg0L+lWR5qJXC00B+HU3PH+WcKGqVFJmMaDe3W9NxcyFB3f0EFQZdkWKc4/NStf6X2Z9w3FhDnHVwDsoijtW9EvCvuV19QmjOSfLVuXu0ls9mDPL6uvJbhOPIvQyDC4wrC7MzinKrmw6GJtLQYo80yujADShfgopG4xhw3i6SP3N/Sxj4S6O3emC0bSNu6BRPZPkzQd8Tb2LsUad308hd4GmIOv3pA1Eo5itGclm8X7GMq9ppnne88PIww8AcL1LTYxj0PrCyRd1eUJyaMrKOdh3i1i4PBqTskZ2H4XWCzV3atfKHHHQefasbnoPtFj3NtxASSzLO7kcWRyk4COrYpEg+ZkDcCXZNlmQ1BPlySmXwQhhGRpxcAf5w0u0+me2M1tEmFLQyj5OGi/Ff4B8AquUure0UIlcp6YMBcsXmBIO6g0A1ykvdMzR0H9gnsE6y+5ibM0NF8I91pQ4e09sZ2g+BUCaoEADR2uRx94HJX2yl5YaSpLBmJ9QBawAOJb/vDhKUoB7Hat1wF5QmCN3x53RM5ktVcpUB8b8StnHtPlz4QUB34IgE0Jqv13OuqUq+8l8y500+dL5we4AyDvGLh+vQYAi8uRNGu3+Rs/x/bcDsZSqa00+OdjvrCPMTJdLMIYYfdt81GImhLe1dKsjYC+JVeoCWqLI/D7
FKWwauVAt9s4OTJQdMaiLKnYDMBsqmvJ+0od9KDbHxoYxzebl19xNJ8+7cy1vppjHyqUx/hgpfPYLmBXwgfBdmSgG2vyp5slInNSawx1az2f1Nb5lPKfwrss0QTn9UxgYCQhQIQ7Oe0o9TS0/pGk/xyh1g3itCZWHdOjSkoq6nAhvjhMKRP9qJzC3zu2y4zvz9JK1TLk2KsPK9kqBpGV92mm+op/x5MJCkJPE42F0q2qISdkDcFraeJ7CVS8rDlkCQtQ10pZS80RQf2E+TxJO3zA9TICxX34gK2+5mXX2Vj3AAi3bz78B3mZ7SeUW0uFAB/FbCh8Ade8gbWKwgr/GsIVTWa1CZZduHcCTI0Iyqu6Chyyzo9eP6zUSQcaPRhStKCzXlWVknuUcXRRpMjr2yunfFwXXLasW3wUqBO1ATVVI54UT/TIF4bip/wSKvfjyV8oLrnqgDsR/gD2AolGJjV10dqBc62R+4f1XXg0fQJlAeAP1I+MTimzXGmZfTmgrz3rQ/2pg0zhb0wFijGMzXosNJkORI2Sm1zuhDso6s3AZiWUON7R6gKNnuhAUEF5sREML/nrVSpxzWDOxzN6tQL6IQMiqG87SXjXrqzPtryYhAW1sXE4nrcZL/rfkLpK7yaRxosLP7ks9WweMXPYRlE/QNaDtP2PNn0hlOdLxedvrU6rq5HmMDC1K+L+DevqmRUj8qe1PG2Lb5S/voTisKWKjIH6Q75aN47WF0wliRw56TqKWnQaBYmOxKTBewkFyGJSFrC/3DUzyOdKxr4SNXaulsczb+edhP+Or2T7xZW6QgzJTrIdQvSvhSJ83ndTflBo9wFjKi117L89wPuXiqYf7HWlU5VEOPPHzL4ELFytnuL392x/64oo2BuklKfsLtjkmA2Dmf4MoFNECrvbEMYV4F71w/OdND9eMetjKIfnDmqE1u6L9kvDC8O4XXPbsA24CyoiVJLdKMih19Zk3tdN0Co77ihkZH+YOMnV/fko5pU4kBHQ8LavhB6Vob+E2GvEGei3uq1vumSwAsE+SNSSnVTLeUNI+/lDISE3ISA/SCf10Wx4zLNUeKFbHBzh8FfSKUgCzq5/6e1SfzHGqVZQ9EceE3WDJ4+2cEwxVUxbyv1gfGdG7m+DGwbZAp1MQrm7gG9NrSic/mZmclch52/77lYVC0jcXaq02vv0m2rWEmxTbFlxEsFaiTAFZL6Ivit/j0HObOrjGIkNhKv4KHzgLvD2RW8rbcvGD8gSzOSltZxnlwWJAJvah6MCW35ytOU07r4wuaz/CdLeO5gyMqMLMGRnZ0yYrG68kLg5WaZdSE2qkO2czvBW/jarVfo9Nnuwh/cM1L71NO56ef2NyXOVaBO9SQjcNUJI/l0zlIks3MZskNkK+CH6q+Jk5jpUJ3fKFCB3hkEV5kOyOT1YnhIFQO2LabT5Tb+9KPC4K7wIksq5PzDIwoI3yuRlO3zLqBzBqZohm85zBC6LmtP3bMm8tmZj/4yz1LLRMqGtezeYWD8YnSJq9bk/QGxJlfyKfJGlPuLYFQbAgt4VHShUXPRtKt0OsR6xY8jpad2kSNBZI0h6bNSd65dU5d6yGq6DGzKRVsVDMux9pE2ZM5Df9yltBJZ3imxJFHyUZBEWliNKHYUm6b8A6hyAe+GPcnVGfCOl/qxzFqMh1vXrLO7Nj47C8fkZvKe/B8xsbRHnadmKozM6EZ2+6dPYMR7tAhx//5Q3Z6ClwkG2hkLTbO05hD/OguE1uGiDHpB5z8qxyID/tvzfpQa9WvJb7SJX4/JBLG9oemzAnf98bVXxA9EBHJ9nP88ptIkidDfdY8ybg2BZtwiocVT6z+77qqRWdGvwjNBvCQtrqnlt1i12V7PlOuNyrrc/2wRDqB7xFjASl9disl8fR0G8e/Bo4if5g2SofgtwyxlhPpUcTT9N60VYXENSf9VQXVn8a5Ncq99H44PTLbSbQYAbWbNeolOzFMCBTXvqOKE3taYfuWdq
zlmhY0uCa7YxXGvHkJ09UN05CdLMmozz58sOrh+BTEhR/JRwvWyvtTyUn5B1e3GlJ5CnIz9WfV94afE1ropqHyVch4Q7E80QIhgqDWRmFooMcOfR6eHPeAvxJBiiJukpJBmm1nv/Gxaf77/soe+nKpH5VeJNQYmUSUzSkKY3l4beCKZurqv2l3QHTXzcnfzb073q4t68M2P5+6ecvMx+LmP9LKikm3/uBNZp1AihWavXQF3oQtQiRkMaYbXF85moW2S7GanTzfPHULilVehpfqszr/nOBKDW+ZuQ/AwbP83wT0PdsbsHBJ8aLJNGpjPWBz1HLpFbvK27qZXq1Zmh8eyzfTCuPqWi1e3R+aQv6iMJvik3ZbN7thLL/alL+AfwCASXgoN5r6cWdvwbRq+V2uOXQDZP03YbdIcmRbRvE/BZGzvmUkzy9Sfsj40D0iUozQMFdEZE3FH1iakKYraJaZ60zpfkbkzUTtiOUeuXOnsbvpS9eNwFR3Mv1HQmL96thumcMcdAp1m4MfTfHhZWPutaEqrqxuWD8hH5Tcm4kLnJ+ddmKACtvyK4U85XE/F6HDRdxV8jgAitEv7b665XWAhbVzYnXNLCcJrxGJeysODEFd3R8d6s14PJqo1dIB2KASOiqEUBiPeNslA7iD3tayNhyj+BLWdg0XIhceqNvI/zrJqyi8pJ7I+rPp/7CX8bxMVQPSoMXi1nGAvRlgO2Ktu9m63WCZTVw7XQc8kZZ5q2sRsEneTdCphWipiVoXmp+y+C3yGehww62zMhGrGH3c5QS7YvdzpIA6hpmOmOPn0/7LDsl7bHbbSXewx6wmjPZ1Coc+pKHQq1Y0J1yqWdeYLBbHO92OkK/u/eFUyYNmPhBFWB9rHIpG5b3h5yqbfiFjwFepTWATZQgdVx5d3tUvkfNRzWKRwPBeEUv8F4LH9jfNkW9X/R3yhPPCC4GW0PIQ041E4uA+IiQ0MkA+lRUTHKMpr1LGKdrE6Dqox1Hw885pBFp236nMKtpL3vbl0XTE96NpOmNhhewjgXgCC7uKoJ9dMeVBykWI4iqGankjrUNDXlEWKuHv87XWUn3hX5/SrVlG40xlNyDolrIZSbIyIxyc+YIVPtO26g0Dqd9VH2XSsTQfl0ltRQwD//X2/em4y6P/ZwE9HUBoWm3mDUHXbPRQM8ayu47C/9khB9P0PWLunzjvnj3V7lB7ZW+RVvkQ+Qd4TrPBvKlaWo2aGCn+e+6E40B8yQDLwS+tY8W8f7MdaN+VJNfLoKvuL5eyR0NTAGmoh4nOLRASf72/+47gkNl75po0FDclNsBCvcj1JaS1EDuXel/o2s+DogOedu5eYT7mTT9Ty2Q7FWtmB1DwlfhOz/+PlE761t4zRexlCWA2WD/a6oY+phz9qWNC8vKD56rc3VcYif3vIOvuUBr5j9Va9h3By2pkxIzAgtSXoCGHQENNNeJni5RA24l/l95sPopSdHlVj9N2MRLS1XMgr8V121YtYysRONGH+X8LwyspSIRyBuQ+pgZkhzX8+gbtinRoGiDLnSmeivpjzIhcicrHiT4unXyYnmhpSpXczY8LtdcYWCaTSsqcGT+oUH5WHPfqJec4Cqf4GqfYNIXZvXgavQy1QW2AVXTE9HygObD631fJROjt56kLGtWWnlv24RluNODXomYpQfjPaH3P33tmeWO546g3Ivi5gnscfDUnRMV2go49QxdAjUEP3TzZY41fuIHj1VmEywHN2An5BmjohJzDe9ObWa5O73qQrznkIsafcTeA2OrzE6rWOWph2JagIvGm/CuF+/Ze1qhEv8S7/NTXjqrTA7WFHv8pD7XeyswXQKitXrJNgXoNIX0fXRbSawRCR/MM+7eXoy/lip8okE7+Mjfehes66oZOL26Ct2qvm70Z3aICThcb44zK/rDyjqsHMuVJf1D55GmM/IV5UnAeJq257Kcd6ATPG5RrMdaIBe2I9C578gHGNiSzqYLurLe+h5DS8K
ku+w6+1KQY1xpze7v2AYRTQmNhcknMrnMdO6ez5+ths0glC29oVIfidGTmE61b9ljtWE9P6xac/lx9cnJ/2w0I6KY/ud0t5zno4IZtOIbOO2JHRXHtJetMS+XQBmOIqIUX9Q+PCduZXfzrX01TEqSxQ6mL+k1SThPmGCi3rzL3HIW5TnkHzrS2c4Rf9s8c2wmsVd2lRYpHEP4RWPkZIWe2vJLo/TRtkv7cnox2Ip8d3QU1sBNvMTKhEO1L67ZqGEq6ejPjRZNNL1oCfV6RVKZArVhNWENyGazi20inF/mJveZpP4IXOv2aHFeq+++C1V3A0C22yc5aEPE7x6GhXXtpdcsGqbA+zjksluCIoJjjuK5jLHvNuvdaxMkO4cGZcka7fIPTn0/QhwZ8XtLYBRF1VIeaU8uhX97w62nzrhi2ppwV6c2BVoFERy41SftIA/xEmlarCBodagS8vep2wlY0QjgC2DhnHbQ91pm7z4+C45vZaS61XqLPZlVqbeQ11xsUIflR3/+Nf5eXCd0uK1QqDPYfQmreXXn19JA/L2KVY5Db4dhhH1nd7l4AWDorYNuNODx1kakSZVUDm7cvr87Jf8g31BODdzM6i/FZhL4ANPUWAQ5kj6YG/F6doRwM1+CDt/OeTNQ+XMpC09q6EYmzOh09b/IcstkVGViTUPJtu6LGiWjQZEbiZIDRzdHxM2xFywnaNDaqz0b+GTI7TSx58+VKzG0+cwAI7qLEDNAjJ7OZVeN2LXW0hEr40syHQM3/fGR6Zto+2s3aZhchGRcbV/yb79LZEyuv3cf6VcLX2qsfCnUbh66LW8JTx/N+NrKOEdVHiOJHWr2782a2FhR1ATZP4PWkemdp15Mav0khkEi24f7Hk+DIt72eT5BaCX6hSbxgIf7fJIS+ZKkCsjsHk/L8A30SV3ukYSeLT8gKjVWPZOq6XEE0xDmYg7cnWpo5N1l/Fq8fDCt/f7T9bv5MKd+cipeBUgRqywEyv+Yjp3MUswaCswNQrW8GRLW/TGwY0GqRIjADSKfO2FYrInKLXvNVF4/l/zThWgyHCWAhntYLWY6yp0XXt93Zig4yASSI62ypXYlnQfaRUw8XHDyeYu0ED0iHxdRDS6oKDsucfwvD1tRcvo+gc+PdyTXEsYY5y/q/a9zepZSCqcq2G4OXL9S5DkulstVyeEcoeHPtmkGB+ZNuqc15zUnRjkTJxehgHaq4Ra/HPe7UIsOK2lGBYjVIHZqpGF0QU2RfH/zpWIcBnNSaP7w7lwUd46bIsztKD+K9MniqegUCU1ULARtPrcVbp9+XCWs6uJFZYDa14Jk0JBk8C2mQD1v5byKQyQbUd2x3kXjNRRaA28bEZABy2q5PD4YAkCcNZnaVDRCJPDFcoN0QUfVMNhQ+7k8DjIqJDev4FVJTwoRstYHVakZ6Xpckhs9Yqx4hXXd9lNKbGFSy5scxIpQjSeKLIutmzMlDSxIiHUV6HmPXK7M/4ivAvGMySh+/c0Yv78uMcmonFmGpFD8oNa7bSI0K/3HJNDpUDLxAmLUl3DhD6xsinnhBqCsKoOkJ9eqwUBwBHERyTJeBLTXZqVPl4zmFcmEcsjyqdJpXtesph/+kSCWOEKczTIrpFsB1i1v5eK8gO2DyBZ90Y512ZngVlLXULGPd1YyEI7l6jfhwTekP1IuPDlwURg+LgaejSW+87yX1M6jOjFaOJOYM7j+x7wQIhLoozeB3uhm3esaVKOog6w4JecXHk4MCSTvXSNCcXs2lyR2Gx//Ss2KSl5GYgYRzJX/POtlpPrynzuFSScjh912o+GqcmnUmtekXejvkOqqfz1OCs6nHTWz2F/hoZIb4YG8jSef1tquAHN0xWP1yU2Non5xdTLokLVbsBYHdAZeHedtGXLSdPygcVXBhXz7qRGgp83nKMZmnN8oRg7nJ1sCLg5Tw/xo8aMkDA088D3fdetaFsS6XUbi4pb4Klp8ki5f9Kj+fUe2BrEjvMPPqji
OFvzknzsuuFqaq/MPnMPhAqEafa+NCba5eGl7qTdza/taRst1EiQFvxUrlEUdGdwjPYdvigFyjM5MfqUePgqilvHVy18aMTLmAFN3QUTr+m0MFxm7Qlbku0aMk3lJx7BStoQtYgIC4g0wyNIE+5uPGnNcfILd5h0jNQqfP8aEf/LDf8ZY0ebmjV3v7zPdvZMkJI+wL71RvUpi/wh6QHW1QDMn6Oph5lf46bmFzudKuW3G3ECKgKE2Su/9fT/c8o1t88G0wnr4mMLTBf7vcw4duKPxyBiwVu0309aYoJXQtLgh5PdpiyndAwC0mgdZawD1b/1MQ1AZFL+HmGt4wN4WHP6Y/LTvVUIor+psDziQ4c8FP6ht48VO1jNzw4WUvO9B7ORJDYeySsuXvwLoIRBhNUzVREzzl47od2eN2anV3OQQhyVO2Zq4HuTU60l9klCGBLRFo4GH2qn4orNp6pxH/Cv0+eXoUQlm25LpikOjOVwPv6T2MNZrU4Qn9zbafEBU/8wxtmAjsVmp0DHLJ39dMgyrebEnrcMvoMUaJGGizM9gNiSTUHxsfunDroGVXEsz8JopwfLODh7fCLTGjbrHMW6PzLk5Tjc1GtsSaHtTd2PzQgwER4PnDxZ7pjaZ5jxMokDm4hgi3XUiN9vzir7HHsW5M1Ievs0B8164L4AJZ6Gq2OCGUBZlhRZWcMoVHgUK9vu4RpLI47Ba9HRePhAlI5qt1Soe0BxZEeksHZPyz8TeKgYql2BnSyoiEmXFqixq4Q+Crs1mdPnYyKrrAyzV7mtrH+IqKEfzsA6DdmotgG+Ak96W8PIX67mLxFnBbJfTgCCC94r6iswx76QLy49npKyMbKA6I2feqJVrJExCdv0aSQLJ8r9L2g/mgaVR/mcWIj7xJ2akHij8QLLSh0S9T7DzJn4j+JNiEgdkJNLmyhxpZscvZuivkSSk1Afp3C5Z4bbHsN7zVeTfxOtgk+lQQJLBZqLWiuewNsy0myDNyvAWCAR31HhzROOFzLfyUqJiDFfbOQHZwRXNuARObQYuEumUl3aIoKkd+WxjNz/L8gEFKjaQjZLTJb3CROkbfTiyDTHd0oOa0FQdJBP3YI9YSNact6AIMHmZ+uZENUaFTbBdU4AK6Abz78fmC8jDUQWbbfjH/SHzsYlCBbd66QhGI7AfbaSJqIrsmJbSDdrZDs3uH3ndlqSUAB2pkpR2PoamXkUN3nHw66uuODOj7KEI7aDfuq2gUOQsyp0VXinyXHtW8zyE/MZuxWWZU0TfkjHPGNPnrYJYoze0GpJhpz6zPFzGOyWyYyJ3iaZWA0bmgBbACtGuCij7DuC12L5NETaYc3KUDeibLtqKB7JUKwwlbz/FKGNMywNh+eGenlYCyY0zuNvlswYB8iG5djULdudjmI7u/A/Kx7ij6x3vAxU7E/iHwWP82y7pu0UB3jg60sNDFa/XHl3bnBcA2x60lKg7u6Mdlfj6IM65F3LAyvj8t9auhb0WMjBuG0kQ1StWkwa7DTqlvc9UeuW4PGLRdLwf65HX1v74EZnpCfuqLGuVrnGAMgIlHTfBOYsHBMseBOYm5ACcbTy6jf/2p33BYJAWzN2SXtdD9HHD5ehs/JgZhA6kpaPXLYVKA/hMMXLxM32xKlTX5oUfze+p9yZWMq1VkePxG9DGb124kq8tbioV11wK7iTsEvnhOvIQZ/cfWIsAAspM6Y/Hkwrr9/zwF3QqvfCcR6wTsWRYbc+qR6xbSWE6lckwDGHiMqu198d4sVBVFphoyJkxjHXXZ1BZIe4XBig+M1v3nHvDNRUMvDbqfcRh8gOY1QBJDpViTMqsdL86SHEgnUVn9EVjA2sxeyPm4nU1Fcv5z68LUc/AVQke1jOaX0fdHJ1rvVhjRAV8KoJVwsHX/d9UZuiz6JMpFXayuLM9il3iEeQYrh7MXsZ1ruMBvdSjiHU3eUgi//3VTaII5HVFjw1QcIrdF1BTCRwFD7a75/8YJCtJRG7
82M3EXj9Ds+jiYNTToZr7APA66nEMQJr5xNhDX67j/VbdAj5Lx9j72KeobSMlJXQLpe/ZlQzWgRp/UmnAmZeFCbqIRt+WYJrHVRDSwuV5qAUsNdudqNYgUP51TmDiQZwOw9g8pC1e2/e7jkfvlg2fCXTvjg6yQlaQ4dbdqCZ5OrJEcTxkJekzJiw4nfq7Ty1MX5ltw4zm0w8rR2wWXVNQCyOhikehsa2wkNgG4AqE1ZGeGw72+bvX2VUrXwVJui2fHJ17RQMblz9dVRXv1Du9mp+jxx5mR8rM2Icnlhj26Xaer+MhVRvm1ZIwrmFSr1XZMxu2+XF+4IW5/3u9P1Ic1NtiMxEEJ/P3p2ELtkYMDExvA+0KgexatfYZJth4jRWYadO1+nsJaVHyclQXmnTZx02V6AfAEGgOKc3WgnAcEDnNzX+yI36IOkPweuu09PXnzsa1qx+bO6ouXVeNWVtrjsxP/vZd7Je7VIUsPuNMdCUj09qIRtA8F38FvobzrWex0qCf822DYTBejqP/sBM8833qbrejzlSaU/1wp7kwvodDvtOWyqq4/JW2JgvduJrj7uf3Qmcx0xeTySVla40gA5bTCCt70f8QSRUGTGefaabl5w4QUP7zIk4+Ry8gvhwvi/bLS0ZBIUY5TYbfHNUrNGhXV37DNcHveoVe4pXZMUC2xB+u7+10UNYuE7Dc1Uf0W7k1eWQXgZgvYLAuCytRnzgk3vPJjGGHxedtzT9JJ3ckscKOUW6efaDm2fcOiIwmO2CT+QNXa2NE1eIGToKWu4QhYdkuZD3/X09LdQns/J8rIl4mQyolJyk1xV6H+rtf4XWkE8tQp+XiO8qnxDCBmP8FaxWCCywLzavw6FhPYZUjH5gOxmxfmw9bnsH0ZLGszmgkghKg105KmfIWem62HlnfOnM5FfM0XMSw3zLv9kF8Qv1XzliX0nn/h/oqIhSne6gS1Ud1Rj8C+UqjyxoFAowokBtE9YPiTz3s9FHSAKefLrJq8GEMLh/MJLZ2y53O5SDFTib4uDgaI7ikdnVBcq0ykP4yNA2WIFTiQdyo77cfdcNHycoZV92ThtMgpcgaXCQ5ixYrHgxPZ5/TnudUusDbxlaHGMd3ChQ2tO6b3IN94btaqDO0gjatn7s1pMId29NZoKkkU2B99NgmuCohgogDQi3TkX4tZ/c20U6I5YKeMDc4gcTJDntt9kQhuLoqEHXYhdc7/JGlBuVcp5U1AHwWCFn8DKaZiQfbXdldbQAM/eUs31kAQOBnY2fb07Y15+QWZ9KL0G32N9BBsc2Hz0qC1CxqoiE4nU92t8z4mXYcTtqNXELRQXUbxWXmBiVSy/OWG5S6iuN554ypaLjObKKLcXI/W/m1G429XzZVlCku4bJ2CPg51e9Zl5zK9mYm8k0wAz8zMFq0pOAgvLiM8FmTadQ200CAsolpTt8c+A5MOwqPuRiUfUuNJUGjfZ/1hefSlQRB5yDCPxNEfv0xQbO8oE7F09cRiYWb7oZ5sC/Z0e3o8uROl40znEV1WWkYgb2r6wiqfFQIzWTouZlGiBJPsUtt4jFIFiDnf+CY0jMCBkbWWPH5ny7N9Exb0RnucB7Gw9PrlF88JAzNroo+iXtsGqcv7TCcLEjkD+T11Q7C0AVVg2BteTrktbW2ctxh4rWyKWIfZA0chpH3igrC1suwnnaBBqMO9EcAsyGdw8dfCKjnfZpiefhVXi3eDhKcy/fHR848jL2YYarJ3hlhuml4ykzK31wMVbmE9lWWbtcElEsG5qx4fwBs8Wvi5OnmkWtZUj0EFzACzHV77yxo7u6bY75FdSlCyeYpr7/Nj2OMCkIDec9EDZKZpfBOJnTBNrKnPmC9S1h5D89x3E47ofW5JD8blVl7xco/84Q+GHb3n0mEIkX9m7tGibGXOPLyJiluwhxSlo2ql19P6q5/Qny4Ht2xeraq6UUf78aVcf1a9W+1giFw9hFa/xcDRBB+WrKbSJtPe6cAkFN
Of5XoCC3j6rxGA7OWYg1jsG5lIh0HifAxfyj5TG86cWK68Blz38vxpN4ObUHI5ZA9RAKgqrHSvCfrazS3HwGrselyW7Om8VIg790GPDYKvtz15koJpLGhw4Sw6oNzGTj/KgfHedulcH0FI4rmzjTIAEgXB7ibfWDr8Oy2pVX42gZYzpYqbi1kwk68G5pRHJsBYRS25xlBHCZzIeF3UIWQss4gyiNMkyQ2a1YwiIoOZ1TIMo7SYzGFsLaKgfOzbDN16GUOFCldX40i59WcPo9OP38wQdKUQleWIs2S40czZRL3z4WdWqgb3dADDQOddqXoCoQLiUxn5vs7UuCVs9bNI20R5hvRgW8BbqSDWwNesruYl4sHKfW/Jof9x6kI8E/c7t2Tu8Z5d7l6l9W/c94cN/9064jTDf95w0/mya42ykPCyaVP8RR6SLO/TnDNYhvkwto0la/sfHI0Dt+jpZ9aFQT+xDupn0HXVcow4FtoP99RxFgczidMvkSUG3blUzIAnHz60KGF4asdsOo0baAWH9zjRK6BodH+xymtk1SoMHfMK9oEFg7zO/THvLRgQZ+VXLMwHgV+f0OOXFjsCGfWcN57u+Xx+oPL6atHkbhV6c0KvS71XJc+a99b64hZ+R//ihJwbvZOarqT5VrBfpiVhKb9BkChR8vIPd43akOHm+G6DfM9oJPPJ+pqk2kwGlbsouaDXn/kPlRzUgbhy46+c0SGCvPellxhGK7wtzHV8z9kV1+DTeWIv4xJee9KEHAre4RM5qUNfl14rRJXv8JHXyDsVpuhcMiTcML5ZuCTTlu7drKiFPQvzRHsCL/6KhOLXEChxqZiCNONQbxJPSdFtn8OqTJ8De6152Y+SNCKQ6r0ZIrdwQC5WIrz5PRCArFNt6yDp0Jt1PeKRwzN7/kDBIrvEGrW3oTt0xGoMSKdGSoEMTbHjTd1bmqX9B0r4FH0i0TYAWPDHcS8KZ/5UvXX23TmoD+YrZZLUqvCLPz9KpSkblh669qBULt1xUOQ0NHcZHsbwjLS1Oyns3BnY993HwTBrXtrdDdEWyU2Ge2aGBT4oOICrKiXGWbPsnBXDl45hFKQPrcNEtuGdqy2xF9R/BXeA0Zkfz25d8d08zRxuCS710iwzYnaodEit+ULhAXbADj30ApNi/3QSFr0WLq/HHjnn9fVXy/ekeca12X1BwYmXOXaziSrFx6zmyz6WmpHztuBADqeKlGO7XDokDMwr/qzbnkbNi+GAIZXemByW4TSRcu6f3BHqB/QZstB3K1OtXk1SG1HuJCF+p69MBF86GrCSp4mRcoRRu5OFAvFVzHV0GHs0IPsU9oSu5JIRhBXMMgISE1/0SmoHKrxLQXkberKhY7QjaiE/XvfvvWJe+MMdOuSOLgiJJM42xPclMWM3KTc8hJpWmJKiM3HLlgxZUHS5CGkg6GRaaTFxdHuXlrWXmbcAJxfKIQ3JCBVZlbj/MzGhw7nPR9tZW6RRnEsqWcATiKixu0CizGc4eklB+btBocp0ROZD1zXv45HLT5vY0ypPnvaBhcv13X9gS3yTZH6jcGsSEIaWV9pUgur0L53tc6Aut/mrzCCGaAK8KDo+e52UaUNVu7J6N90oLYEKwQs0TfkQQWaSl1k3eTc/tg6d43RuhXpLddWn0SZ1/nGcatRFRfGVNS8Wcdh3ALnWFZzUcG4TuJHrTs1J20CX4JM5xPy/txWUtAfrXGD3WcAz4z1kV8xyYCZmbupyVNX6wrA2NVNHp4BGP17f5BUrkDRI8BQhnolw7g32w0cNMSzv6HQZbaax5rpp7K40to9NXUIPQX6GsXx0B7QX+DLT7m51DeUpasxsZ2FTTF/fWti26OI84zlW5M+6ZsqCtDxi77hqTHtiXEwZfMjO0Ll56knbc4vhqemVqj0svdrOnA6qEVJd0GxGRTCJN6govd5bZEAoFdtOZC9+bkmP74PThaU6VcfTOzTVS+nakgcwKP8PU/cKfF8XYpqDIT
BKUm2UD/CG+u9jBmhR5wsabKH2M2hZB3K519hWUbz62svNeYnG93imHh4b/y52TMj7NF+HGlewy7qzXLPycpwinItgw5mX/TdulzeI2hoRAMvyuk5bw2gfNLApQ0ojKVfPGuu5lALb4qoM3nAAMnMUgwJlmPXVTW0TWBWLVguVaTn2IJsrtVEehMtPhxM9A4q2f098mm20XUk1G2E1UQc0Un4KZUbfHM6/eY5cxx3Z1JcH2i4SEsAwAcPEhl6tWMpNeTHAR/uNfn1JO9HYrg87Fv6lZvGoETvO3PIo6xfkDpJNK7vIkSdmRdq46KPtfMH2cbKNt2OtjMmHtKXoJ2xjHrhxa6dOxeV9OYyt6TY7LG1iGPQWtYwAXFvtFD+3F2BYnFpPWPfhbmfDdkYY1618tfNbhhHwiP2N+X5sZAmedY+Q38ZM8jcWunCbVf14jyOtTfj5FoSsKFhzTEPot0Fuun/d5dvhNvd1dMbsvglqddlj1ZJhdHSEpV7pIZnBxq8Jxt0WCXNf5gWSKyoVLFZWLkdpCpfV8UDYXLXiesnRgrEj6J6qujTySB5Eob8dPG8RX7zpulrS1983oY1VI6vLWtJ9/qZXzhaf6laejQdfYlLX/yUPxFa/4zwyehptrlE0uhdLeEgs0PDF0V+uw8d+56GVJMUyRvvxiMPip7bJXftVuDEL0Z2OnBGFEVkqB5vqchoNvcz3AzC0pItcLoN8OBKremL1+VqUSHdhUpvGcXptOe0ffeas2RRlTXcp++qTcnhjI9OPLAsGpk3gwlTdeUMI/fdD6sqo3OYIVPFNDEhFRoIKGc8y3Ezy9vI6YiIv8E7sm7JwJaSK7nf79VM2wYyOtCcfJRrjteGYyYL3PwjqfYRR1pmdNdkoVyG2Lud54c5Nt0rwWvdgKK3IgP0x+hWXTA5qgZzP9i6PJt3HLWVlfCZ9eDFKY3Tst6odYZCnHOQXf9V0UdwfjIzvv+wXJKwVDmbDXLzIlkQO/h/iFcQFE4vui2uwEjVsKAanPBXjElptE5kG83g2G98pcXbwkhWEQyt1UjWi597a0v5ttl6Qbwjg2b0X5C9wE3gIFe646xERvL8/lf7bVMDD+U7s9k737wrFiJ1//OGfRD2mZe4UPkYjhk104vq5PZHp5C7cx2TWE2a6TrAqmz3dS+g83s2kxyByF7rZ7Q4iuFzdOC9gl+5PkZKfele55/bBVlOG9yt4HG3NsZ9hFW6l6sxmVpd+ISD8Qr3tjDyH6WWavUJhSB406denG6RmqZm0FAyQB0KiJTCUGd7cRr004YlY3JqZIpV2d9o8ymgCnp2tv2qYj/ynlFpY+xL8KOVAbFTjRjU2RuhqjM76jzyXMyvg/X5OcME7SbgbOTocjb3tV8cLdt8v9+R0kXBWewP3XhW9ovsgjMbqgGexxhRn8riC9xVu78LLTBCqpGD0jMDnkAU9ljk3IBeZgxl1PLM64wGnyZOFwQPoNUKJ35xMQG0hrBwskQXOtrY9P70CBCmvhRPGjEcrk0g1GShlrG1tDfjykUOQpjgOHwIFlGiTAsVNcxal4UfS1ihYqJWafppzkbsPffoTZUffHuh7aHJ486u6ieS0kA8kfo8hod5W+3Wr+QCZNG/lApgQV1uUjsfyUP+hFSPmTOoBXhUbkjD6mfSzlbwnoWIRGn8ppXWczcg6PmvuotjSEn+NtfrTa+tt+Vcw3QILIkcRR6FxYG3HqHkWbAHPwjSO8ct6SdkEfNxonla35ZVjbcmUuu2ERoRgA6uI/W0VhFfFOq7ta9j446jwTYZDLRobyQvDnv+nDfuHvTQbm9HbKne5VfPAaHmVZRHbkOwM9UsLOjX9hgZiAwDM2kEuBbvr9i9DA9r6gHDHT4PQTSY0hEWHSRZY01XLj5ges68t7KzYphJYDusoyUm1MnVOISO8f0rDW73pK4sZTsGuZzL3JOFTuHs5vNgfUixMKf+8b14wi8ukMM43kWq+LElYjV1XEtT+wO
7bM5i5IBy00D7tw7ejF8f4qpChvNCw4thfR1RyllB1VDNwKS4LxD463G9ibOLMSeW50D0EPpG1/ynT8AM6wnzSTIktTXvvVytgsUOUbMFJvu6TkaZ988xkVaU3Spr3mRcVoQz+aglUNts0It29Gh4Fs3gba4rlIQo6rGt6cyc1xrNbqpL+f1ul9mfOxxn8rqsJPrtP+amr3vV4vG+eqrCvzqfx7IczL7/aHiPUmD7B3L2zCD54kS+KThllRtR1Ztw0CF2nhV9fay1vt0Bhhb3vzsYa44x7jUXkEkwhY7j2z2Th1E5geGhZ7co4mb+UV0P+ByDEiMS/HE3r1+vxQOho696TMZtCxxwxksGi6IjB05UUSY8cdO78KjtBFgN+EgO9TTn5bsIkg2rcQA8rB17fxN2cukqByOkOuRz8ZD3bIblpIMlym/qsITm36ZB5TgHaJ2uTmL8TJOmvlXnvZOfudX3qjc8raRsyI0Iy9but2mVzw9AjWNTutJVfo7Mr6ICKF/vqDPKzODeqeptDVTlf9Fj5doBMDmYYLFfVXRKXl7QxmrNjOEslPXK9rs4J/1pFe00EkK3DOPPAM2MRzln0Z+LrZONyiAhVyLmTNHArCMWUNbN7aPtqHrKu6jzC0x/eJaYIXz3i4z15GC2NBA90wREzElzkyy7RJW2R34G6M9z+MedDS/7ejM7+OVdaXDpMJzjDjooeJ44HcbSnV912wOxqkwNwAfHyLqUiwSGxRtXYF6SPpox//yArpesLYBU7DxedcG+zMAf8CvNecavSnfpQbXW+4p2prNFsgxZYRukhqLiPRbsbO9BZtXEkrry+E3q3HTPdRGVYIwsQh+MxNx26ZxjZF9kUr+5mRzZ2FZoFqYz56LE5Ve2OQP1pSduKxtrzq8yT6XzHWM/4vS6YymkBjnddqlPE2ib3XQwVgqVzqiqq313687P/1/x2bqaA0aW5YV4hmrJoEEcZzf+McKecKO9LoMBqKlvagVGB7JVLghcdrk7mT7lRQDckzWvJpf7zKaJHHZwo/r2d2HIvgN7Gjc6qP09bRbUQfEtkCek26r6crqM9oo/XONsddemxHHhb5zutjtmmjQNTbo1kcLUjR7KOO0jioH99+EIlWf9DBi7Ancu6EaIsqAzp9PkhcOg+jl9iheWMHbP86TI/2E/t3KTbDtmgXwsVoSl6juFPHQOx9WqPV1khsUXnaeg/glmvgFqxCPi3uG46uUKEHVGWDSMIPi+FKhbayuqZMIHPOnda+mTErbVAvcrkWORuOrAdETyMEj3A3PYujYtlSpskzjFXCt/Uaz20PIGJ70dhi/3CYTpO+twtfC93etMOrNUrV8kLVyFRq7FOayucz/qNdyy2pHULEUDTzheCTIGO/y5Dl5gVv9sOLWoOFj9K6WhVe01Kg7XFmH087DMjpRAUNBRMltIBPwQtry5eZrvmUruwVZNmG1kUG0WpCPfxdSoq5x1HJWgOhoVQw0JjoxCwQpaqy3ufjJ+DOJPkvfgiRng9lGEDQH1LCO7zKhqF7tbvodk3XO8Zi2EHgZcF29FCrcf/TBZGXB9lz2MW48uSys9zl4dFP+R+S1KZ8/GHbfBNslMauq4uOF+WjKrn7+vyalMn5Rx9ZIcQ8sCwvzzrG+IeAPj5sWZKRPQRCyNEZ/B47JFAf4XWR7iGbwTUIb9hmT66n24XdT+yAnPDZT/I8MW4oBK8/aDJCiMlQ1liDva1nXRjtqt5usWHWktm0zjlHu181UfbkgArwgU45dzIHcqAuV6Qi2Y+H3Fqv6/ZLSA3NRe0yM4/SC27xfz6RbTsNiukgIMoNlFHmfdTuDfeH45aQnRZs5YdRLwYsH2+h42BIb33MEi3UCeOEAycc+1dIRY3Ns7zmvwSyuwvMdZlf7EZO2tFtg9GOpno4Ux1eoPwacj+c5Q+2eF3VwsrpLRdqWgrx9nIQtdPy83ZfMopbAMNtOW6wgDDkfxSr0aIX065SR2e2U3vLX
y4lIykv0CdWnicYarmx+L/4tN+Y4f8KtELmkcqpUamoKP+pbXQR6/U69H8/eunGrkKTYJzrrNuksZtxd3pp+twsXNgz3Di7d1Fkmzj0aol5pQ+4QjXUtZBJC3wR++vtI6zqELeJ1OZq5KlJRfaYdaDjSxEvOXE7IYcgzdql5/JRXirciEFUZc0FGqAIK/eR5XoZpI5+tDS41VxiuuiGqdC2DAOcUmVk9tqNG4AZhrOI4ufOES/RxAibefyiNzsl9QOqibDvvpQ1ypMfCjE9jwb29G8XdwNM44E8CKLcY4unUiXp0Q0C61kYKr9bqHFtJq89CVDCnKiq+DiXxRdNzOva0L9naGaRx3ukzT4qCr1c2acYc0ct440OK7WQoTrz/0e6PSDUoiAAAM22bdu2bdu2bftl2zZvdj/btm3MImYhxyaB6jMoA0ojMlwL5+g/mHu5UVN4ydaHqJezm6rL5pOYxUGhMcp4U2Xfz6m4L7PqOcD4dV1jdqDJIOF/SgrYrSf37gQHXe2XEzt43ULk1vF/x5YxG/iAUmRdVgM/jFy/0yACAkP5sgj3gBErlzyX98sVOPX/xrg2xJcIZ1+L9oUKqR69Zsi+NnxwPI4qhUIKSArTG1Ekx2qT3XPplWde4LsUu5rcUM4aOOZz3kHqBBqC2Bs1QPZ96AmjNyjjfpqtW0fUYh90QvLsGkpRg+/tGryci9hUeQgclxsTYvBzVxkBokyN55wy91Op4YVGjg19m+4QSoYN/fmc8pVerCMOhE8oSoQt82nRgukqjaJzBlLxmeeOwSvOAjCutPKZ8+FV7lOTbYLlys4nc9LPVjJeGn5kv1UzHU9QhqJPDbrE/n92AJmttvsVTeZMhf22Gn+WJMN7fohIK7sQjrjxKPMPa5CIs+ddzi1hIHivj9b/YKyl1P820i3xND27TpiDEeHbinRZlZMiD0yweSNXxTGgaoAFqLiXB5DveCQj8AV/tOVxkhc+smw9odzlTn4IFzgwN/+mnJKaZTNDy9EtqDxFsIFoqSGaGSUbNTSuGnMDGaj9cyU9Jx3njvpWz+pUE9BXdL4N7Zy5uFpldFBpdvJ5bA/DOq9/b6BBxQl9UABoEtzNkFuNkwVYpea+iqRq6yTOYrkPhE3gz89CZ5cWR5T0VavNHQ7zCOllEB8qZGxT8A+qQqPhxk7oQVOixqiQKwRnotKk06rKFWH3Ewrt+343eI2g7pprb/zmL9Pt/wDZTIxQ4bmij8BdbcltKPV0qbiZe+TOjecrC2gmomGfn/irdqcakPm+ARn5YOgrcmMdEoUCE5b8FnwLJNbG85m85l040tG7E0WOT7TTF8j1kOos3IJsxsfA5o6NYUlQT/cTpPrR6h2xLg8nBqUpHkx0qG3MxjAsUQIX5C9cfztzUluQdim4bSNnzBbqYlfBbuGgz0MYEJjkk+ii9gJ9ntxn1jX/zgQH4kx+U5GWLGFBEmAUKHckTmUNiMPQNibv69h3FxE8SsOKlnG5UrvzeI8BrWq2nPgI8tHW/5hMnY7Mu5cgEu7Ys6uXdExmd18X7KZHRPL+SdCZ+ZHs5mMEMxjSugHsUBYKxDe/P5Ja3SDgrASulUe88+fwnfkdnlHQYkS4nbkvbIAumDx8+apZcKCQBl1GWRn+uMXgiu4TSlHMM3FGbgh+fThR6Ke5PBc/MOgbz2IRQ2e0yicaF2cMDkgaotW28xQYQYoJ5EWsB4qNGn0HrDtjBGVR36/1zFeChvdDUDzisNTKJM904u29bX/jEN2R7jxozzUwFh4yqEnyQOrFbN7mKxmZlbey5DaszGeMny81eIk8M9kHNXAP8U7gO6Ie+GAuUX9OIwgy/qMbLw/s5cS8/dwOUulujYunSNfGqkHi+u+SCpSWxoMTv5rnhKjMzH/H7oZBv4bM7g7cqRykIEnpn173CzHVuqz0gwwzwRB70UzjJ00UXt19AS5i5hen5OFRfNlyp5aNeljDbL0U0lAk+NCLlSXfX6MFz
fP4wQJi6H9qEO9DGcs4sleogyy3y7g4tUQVfAi1dhVB/L3dmlMPaoItdAoQmhY7DAcJNjVLrbFe66Yj4lrx0GFbaLReqeSXgwzb9r89sFE8cvXhTXUwms7HY1CsEhE9v+uwqTy3oxNONYmGexW8LTfpFqA/xpvdf6YcM/JVNJ7GvGaYu7X42fB1uPcnELsUbdSBf72gMhKmT6iHlNB5G0KFwGTv+4nrEJQUs9hKeJFjKYz01qjaEyNyjgGRAeIe6x7SH2knjXscw2YTm0kI6HezjFxWck5ShngyYon42ehiUHpdZrXS1y1biIiJw7kzeV2/ZcIRsASYJb35HBeELtXarkkXTFYjic8mBhvv1BOS5M8xybJ+cxc1tbhO6QH2LASp+XSTofc6PUIrgmH+gsbK/dv4Tth4rVkjpvy82tmTsK2uQamL2qn8nvsH9prajrltJrsafNZZFI/7/GtU2x9shEkj9HesVaNhXY/T/+VUH5qLoCvUIQRMERrKUYb5VhGP8d2pxGr+vuhuqYohlPuQDrqGxI/+yzqgC0o/I5SKVZkNEVWN4dz15NTdRQSqrBLxGf1U4CiCLTeigBNzTmlR2xMWQv9KnLgkX097+Ra/S/XVCuTVtOgCCmnD2uKz/8HHtUckWSK8/vU9zQKkXKn6YF34QAXPBoBahLAhR7ptdoMtwkth10gpelEfZGsZVvdqnErBpvqTxbCekCd6WxEFDBltlXp6+VcWe8mgviD9eAFbrWLa0J+8J3odc5XaMnhUq4FsQIFJN1BKqnizcnyElRiFW2dHaLxKUYIADlomW3rj2uDzSa/MyIx+bXBiysy+qWYC2COJ8/0uVSjK7IlnxPu3GgMvLZyEyPyc7kQ3qRbee+4dFB7LFQeJINIKZ3iSnHKhxY3QMmCH2fPGDnS83q6tOYutByauzd6FCVty1KfBG/XcMX4MkIIVWIvKwjXCDl8CLK4ylp+lecsAhrl5H7nKLcaNqeWi3hKWzq4NgMO2ghAdENdpfYaxrZEicAqohjtrU76jJoiVLkmhIyVsOlDt6zVtenWY5Jn3XAfxK603pE+KuNU0BBrfmTpkZTcKB32EBuXQCu/zWEGL7iwW3AHGibjbGfuQIDITqb2+mz0Kuj/OwLJjoBo0766mfcpD6qXjFhDoFxG3H/wBIyPy0Gsm2AaranT+6gZELx3TDDhzUmZTaObbKvdREvboDA7cAMP/hDKDPm0WgAXQaAXe5dMs4bgzxr51LTn7gdomuVRw0nE4LWxSnSKeVeO/Wa760R0AMTT33jukA1MPEqciTVdcU2F7v+bST2Jr6KueOzPzIrifVLyRSDDpB6yuDC4IGHkeIzSvYirQRhz98qvDcBHZ5k8dG+WQdDRhEyl0ra29RIOH/zakAvRpOvItVXjuuGsyd07CUATCDw3awX//A6MYeRvySdLaSXPLwY570VJmnHk1Q90u6EMi/nnqg88cNFGkEyZUAh5OSRk/UIDBXLkuHkxppQvivjcu3CIxccDZjVva9mOumvmtyI7DmPVyr/rE0/WQ/KozI/zWdf1i3mCXwePDq/k+a5kzU4RZtVgZSLWLksDWStaWAFB+X71J3WwzyonbMnI0NvjbF6XKyWHveXClYk7XeZKVrGu7MmfykFUHWU4hyCWovenYhoK0zPPO537lxUsHkbb/R4OyZ2VtNhp6QRtlK5+RZf0e7hBEATVHLYQGzxPceMRQ/k/gsEa92p7EDw5A8hDPIo/9u1D8uwrbhYbuKEc0V3IoZ46f4y6YMpZeuULd2r+iEtWMURmikESb6WKuvGSADY9PN0st+gPbz5f7wzo3h1TNJavrBIKgoEIm5eprcDRJm82mqEoCWq0D3XMZ3aXLMaMCSjJx0uBm/HFQqPbID+ICZv3pNmPJo1LdVrvJkNLqnZG9NSEtWe42y/tpnP+7JKDQSdg5tVY+pRBDXrWb/xCWX0xFZh/AOkPOYi4jAJrGN602rozCGsmm4K25W
/CohIS+/JGBXDQ8f+lksGXUFx2BbdLOTbhGaui30jdzmppIxOEqsP82BskGKU/bg9QbrQvqe2lFXo+J6Yz4gH4nspLp7DYxMQqCghYkVfRoqf5Wzmu4Li2dDIF641XoK8D/L6aJli5cr2D+yh8zTtEfOOaLXNLvVcfukt5AUeVXQbpVzJER7m4uCITQKuDc5N3cD4K6xImLNcSjJFokNlrwv2wVmUf7k2ouoJjhbWLtYD8nk5PJlcmzt52o7HVmbh3xrwAovDfXIzWBy0/4MSUDQIZVMHUd+mLqKT0TS++uCMsP7M2x8aQ5KYw2eF8goX1EVdxvgSvZBboqIJmji7vJ79brnAqcbCx2Q78js36X0lUwVbKTyQja8d6UL/819b8G2nQ7vNG3d+qlKsjgnHEEwGAz9cY57iYlSlYsklC5QLdx3z2/KZ/h57TEOs2VndkwFh+N5KtXLCbQvYGyUrw/u4/4HciHADAm+78ukLuwSUdr51LoyS0EkDV44RagUCj/9zx/Cti6JW8nquEgk9eFQja5gIU6aqAvyncaMhYYqIbLAeirQHLZeG79UMcphzJKBUamvL2OByJGXL6t97p0/b5T04fCIwp8hh992+0tPXQm5e19C1YeWEKlL/v/ivqmHG/O5NcwwNolPdeSPDsov2tUXupByoVD/cj0TpjVV8kJGZMCXtsZgGyM/rq5+3J3TUUVLaWFdKIwHI9CjY4QCSQe4Q1JGVlMWP9L0unotB7NmjVICdci9/2znUG9qbkxt+lvz816DS/mmLWWQTdGpuVbyAC4BPQHgJDWOhScVGBQOUO6j6I8syljcRZupLeOHU2eNdgVQ8REF276N/nqT2htHphm9RD5iYTB1dCCZWfGI54f2+1JWGRXUnXS4SPFPqfqIeyZTKPb3ociAvmIT+FWmzoUuOd09kPGqAfg9djQpyuJHuOZYtWco0HRFqNKo5NIT/UEakG6hFAd2uXbz+ivaoGyeV7jQULuPAJkpfftnUmNgjbciYn40FsmBCi225pJ4Q8N547fvJq11pHwDVI5TxKw6s3cvsowF9BIPRdb93Yr5ionztSRiAztjGCH1rGjgjDwmnlryHC/Fl6uYfw7DJEZ3FK9gJGEbZqed/MOIB9QE9uxJhE0VBwdo1Ik8CEv4eh3cEDqN51DAh5hxPLetIsWXghafagj9eB2dhuJvfe0DACq+YQz/3hGfo9sZC+em6CknLSmpFVO5IOkHH/d4gneY2r+PHpCxkr1Ozg8bhHf1vz2Pums5/1W6r5aaUHTykEuyMUe4UqlAytVWlYXvGTq3Mc7OCPb3mbVA5CTYWRg+s46r+xONXZi63+WW9fQKeKzjvE7Y0nRSFTmvRZ2hSD0dTvu5WphcI8ErH2MXNk9/AogLL2obsivSVSurK8r5wK4/FvJD0mJak5tcLgrRaPo/k0Dtc2r+J5wGyLsHhmbO4Wn9JGz61kBjXPDXZcrZwCNmOz1Ts4fAVcJB6tx4vL6hJ44w4quTsfYyStXMz5f7xCmp9Y8C/mnNMGLy0l06n/Z5rv0JQhYnyv0Mrahc8UGT2kw8zb7uEl8Ni4HjmnZMM9F4P8ZwHZOo7TDqCIxQU5kyHQSybyB+pZJEUfXgE3muwgprMX8p7EIIEtaWSLCrc2WlfcY4E+mP4A+w9pck3j5bFlkknQDzvx87W1MRkIg+FgJUoyoLOBgKYceK3vF6oxi0twV62eyP0ALyI42+5MFJ+ZIqIXmq+XEH27CfRP5patnIzYUknc1fjjQjoJRziKUlf5NLNAbxcAYjIQu9fwjBj5jUTtjJ0lj07p1J8DHrBR8YtFR4jt51zKc7iL6cbQjnNeqFQ7AcmmbjefJ3mFFJg9YEHJ87zMD31vTyRjmRmUaC/P2aHyB62xjw8HfZwdCnS7TBWa4sDDlsG2OWJ1wFNGh19CaxndlIc0futidqHFJk2XJFHd04ZmWfnU2fpWawJX4F9eNPQXjUCA2BB9DkC3Mw
FqcwSX2mNAIlwuB1xJ+2NcAc9zs1y3Ew1PkndyZorUWuOUCVJpuKcCSzN0DKb8cI7lH+60rNFhEGMi4mzfHP8F0ELyomDRLSfWpnGbToHO6Mdvu90cordhgUEiu+Mj9PZjI/CKaYja7zVrsXqPv/sawSa/NgZ7rEihdrBSQunf/JKXQG40toldxgDpqN3MBlXDHGwRgkpyzu5tsN2fcfVImHFaEPEXCXPC124ilCSlS/ZzlgWNR1FqDBIPmpjpBXWw2Yx/NHICs1R/2+bVKo15HvxByoICiZvxE8CXbM2fxWp/FslNH6IYZhxaYnjOuZasFkB6yxr768AT9xOVzqeP3tlqWceBcPbLjvXk5fuyrWLUbmV2odHum7Qlge8hHxNBwAW4hgDGJGVjKWbz15EGbkNvsXBH4DCXa/mu5kSUikM9ss8cFhvkeRelzJLzBrNtmdd30X0gSTRNJpdsfS7UR5PHr6AjEqd+REjCz+mbtvqVG32WROXn0GXdCIywrekIV34Pln7Fio6w4euBUyombSWJ/Sw/22eGZG3a+yExRe0VwWujCalMbJfrs0BSkMNDmf2l2rRBFb5PF6VmYaU59s+T0H5pAPbJtNaMnAJn6PlOjGdv4GSJm2FJjsVTkOhQZ97MhXRmjU2BRNgmZYOQtD7+FmKWM4RSFcVb7ps1sLrl9KaMlkrh7QuAJt0L4lTthBXAz7S6TW9as/0O16gMF/3p+u7LvtbroTxQPYoI7DCIe3EOPu/kmQx4jsH2V3TvKXLujCug7ZyoNznpCKJAaecAqkYvk7KwOn+5RvtqPxpQ9iJqB5ESGo65r0kjMY7PcW8sLhBi4Y8NjD+jUjgX55BDJPOymj8Q6bUD0Dwn1KA78skeVuoOxrdNBPXDO2wGBKmEaGhp1uzNOClOHz3ZgTeMgpDwgj2JGo6IHjSsuz5ivPnULrOWPy0YLgr4cWlR3465skxA3bClVwojG+eCkB2S9NZ4qUAQt/SycLdF0PwJosiZx7bOPoHeiVkL+Sts0lvOxpM1VoU9JENNj5H8FdYgV+JqlqsK9tLzHWfs6ERDd2E85NqzA4NpAR7FOvvoUEzaKTY3rzQKnba+O1iAVo0y8qLJmu+wmTnb4yEn46hU3zMZuNOGTKCvEOt9PdN/lVp1qS6GRi6OBR6VdqF0sQ7NB4M92FjxuHJXnagzRP2kCnn7mWDJqW5ysX767IP3KcIgOzg6vUOGyeB5acTvw7cN7fhQpbJaqnk7Vq/IsTaKKd5GEv+vhYwGbv6UynRQLhnNg5lXg1TFFm0quK2TnJUwNFgLPTK7aAzxydiOkO9kVOQeZ4P77Atvv2v+Af1ex25hDJ/lgxkrIvLKCimIXbnwQZYl9ocbrkEIQCx1OoZcbxhLjOsuiVuNCeCy3wDG0B2NvQ6vu+corXEK3iJwbuJC54KDstbMKfLPwyQe9dyFrACDthmDBu5c4ZMEgvVJlXdwDb13/bEkR+D7A0sy4Y/lLE5y/WUpkBHX+SWb0PKZcu4uCqfezam+FdWBVGltZ5E3oONSDfcDEBu4yICbny4pw0cPdoyQZQl4GD9xJHOPsALGGoPrEbEZFFCi8qHF3l+G4ESup1ukel3HyHJjgA5mV+2dXlKAmCWwcSZ0xQWPa4avM95m+yFYLnD89kPioIEV9BhC1GV4nnx5YPBEH9Ro0EondEki9itX8/h6y6Ijg4kl5D75ZAgmdT0hOX4HPclvGV9l0xk29r6q7tB5moGfyUwB1NFJoYm09aGk9GSBd+3J+EpLyr9Lq/wCl5D6uahnYTHzTZLBkj3FVC9FWtL9Z5SDcDngmxlvwg0LR056rR5fAKZEweoRg0/R6aiYSwqniD16K6L/5vcjdimsf8nN3iOtgPVu+k16nZZ5YfRyquBR7GGDS78mbD8do31oQGgVQdhc/MVWKeAK4jX3t3uPMx0SoYiXYioBCJE1Xb6yCOU7wp6wwYVHthX49MmHll5/Zf44SmTCzd
l3lmqXRlf2HGFAeCeg2MrPa9UxKDf1COwgCURm1O9CBsDjWbNEw3DxqvgTbgAuSBCHkrC8GcafFiiCTZzcaOhbAU/k18RtBNg5RD1NHco4oj0nQ57zPrSgz1Aus478V91coFDZmye9mXsVBMLXeC+/3vdtWUkvJHd0zTzeDYCRRdwkRU0ZV/uGuNuMxNCUlCa6w81xb9tMIRE8iYzig2JUiHbVrTvl/y6CcLEJjlkn7XXoqIzCL4w6LKwOArMxZb+kuy+fZXG8fvJmqdqfx6dxQkPfWCAf0Cy3FlZ3ds2Rd7zhdIoyghWHTf/2bBav9UQQy4HBCqnzJim2C0ASUtsc2qnAcghfZH/WqSDG9LApduRxzGKzUs2dIEfaA7fiC52HxEACWUdJC2EX+gpp5fcZuSYsIbDW+Zxq4TnJJJE+rP/wWS3zZKKNoZottNfh4UlbUEYQwvpUJWan8XZZYjGz05eAR63XRIkA8aA9hcClSA70gJEU+QtG8IyoNXPrPRRMr2EdsElU/oGlpZZA+8rH09gg/x/u2P54c3meKmUJLMUCnXB7RouwnZWMzcZtwMyvlyo/d0yQiZaXteKr4cZ068BKkkbis4nClAz6YPGHVCN/Ub1QmS7N3tdsr8FpRHY8QHoYIPyaGf6LirD7FBJ9QYa7lFgwrbGBOFWkepRwMTNuAN5Nle5WXLEZhfW0qZCJCvBkbidHsf932ez9phXDCjn5n3ZVulojOcGEQpzcCeirkyuPOkG5v/WBf+Xp7Dv+1JPI+jXWYS9MQrE2qLHYe8UFzOi16pcmPCxdz2pfnHrJAjTXQdBd3Nt0xfdo72MQvQVDORvplpfce4hr8p5om2vAFlHG2mxTdnVR6GE7N5y9ht3UKT/JdB5U1tPqUmloNmhOWc/k2AxC2FY1LfcVwmCmgs5wSRYAQSQVr1oZdDQMjfXTo5Fh44MGD7TXmoQnJxL7bhjeN4+fp8pVE9cTt4KAfkheBn/uWUdd8LK/1YafUCaG6ZjEo7N3oVxmGqgjBBLJdw/DKDsglBR3DV3nTtfBwv/y0dDHzjpdmwMjB9i1h67v49yTsaepoQGU7EnFtidp8aRBxaBPVBEFmAl3xaToUC9r8xJ5sgAIulZjyJJz/97ABMX6zJqwgNnuWSgwg9Nl7eYnvo8jQAOup4BnOB1ojEoowGXKTchBKFSaYi98SiSlW9inTI2Xx7+KlL9U5MHEQtv+JVX0dJBVkwISMZqB3q89j8lzgXUOJ9CUjdAYIEAN9QtgkB0jmQ+I5c/YlgtkbepX3VBGgQyGuwpmMKrQXW525rhkEIH5/zQhPMNjfOvudYM5ecwTWiZqrgGetmYzZoej0/QXPuohs9x6fZQlRag84EG4R4F8FYcS93v1aNtMJGnnGV8aJCRN1Ij6F8nBAQJPc6vff9COfPQ2/Iq3PhwJHY6BULvy5ET2+edwUax/y3gKrZpdhNPrtl8Y2dEbre1qadParaHZw9Vl/REsHixoL6hR+wPEQeaki0HxRB8UONZjZ8m7ULNc0lL9rRVm9L/BrL4G6zd+nDDFp1HUZPMxHdeMA2amDhF6rh8hNZ8s0qh+GQOp6GcaK/ffm88TPvLA3mVb5j/ovegWSBUzWk031pbH9PTK8lordNiNLr3in6Q7WgUXPKV0/TNnyVrSUZkmRaFY57ItaWowlYiahGPgKnSxS2gG9uv3pkjxj+b9nv2e4qMS89KMyam42F757OoX3qUg9UVUAP2d/a5KgD0vLxTX2p7Up4ub298glKhSwSlJw37o2kGOoAh58d6bGPk1+LcQ7u47MR7JFDIIoQeN/OGm/vwzin8ExUXXXy7zRdrEOETTsGlZSSu3Lz9zF2kqSNa9pA7FmaB86qOzxK6o/lwZQ/xAW8FrhkQD0z1cFRWS1fDpYrKHz66CbSwI5JzcHXoUYhWD+iVzJAUbJG+tfPE9stdWNB8mLBKSfpYPIM1rex8o4m+44M5zM6kJxgvLbbHMi3
aSQcXRluFD8KZ7SJKjSwvnmrA2C8iB3DiLOfARR0Q6BU1RTCObzIxs8/+k8xklm/nBEWoxFloB/NAan3MUZA02kkn8/bt988Zj9Svw8Vxl4g4yxeJIL+Gt/YU1vNA2B3HEdXAxDg8tFpKpUhx3UWCgd3T7N1nug0gJB2OevdZhWcnVz0Fg1a7sdVK3+Ja0CFNrijl8pqbhjUbsJNrbEO34Dv3g+rl9CCv+74VmuIF7vHxPTuzYKmabvaRiAGcMqLTCUP2Bv71qnggJJaYkjNRXNAVys7xFHhcwV/oi9/2GFmqXumbekg9l8jPm9/YNORcow5gyrBnbMj+/sSwJ92bqaFPL0DlEGN/DNNN2Suq+o6LW7r5nmqHmMBDhJ42W4gmfgMxr861PaK2zXxbTPoCbP0Av9Dpn40MVlAIr3Wn6DQQlnXs+SFkwwNagUbvf9Zf5KUh3/7vo7JlWoZseOEO3baR/DAYE/hEoSvmVfe+SFTQpRI/I9l4RPRO+tDJ/3OCacmoa4To32lyEmSwpCHuC8tegVSaYD1un8HJN0g8ZiNNpTMq/domFa0UpKwh+/oTjaozRL02z2wouXErZLKOfyyB75oRg0h1O6zKQwcex5DvopqxfQkYPiIt3TC0895CN5Uel2dyO9Q+n+I1/QoTQwFm88jJWL2ualcLmecVtsq5QotNKQrl+xq4wKj397aDwx3FCOnPo5zccNRQbGrR0FXPaGrv5m4oNWbU7URMG4gx34HtzBSAhwwXJLtL1bpQClnU+MTU8o2BQcRfgwtNjUsYww/IkKT/Q8K4E1+IiynHyHP339gny/nHjiTvEUu88maS8xlX46oAyELIMqPL/2Cos0Z3AJfEEMap/bLDcQevl7kmgNRdZMXj8N+awUgbqQJaTXvXFiN3cAz12UNUal5r4omPvI8K/tfQMgjv0Kc7vs8KyDDzbDHU9S6TNYDK0+2ERLI7nbJUpjsYvhfGZdhFYzpSpb0l2twKWzMf5DtQvzGq6TBt7tbHy/7nnypDiGFZBoaLfrCi5Nj0zOa4OIkHPM3HcVvsONa/ZN40ObGvS+GMy1MxXidCmgr3257IazVA6NE6xmGwmI52Jk8A52JVXLoiAJzuqjfyxR/9zNnYMpZ+rgn8a4IcnTwxzOUGrebuyCk2baq3VRWJ94VCgqXiRIOEJfZbgE18GRNbb3JkkBV4wdz83+VaKidKKwZSoRqSyeKVV8UdLLzLoSzgXmXZ3nd4lL60L9j+JVGmDg3biFo66yKA/52EJ2ITSJOx2se9V/hsn6JFVRDIyAQVIAJWqpJMZ3qJopyo1gFN8uptdRmjGLyY4ig0rGHvV7WcGdCtVpo2PEcC1RUT8fF6GutxkjFmH0hmdXcvKg7kaecrEOJ6pTqEvvjFJbG8HZbXXfPU8HQWs899iy1W4beJMUC+5+nu2U87eZ3uo+mr0ePDdd+UZqYNclDkO7LWesM8xxGNLk3dfYUWk2lhqwsLwQJGqN/17hmKXaOkOvUk4nhoQuuhoVFaReOkpJQguPZ5RPsAGO6EyUIaKHEA12lEep9SG1ZZJbYCvycCgTWMO+00iGDjnA6xnEf06DPdcF8qwbsJ3jJmAMn142LHXPrUGCNAnzT+8w3MaK/hPBleb1FMM3wNoNiQyh/KgbpxjjYtHg00AHBPqH2qRGukYDKFhklIjoj3DIjbXN1lkV+Hc3TMKma84C+CuuVh4TfMB9YEyMAceUTvzU9IJbWQu5Cj9pvYq1bdyXB4rIfTPTJBjSqRfIGqH60I+PhHrtVFyX2VyTa3X0KAgD2OPFHrrXkZrezzoU7d2+22CjfM5+ePT1MR3omeKJOa6/0+NXh2h3YafPrmLhhR4BId6gCOxvsGUXYVFUzCsZmuLYAGP0tez9tMq5YFmr03HJBOP+ecNIQ+HhpTxx6WuX+Orl/QQ1aZbMJDehfB6E0dRhI9ykUyNrnn477+8ihNTSt+FjAzrO5hCpnM7YGkjEFyDsX
60wS7amZSx9iVRqRHfRqBAbGDwRaYfKQXihGZ1LI6VH/k8XwQoQOu+Dm6wRxBBRJwAacZTsOB8b8YZOMiJI8VKSH0C2SKOkEF+BqGAfwofZi7IJxWrJdLW5R2ofUZpXEbQdr4NW84IxADBka8vgCBMrn4B61sHgw1UFJSybxXbQ2FiLaYfJdRWj3rwMT24y/ml8oK7liZlYeWkgMz3zEgbx+ggOF8HRqQByyf75GGIHyIs0CbyCxKpkDBVV8iVsZWGEoDe8udBgAmRTJV4FKhqmqF2BMTgx2Hvkzbb05/RiNLbZLT+O8G+XDZ058MsF8KChGFWTYF6tdnXKQglhiZgUnIOVXi2LQ09ARjnViV5NBAdhGo7q8aD+gnc9NDMCVQA83vCBpNlftqPEv9vkrvOdSFOjKnzVTAgJIvY5KhfuKug3c00kPHJHSunxL93Mx466I0nRu7zvK8uo/D5DF15ZM/0iEPIwBr6gerOLeFc09y+xT9IpIqR/97DnRWeK29LhJr8C5EDaiUz2zNTp2ApkcGiU0s8a6lMWu9G5L+VXe+RNyBl8g+OBOASJ91GcS3X3DmRhctAsGwdAGtXXr1TLBfKvYmazpM9kMnNwWr2aF3mt3zRGnr1fjFdzr1zd9UcRBaVdcbknDDJdnBMpkeYrBOxJQy8ZAZ8EMrC8xeoCHrzqxSwkSDpg/q5mrAw3l08wWZo46swTupNiIReMGR1yBsP0YBXFmD5CmvQ+F1RwoeY+FRmS8RJav9pKmeIhf7ztnVO0JrxWja0BsMJbj+GSHo4/M0FWA9Bxk/USk8czoPhPwKuyq76PZT/3PB8C+dVeYj042LLc/IZ63RPjH0u3T7G6sgF9vCytPo36murl+kKYF6JyYTred6KhQs6IA5vCu3U6rGhadmubtMksGAEK04PcANk6vvR0rKYN7FhSQskDTman+6T+Fi71gorapNkbrJroRBqAtQ5QjxfGfcaEJk0Lg3RBREuGEDQP6nx4zaZE5cUUxBFxi3BoEngBRvSa3W4W3W5pMsVIZ3TusV9irIycEkLqqcC3hwIqXtVNLRoOOjmYBLTpcprxBVcwdsuxqgA1UK0akUDoDnpPvsbv+vbKA5qdmvPCfJSgnGhE9ua2l8onxnnuEeGnNx2iYY12AE8g5kB2HA0UFU8n9o291HVgiLodsLxcGpUU83NObWoR7uiQefXkgHZSywEPi9Uu7d/H4IUzQX7y1nzSkG5Ok2e9T4H3kFQ36unyMO4V+d+7rBkqfS2B+5+dQXxL2gPopJBvtl75cHcl4v4OJNfE+2j/vUt77udVFiBsyywchaHdrwNcv2RdAU7HWWa0yPvj38dAg5f11j/K+jCruZ/ujOizwhOqeI/zLM2bxpS3pSb4PHrJ96Ux0pINAVHE7ikqecT6NL1jtJAsxU1bxGrZg051j8/WLCoCohZP/UgfdE/10lp7srW0pAytyPessViTthQSTYEHOlMHzjszxAcnONLp1LDWDhBBh3yV/K11ORPI/5KcA0aX7qJ8+ROzSnaI7TH5UVs0FoQxgasoxJTQnAwHYUnuEAZO9zvFkaYghystFxBFxglTGmQp4DVLJ7/zPYT4PHGpmiq0YiPcGswsK/y370VkG3LYW7dmdQdhEtcQaX6Tf8RrfWtN60BNKg6LliJAZNuTbYMCM/XMeimcQ18Y5FBK/l5ZD4HRlJI7MDWKwZATDX8Df2Lp23Ij110G4/uu2PtRdswCLnEtdNUZKA+1J6hXItZpf2p2dK3WU/XiDyGDOqvUUbOL7HmJiaxUzE3tSwx0bBOmat8XCZl+GOsf5BV4oHuYubLKpTrulrm4DU8Mcdh2U8fXBpPDFRDVAfFybu22Vs0dQ3HA9uuAdcjC80tfUJb6ijVS2PV4MPtizL3qIOld4W8N0iMRBacTp2ZREx+xtNk9f9oWWvfj+aXQ/26IvU5O9/Heu5My+rfFNSJmcO6rC4m1DUIkCWdFityFoZPcCDMQx
jsolv0329ovs9dTpkh/8rQz6UkYqPyhEin7qEWN7E8/CeoVtlXWrCWTLWQZdcFJ4YGJnnwkiNf/sgawe1Pq+Bg+a3CuUxdIuhnTaVOP9+QvSSV8lkoax3u1az+3CLZpvXKxIdAAp41zM7K3jIS9cVkrprc8VrCMMjw4RJYKvQI/g99gj3yqZQf1WsyK3vddocEO0P9JB9zuOkIgoJfDagD3bFonEGSFqF8NIz1HiJ6vP71i3KthQlMGna8fmPUa0Ps2T6fWylRNTe5vuseRvXgi/ZbkQ+I7x0d4lgwSQ5kDmRaykWaTv+WbVUbvK1ucfLoVqsUw0+aa2B1Z/prdKyVchJ/3nUC+i9mR7P8D1YzjHtAzCUKGS/31+KwHhnJVSiXZj+PG7dqyN4nWIw2/R7p7p0ETgg9LBllG3EMejGbx7Q35ZPgXeVuNKIuVLzBJuFPCEXaZmZdB8a0AEbGXMYPHZuROrtSa4TqetYmCR0tyh13ZO5bnpqnjsdCnNEPsW8fHbaQKDU8eW/afAKXSVXv4PIQR+fhMfZiJSBLR/LrAezilmN+NE+ZjnBspA86mF+hCnR/VcS+jEMSSyA6ndwfs027eq1UXPbqJfBrXUWMSO8fxR5QghZT/IB6ne5Y2l18IQiPubII3IStP4n41ZzSrHe3kEmUD80/dBRfINOPz513XjagVmH1AC/Z81DEJanYkd3oVxRzQu8UmCP0HSOMm5sDDlq2WBnCmuedhM2i1t5Gj8WGAgSDv0L0xEOsmB2gTJ5N8Aou23Zcx3ZoM1T84IK8WCSqTxrqKIT9TI5VSS5DQdXD/e2n8TYTj9IQ8kbiWCoEe/c2GGMLfN879Ky1xzeFKVRCojC3mVfl8xRCM2BJlxeAaqtkZ3A/czPsni8s2tYM5rqbF9Wwp3yoppbBopePYmBvyFfbF7mGPB5a1QxntCq34NHDqPATvFnz2wLogQpO/5pM38ahXrzCywh1ldFK2Wgdv/kYNW2kKw9u6PHtUuAYiby9Z3+dqWUv7Rp9wieLQH/ipmsDklTzYgW4X1mKyjt+IQvvCvl17q78/i2DfwpL789kpRA9g7xOidbOzaT8MIFcq1xlarmdI+BBsATFvNizBP4sUYJ9oYC5zL0wIULUg21X5+9cbs2XvC672UDjch0aG2HcyE5TdXkPMse14vpt/JgGdyw13BcTxtL3M99N1m0zX87XckyC8h50iWoKXMxXGiIWD0ce844NPeKZQEGozfvyoRtlycoaJYFKlrYUjkck4ZzvkotEXod8W6VkzNxYZW+/IFhCa89HYHopojFj5UQ/Z/OGtyG9xl6y/O9ZA5sQfMSrZ3KnO808Z6EOTONzvT6Nrc3tOYe43SB9vV6ysh8NWavcW5exhws57l/rIERp9G0BjbEmfqqn9KMyNMOwmTQBydrOwVNB+cUh6VOd7rLhBRuFvQ7pdwwFghDFFxJOTG9uLnPiOYe7VTxlEXabM5KuIilurZkPU3bKkEerRaN4ZtuBbTrcnICqPrNU7FTGytB99HGSHp7rKDjWfnqdGKudHKVTYlTZZ06wyoTnx7leIoaOMUgEzk6s4cZwIss4deH/R1kWgDL7nJDiW+DWmPUA6n0/QVUPwWEiTXUiMgmdBPd3ad1ijpVvLV9e+u4qs4L1fgXhuNbbhwMS8Msw3yaUkN4XxYVsatoKz90zD93Hkgb+EWuWhT8HHR3hVjYtQuUhO2bIp8J39S84FrEgVxiF4emk03kBUxsjHOm0FtT3xAQ1YE2Yg6cM7Mn/kTNTAbPKPLUmC6fOHiS2lQlp1TUSBcJoRF7AXfufLSN5Im5tCkwdP+wQ864Om8D8YcGOV1UhqBMhCr8BQfa7X8NXSqpu7/k5kZFwak8k0BFi0h8JpCndsNe8IjIXWLvvy9LNI0pxbf83XduNiwioQmbv6ghMn6F3gCqtwH9Ny33cBGdHjllFhhnW+7NiOFl38oCS4z8nhfD2du/LBwgt1llgMtEupB
TsDpp819GK8z7VwMsj87U4F0tWYRymOHPs6mScnGiadkX7L5+PwS6LWOzIx1Y3o6xdtTNt8PAridxQFmLYWTwCzYREfRPlY4BfC06JkzdCMK2XC9YVuXsGclFHcxKN+5mkS0O6/iam+fvCdLXTV6c/Pkk6UXFO7EIQlszI+V0sQ0szTTUW/sKq7XPwzbGXkDt9XPohtAueLi2kHLdpIwqzPSujN8f98dpHVOn5xEmPZdaftdflw4cvQ5007bGE3VjjGHE4QCtmyg7SF5bmyIwO93OcxkvRqDo10ni2rkwwFj56WIxrqkKSjA1SciIeAT2P/pdCx5pGUb7Gx7WHnx5EBdM9hrrX2Iga3NHzNzRx832zFhpJ2nJpqMsFKMsO8Fo258n6P5T0dLcCv3wYQoJiBhu0uoirR+x5zq/AbAqwzUjioQnC4HmOXBXXBtQ9R8njo8aBBKuWtqnSetk3n0+WyyEEkSEju91Q+RIxndHiTzpU5pNIUBphZkA61oZuYwVwNLW0bU5nDkpQtJyTZiG+5WnRswEZmqm+4dAppHtpyuet92c7YL6fDk5zxFv3rUO0CBlO5GK0ZEcfgVe0CVmGZ9nUxUqlz+sdMje35d7Vl34aL6NpK8n0NP3UKqbUSYb2bp6iDWS8BS1l60YwWVtxMZkpRzB14ugIL7Vysxj33giDwRtphm2G5PQA8q+eaZzfvZ/lyVlbSdUX60lhdhTP9jMloq8Tk/KoOqiJYrzdqWdRvqbZV3BKD9LNumOcfaA2JWqDeZWOrJb6qa7aEX5Xq9gEIjKSCCecaFd0QVEJAXW2BRMADr6A2R1m4Ng97byvOHolfPfV9vi5jtdlBoZb9yIrssS1SU+5OYXCU3ItWWM5lTUA3L8Mg0C28ubof8pzHpwTZhitiRMPTNYTiHR4o75f09c9MygcPY1TEMc+ubwzX20VARlcebL61R0EWZxmjm17fTj2GnrsDUIMFH06ds3Pk3rgXDPYXYEH254bcDrfPImG9llRVFyXuq078PSEOmy2VnctgvOGi0jy4XteZ/01NYfm1oKWP80dn1GFrN1WbrF3WbPyjj9YbskCXp7lN13hXz8eyFdR/kH9wmkNHpzXZfjU29ezjNRp677Ssl7/wsmeB+3YkZlFlsmeGudhEImuYEgzrxCv48F+YGKNfYDGl+NNm25rKQRkN5FT+729JdUIjPkFB7U5PJNPWJWs+ihvc+L9twZuoaXsbTb5RCHPeXDXDp0+BHWiJEgVmTHY4VCIkB0ow/aTNnDh1F4OiEnOorCeZc77hUNrhqBJ6GA+j6+HTdyvf6DE7L/UmQ79I12lrj8zGDKee9dtFmqU2PZKIBYIgYSDOt2SdSjxwxWh+iBpWob9cn/cDfsBC8Hv9E7P/lJy3Wi1bWeQXBIniv2nM4ktAiOH5l5/k4qmD124fIyDQOqm93DiX6CTNDeJI2qbo478hqAWchjTv9Cf1GRBKAcYW5TYloSxiRhl13WEZN2q702ktQFUP0VIaPKR1/vwfvpEriWnajfnATSjz2UNr1SYFIJveevM3FeMVSknTs+EUmWu1MrizjjTSb1uef9VHmPWsrIr0HoXSlNU9mokJvhKUWmsrRBwLpg1SmuGiXwPfC1RQN+ZjNxBzbvjTLTZ6wCSVuHW6H4D1gU7j2ZU32yDBE+Hwa+NeDAApTIdJKRsRJwv5R4Oyb0RZMqToxeGHupTejUzdnoB8JXaluY5/Kg/Brh1Plv/11YDeawOcoG8MxwKYURCbZLFtW4aqxTYVAFwOiQGTxZfd6HNxSxFb01L3gV8QbkbsdfxxL11J+gjZcCyxcsWliNMftXQNwULkiXEYKF5vWWF7wTrMgp3Anje9zCTaFvDOnv+daBcmbO+6hvRNSDf/xkJ/Lupo3mwJz9P4o/58aFnduZXk6VJygatKafj2J9IKe9st+0RkxNwxjjU/glOj1K6Uppf8BNzxKQY3zJDZ2jaY7mqTNrmEKtWwiD94W
1i4VfZd4056KFp4fYjKcAn4CbrC3qiJEbuOaC/OTPTA4Np2eB7bCkXFOAZ43lqlyicdZUZqMGx/CrR8j6rGfJyMGoPZ76Zx0H1Q3A4+YnYxw0pTilH5t+wuuZux1pgIHeYF8gRQf8DlKm4nqTt60LQ8nbWRyiiQgBMvD/dbNtHGd6svtcpfGWFHTnkRFU9KIevARGSM2WKlvJXylH0GeCA6P9zy7ijR0u17IFUNp7UoW18yonLyH0kM6DxhGDyeW6GDjA4mjeR0OSze2x1bRGDaOKgVUyaws3lkJhONEGmzOObEYsK6e5DvlsUeJ1L7Q3MmSrKgn3nZeR3EFy4gT6v2Tts0VBbu+lgYzLYF4hVovMDiS1hTMZcWgjuGSODTbQJtFvWYg+qsctBFVTYhwC+eTjK/vjv35Di7SbeP42mq9hKOCnk2/AChBhoFWjpDdid54njeOlI2WIMfTspxBU4il0S+tOTeosMgdS8ple3ZtlJnOHtW6uf+1Yg9cyy97JmWR9hEtMjwyNbA1FpNvbAWCqDgLMy+K/vo623MyaVetRYkd3iCtM0Czm7q11AnO8Wgg81glxpxyKNxMwZjLUyrYBiq+ZptIlJTfXhXJwLGaf2kw71CCfKX+J5QpFcaiSBh58zkt7VABW8Rp8KK8A6ZoB4EKTlC/8DzJubOmqDNVgXcPjm6yFc2jxSFOvYTWbZ1Q1+rrHl0Umpkgw9UQOk4GHm8l2KiWwXUl1ALKpHHhjDNS7vykq/CzNUiNo9ruxnJDUyP6bO+30E1lvKJ2/v+SqjbGeziiBEmXQhSaXz0ho1juyclcoikP//JI2/GYNo9IFUL0RiYUYmax6RsO8IW6X+jJSmz8P6CKDgQl92OjOK/0KJirsZ7JaxB8VlouW2bHUxb3Q/npECN858ab2rlTGFB6KKCU/XYq3uh0d+gr7r6vkzxA/AiL5s1dOFv4J5z8YgqnPdeRv0vDSJE3lT5/kKS8YOq/aZ0R0AXIXtUxNCgUdSgBwz8gzsyqMP+cIX43leJMttequKRxFMWlImC22/hbZ/3CKsiYBYCCGdu9Njro4dgLfIWCh8dLuWTL86pztjkFiy2dErUoZpJySeS+RVOL6GnErvEGJT9CkGCBAwVSX71HM4DoKFoxsNrx+gHzAHCuNAYiPu+mF51yrV+050UHHNJRO6NmW4y3ZXk5jHudqGOWDmySFoSaXfcf9A8dM20dIM4Hyl7yVvZ/5EZeGD8YHOhFbcxUyTtVKfmf4mWpm1gtcZClGTBAZRLiKcevx9eVEyYHK1hixbVnp6FvzZTO0rATqui+pn5uCi2uW5kjlMsiqfxYLiNSO5VCxS9oQLZsEWqCnMSmUidaWMNVUsrG0BiJuWh/p7dwqK6MGq4N04Y0YlLC/9z+lm8t3EPDu6U+PEkldoRtSpxYlDaAep+VNJcbuEO6jfYmxqYX4lIT/MRhke+1baFQWIPDV+T0UC5rZwt7NAM573E7CyM7H5d+Bj3kFXkODNu9J1VExgWOstK1zWtlLk690VrXSqp8fIpRzC/fGNQeR9PKMTv1wQ5o2WUv2EpfHnKUlUFhmERyotTorWtxnwuR0/Px0pV2xb0w7CvM+gjQTf4AW5IszvlAGkjI6q5wnsMpJDPdlyTePt9+gnjEW/j8lIAQmpP5HN8xBfpzuXYGUScb/8QFUHB0kK9SIhMuba1eZV3qdP+Ip6CK2yjTxxBt4PQ8QiZH5iEkDjf5arnKuJpAUnWRsFkTQX2k+0NNskZxEjuxMakHoTtZf2PqXEbM+z6NRrqFaRPx6psaE9YvNJUG08bL7xu6JTSgXf5i1lnD1MGQ3SOZeaIJ6a5vpYvi+Zzd8jJrAL83vXztw6cZJ3Hai1DspxagqMZCjfK3snAjLapHpajK5eujPiZf6hpWikBkc+UFyuH4zDg4jMie/IRXmpz5F6TSXdq8mR7sUTUjkpCct7VPln2P1B4u1s3isXbIOcv7msVprDGAE08H
fxrKYLXyUFbx5u/g2pusd2+0GVyfLFLIC7L6vnmO4fSnLh+PmBkw4K5+97yF+5XgLIsKpGyUgPaGjCDeiK/mr+OFrtjDuHH4LlcX7z/1vetnJnd8da3wYzfSZ0c3yppzuxEbL+l2LLIdfHbi02RhD7GIC5Df6qEaT8U9iFlKge8IsLGNO4hxLfC8fiKEcRZrHq6jjwFLj4NnTcmyCjyVjM5C4+o2mF+l3/ydTSA5rfSbof0/K8wA3QU+dh4XYcVbAlmFkJGYkTjeMeulzNi2erWTdez7kuuI0fH5axmQGDZxLpQ6b8Wc3vmnC0yYdGuDmrQ5zku8cJAbcm85zhtAm5eYlD/qqVHcq3RQnhgb6S6ZGSkCmXwO/r+XgY7V/B2jw6EKF4iwzK5qg6mzxTj9btqmuyHnLaz3LoyO3V7Kv6HTy7nhUARXgcn+J+ZxzQdXhzAKyynP2ZY0YX45+gJ5SVgW6E++AhmcaSA/8pByS68Zwk46ICUd3V1XmV/nfsoVQ0QJHOJFqbbSEb7kQbftCtshfhb5jzpuAGKNePo1n6+xVU3tt9Prf6r1d28wAIcs1hie3qmbu0OeNrmNhCJnLxP0cPlUyTYN9WxLXRj25BGAbl6UZs583rZiZ6N7IqMXWNotKw0CnzWME8vcy8YZ39RJmvJ5PcFVXBP5prbFHdykVFMuWcs/uba0KUV2nB6jW9pJq8ChXnjPnntuRmeLGdq1tgcj40arXiWEtVSgIopl73nEwtqlmMzYEytl4aT+3/9/zjXqC1oc83BORVutZtgYnai35joHsV/tzxNSmS0/3X+ixZs80rDWVQNdE5kqO3v6QlhVrjzCpq7wDxj37DODhtW9wozmPEDdWNvobFOPqtONXPOuJTunDzeqW70kjCpwT3Fh8yItT8cni2fL8OnEVJ9Va1ws9E322W2x3oCd2RH7hwxpgwRNJZAKhYq0STa85mSyzmGWkVgZOtuYI00XolPzN+wU3Y68kcr1XrNeBoFF3QH8zJA8tM1TSjszkNSLpV84uKKKjGblukvAeDDXc+QPJCqfR9lGaW+W3emxtpRS01lxyOCUnNiI0hZ6grbOWP1SKdFfaRWEuweoCCoBnxArTgUOIM4v5UoTab8ZnM4Pz37d92jymzTN7aAgcMCCRURh9f3wmQXE/6gw8R7N4e/ro24IGCG9LUlUPjoCs4X2QIJsWIFx9P6eaHoJ0jBMEzd9ssI/z3ZF8lf4iBb/AkJz+DynFd/TqZHixZqwz7eQ1qFvp493VxcuaEi5Dl3Sm6jPrIsM86aCh8iZvie6nBGZeMZLMGaOL2Ym+6havIGTkJ3B4nks8HxuSFthlXBeDiJqzlhCsniidQuUskR0pYPDBCy1Qt3zh1btE6Ola28or04rX73vVlV4tglgv/JbMGGvfQdswfq+aX4wAq5d87OAEQXnBZ0w78V/lxr2ZHMyQfoT4H3AiF5HtLMt4ixK0nsiC+d7ujRGhm1YSPrc69iCi4XJ1C4+OCiFODn6pBXOMeE92DMIykkoeFRZccO3XlSGAESCf9t8ZPklwK186YUP9HigNDuOHAoYa+l0OHg1fEHqSGyaVYCT6r5y0/gwwwIB6C7ZVO1h1UnW/uzsQv97uTh0r0f557XFMhV1Ato1c+GDFBrS/8SBmW+XuvwsZUxkA1jqXbuN/aFM9MlygLV1bdJZPWHZJi0yT4f8HuiDbO6iorjdfKVqBAke7RIdEn0ggpkkn33gKkg2Qat9hMy3F1w4wbTbaLuPeG80TGrZ3ZziCBQGJt8GGiJ8DUSSgkgsEQAHErj6G/pAw3ColpcQXD/FaEaeeQoqC1Ut5ekT4kFu3zycqYrMv/iX4kLjtk0Igk/SSTaMLQQSG0EHQbomETyC1QKkPPx+a1w3thjrk25b92qWQZQ9stJbd5TXrxuiHmX9dv8VJhBz92O+ohmxjwz5Za7fzFnbYCHyVm1ssl0MQUN01NJh+wTQYd0NWiIr785Xr84V
T9pmbAhkV474NGC0ZJJ0Li+Py5cKssZfitk2Hxzp62BvWdSExBcRa6nmZhLtesGH5qNm/plUHkurDEY8iQroXEyFJhHvQ7K/vZXcz0CAupdEsagsIaFrkBF4k1pGy0PvDkE1vT9swhFjz9pIYGo5LUuU1QYkWP2Q/e0vjA2wjC+qA+K7zuMoFSQwNHS/h23TbWqwA0DYaVoHUr4GIIjbaub0x0wBYZf8MzBlIWE53sbfph881kQlp2lbqhEkjdVsN69RqMcXmhOUMxzRin86yy1Jev7H+n2gFCLggAANNt2L9vGzbZt27Zt27Zt27bt+hmziFnIwSLqF5bXojb11rKACPJ14ZTaXwdKAayZUilkSRWIgpqDTmmIZKmcOzXVOgH9ZNktKIYo1pSWdrn35XKqqbStIL7mHgpQ0j6x5seXRG0fiUw/8k8UIXegpROrrPcKfo97yWD7/ujRbJkmHBJhm5UesqGg6/oL/0f3ornIPH2KCRRAD2zbpESoO0RudvrfDbcizxuPBnzWe6+yqrfVn/t0juJNiZwCU5U7hPLf2ejKI55ieMqZ81OeyPEa9l4SAvifjvMdoRC/MjztIMWdsnymCS45gXdYvLcYP6ry1e51m0MVkkCIz8qCzSQO0906IAZfR6aFkVdf0AqJwzbWPYgTDM6kmKhAOHvjP1UMDaZVV4vUrqCWGM2W7hvikrBAX1Yab5SjFjjIXmpPUpgEE7ipD9+Dhdv7S30rI9AO9s3eEh2iZFqtS4Md4D1Z+RGrjpDzNfk60D1HokM5P2padbdWB/oJ7hAdSg8dz4TMAkiVVsSplk2KnMeUARimXPApJdjCtGU2MDvxT0oWSSR4gvF/VH4M6R30NXqiNZbYkSKtkcBLbN9AN2AR3aiQoZ1uZLIZmzAVL4HmES8lhmCy088eEqCcGIn2Kor87uUnahI/YkEbsm4WGYl73+1DER9pIIp1HM3Ws9uoZlxWeMtq2mx/RuMuWU4Roq/Lv7R/oyuDOo3k400mvyH3L2FDCSqSF6bPtMS4/HzTLUeqhNBeJM9LzAEk0vysuZsTjMWLLPBbZ4V2OSB0NLACpbEkPOGPyznbTDOUVKXd6ns7IdAdGxaKRxw0VH7LjYzNSEHh1h8ZcCIogdp7akzh4Q1ezTj5MWnzHqh5AVfjnrEM57DckuwyBS9kxBJDnSzXxJm2WEHFT5dV3D4zpMkTZqAqiQIetGARH0tYft4/wlB2+ZxOzWRPfDrUd2glXGfM2jC2DhhJ87VIeTUnRa4brwsHOMd+Z8PXNcNCybPuNUcjGmzdzU5U/VjhruPMsT0WiGBG/cPHSjLir3mxJf/kd4UkMj+w8DiJsheDMDBEs8sw7oPyipGcoY7S8b0slrjTNvusaJ1iQ2tgmbtT1U6J0f8pi2w3dSPChltpYzguQlk0A4Hc7DAjQGoz+EEVFxQY0jPGnCGvdUSozX2Ub/6g2t1HOoLJRXELG/0DLTIHvan3QhiB4+/S1pR2hwwhJYqtYH0eU5oxL1xMbB29L/V5anyLot6vmDW8ux9z3H3U/qS61/+upQ26fPqiYeTEtWlsJoRzoRJW1vNHMGJvPrM2P4X8cBET02RbxlNenw/wv7m5L+goaTHYNWcgutQfuQN58uujWlfEXdzdOdOcbp5iMgdfZgucj6brHAc37Z4F17JdThe2mJIAYE+in6ZQVNGT6ArnDo6vG2X+foFODo/a29jxF2//yk7+hCSJI9RS7tbXDNuNJR0DVcvQpBu9EjNlaPAkLcpBcvfkM/k0MixcRLIjNVGRLDmDdv7ehGaj+xzsbaE1XSwbxoOBqMPgotKv01H5B5vgx2fi+5bKln0j3rn86ZA2tEyf0FwEPBqAUAm+d0H2GBoueJ3UUImwlbmK3Ufjj+IbiCSeuUS7FAxulFI+yH8WxvCLPCaJ/FCcxeo9QXWbNGgEfXQFf7RH7wfJb+r5cCimsUBMu6eo+F/pMmOMRKymsYenl1qhW0MZJ
ec8WkNfXv1VbWHIKM4trqPrhoLluwr+kz7/PZ+YpgOAPGnT+SDT8F/2tfnJDJ6SqolCQsRH45NenXoQPXJkhcq1TPp9IhNz8qTt9/gomR3nxFgsczEdnPAVEXSPGwcopLo4eSihX63wz29AkH1HD5TTbaFiigU52e0R+k1XWbQ0cryuCGUsy2G2fyMd5koddgENtvaOiba6RbvpZmk+mdrBwfsAL0YZRvxWsKNJmPIgZd9tIW8B03ApMWnOP7J/b0Va/4V+HXHD9uP3Xr8SuQMLXFfv7z+EWzoPPjcLJYbMNutkjSVAeR4cVD17AjRhdLao2Ell72CoeP/hrSFLRjjgbhmFZumono6KKf20j3NEM30970kTStUS5ydvAM/Y9MAHMOU1JJraKcBFGUkOQGsiLEVZdwqVy6Gx6RuevDd7kqzubxm5zgVW746M0Vfl9p/gKDfTyqUdKYaIvGZpGE9TbnpUrbTKtB5C30nPaPgQUaX56WRyJvdBHhcEuBJCXldK/Z290hKkEK7vkwf0YO3qD9HoQ8+2R8UNk3i7+Vq4O16P/iw3a03dGkpywjUbiIrMC4UIul45sTuTMEK50vnXyvd4y2tVVcVONp98rvSGmi1s9e5sAP3SELhJd85jGmy/812+vkVTAnWQysqdKqLs5coLwXR1qfWRH9aWs4o64JV5E24cQ+btOBrJCep8dR0uiebYQUOpP8h3/rsYwesEkPI2H0GQh6lmvFcTmiE9oEsJEZ5fpe3gr7KPC/KifsCP3cfCFqGoN3WcrNl5vTj7Q6qaKd9RU0eO5RlTxhsFQJM1zIrI90b942Bn1YezjfFgcLR9JHdaNtpSMGjBABWKPqkuSAcL6Ar/M6RoV8us24dK++lERtOsoGNflq2X4y7bovPZmTJVfJPqxyRznMQtt3x3Hy42UB+mARmZra8BwCeJapF8gRfbe+j/R7wx6iWs7oiN81DMZLywKQALaXB/bl/+E4r7vEKIZADRe2LFi4JcPbsc0AhyCKaFXsDS8hKpKBO2554VQuP1KNADPsjGDgrOBmSHFWjWnoWNIMFZS7iArV/4iluXL1UqVeAQulpM1NMYiXKcCJgICLXh6FH/TnggPnUmJemI1AeKTmJn2AByBYpJlwjEdgJn+jVFLgvQHY1/XLDzhf0NZ+mEj+adsMSacUVfUZS1Dvx+fZbGFeY9MqMRed2w2fii4VmqpjnF0Xu3+Y/4AUXXfhuiEkMlAb1l8ywa1KoVcTJzMYeRv5YLRhaF6KCcN9bHsiJYdZczGYuqOmSAvIOB1UkM0vSo848Hywp5oWJea0w9z4LifAnawt3F5bTknZ1xrOjO4INNG3L7cK5g9SqloVgGG5LOqaqm0jeBmhOeLCPu27yJDYgVfGzvHDvDHeynyRojytNkSGMJK0qyDt/YWRBmizxAFs05Vfp1WYEr81Xm4smwFIp9YYXZntyVfjWf5UbL3sv5W6CHwoGwWNnB7FU85g2t8YFVi70BSMGMWTHrTVTheZC3xOUVbCqTXKOnLAjSrebPdxVyjbv1MJoSfLQvML3w6jjIgRg52cjrhhQhA593ifJB0mbHYhwtB3SwIZAJ4ghayTWvwX/xJevFd/YPIXPMewW9DU23rs+XkHXz6DQ3F9G/q/98fbK3+vtYtk7l5gtLEcRqpqWlJNJUTnXuV1WonDcCbKG3TXSqBc82hH32Pw6U2QZG90BC53qU5bGb5YL35CIFCl/Kix5m6ofPKJb7uWpJjAlbl+WUek9gTq5L0cb9oWrNq6Et3Limrwi3ZDBkVdfKDg8s4iLMJ5t8Saomi1SCcUG5F7jUD89MLqtWkxv/faQ7rKUmXYxahe/r8jjOk8mKLiGqsVC7kYwNFFd2mKkXu+PR1fWLtjdvYpe9Qv+ZUKOnO6HuWc7Wbw3QXYkFsI0GE/eR/fGIRAl7dB3tFPc6jVv45oPKVAJ+BgSPfodPyqmV3ACzHBrGJ6rG0uvhd0h5lkvGO1wFy
nnYZqxR61Ne0nVsvwQ3vO7umhngY2VP9BpziY12lMN4N95W74dUIx716McnUml6dEvb9lXF2YmMQUbECIlIKny9nn064ePnP9CZBlSQc5BX/g71HFG9YTEI715ujG2/TPB/M26O+f38sAR7gBcHhhLLxLdREtXen54DJV0mlZgWy2zySnN9rAmNnR6Q2hBJcIhul828uNxN41g0hJwpZXPF/DBcB9CCFX4QKX38kJvYxmle9h3sWryB4vJK0W5m3LkuO6Ow3/e/CTedjSiqY+swjvI8JtfDDp0EYl0nWDPjV0DCC0NGSTxWGAH0EjCdPvfYPZAnfINYpYeWpOBsLnlPPk6/0OMRG67+hG6u0mBlTBfRdr0ihNuNhuiJ423oPEOadCekd3JztOF/gL711NIxsz8ihJ/PMTs5TxxxoTw+MS/dLDg5rk9nQbNsKJCTXfu4L2bZD7h6MNuUqdOTBRK+DWmc5QI7WBrKXYktKPEo9yBDHqdfXfr61freCQlW5mPmIeZWH/0rZ8nTdHmxLKiytxJ7STU8Iii7unWytp647MGGyJpsNKjgf0DpFUlhZSUJGfyvA05iUUKhMXkeCXYqpuMMij3iUoQgD5/K/93wh4AOho10+uylpQlJuQ8DPZlMW3vBaMzGgJGdB/LBs+/15aSgFTbDlv85v6d0kQKGV+9q6sBK5r0r4cpvb0SQlWKcIOEEvbAGKb83nECDCQ7nFLsEmnY9xba8vL2AzqGFEOUoX7g/Y2Xy8ZSXUTTwzzeebsYzGeBCRDvDB+Ym+ugYG0Xh6xFmedApDbmg6acrKMaz+qOhJwXz7EHnHlpfnlsAfbOhcrcgS9KDo/9dBfETcbST7Q50YACHKlmd+hPbRz0yrjJ++8hqyy+Cv0gXDDzEk0ZkU0Ql3zFWJ+sPouiRSmmM8aO7l3kD6KnvTNeDKXrZaFmrOVRSdXf5D+NqzFVp/r2e/FcXeIZMC348qPOM/46QrxOOgFKybNGm1poZJgkGH0VCt41qrkvpF0v5Q+0Ck6RS4V7TvWnMOpJZCghZtFsrzIUw5fzZSNYNqcaRA7EuSpE2Wf7xLmKlYwk4tzCyYzxK3uXQsmL4Pl915U5nRpdE2b3qMN+41pJ4PgGffJMcYZWbUSbfvn8ZDWxSNLK0MJZ4fMoQL5jCOVDMx5juNJ153fbN2T/agfMbYVfOsWkFcjE6zLzFhmIzji+Vgy/B1ynsNivyVdcwIf+R+rIkIRmgd4vlbm8M7WysKA8DKNdgkhzV2axrl0G4Vw+x/PM0kuMwgWVwB7ak+ViqyU6E+hE/QVpGl2Y8HfELgsYXR554TCMURLtOiZXA4w8xctNFTF6ksnkDgbNLzDgM4PCV+xiDfw7Bi+FVk1ytVmnV4vIFkZ3pwohe18ycffSrRafDsuRd00uWRHqVyGY1WYXTOW+csVqGbtV0IvcEFGzFVONRzMbwGHn96vcYRz+EPA1CGhb9FNQbK9yMNxTAwrk8idQbrD+Qx6iWIFfwHlAuZ0iZ2DRoyTSP2MkiB/ZttZfqpeJkSSR/GG0eZ/s5AmTr+7Gup/p2U/u9pmLBs+5xBdLfH991p0sjSJRk73CcXWMevcSX5xDjiTunQQZgcM2H6a2RPpdQFmoudURsdvR1quBm1y4mkwddxnuB+lOK8Rv5ynDWGVHc+ngrCIkYFYR0hMcX8x7aJiJhzTgyv0TtGNAaNofS6mK6ONY1ul7nc1PxDHCQ3bjjzuiPf8gN/Omt5/w7idlg0w50IugRWyY6DavDhALbBS5Za2yhBfIf3GvKzhE9EM044HgD0cObcz6i5kgEG9Ypbi4X0A1XdlsdTLZxs64tNZvy/yESfl5cQWGIgSLVbSPTt+vMr2wkmzPHHIahpsLS9+rfp7lpTA16Wb0otJNKRZwnUlEzsSVqCBI8zgAv8g+sRVB2EH/2gb+1B03aFAzyMkx+OYYlgutHl5TYv+258MNnOKEQ4K/Ua+2zcs47D6pkJdyRs
+P9VV6xku6L7Ehg9Aub2c/I/ToXhlxLmlsIS7lbDeDZrBRSYG7Ox/ko77YkqJB9LylRFW7XA3lJ6tWpfhMSt/s9VRS7fjSb7OngoI22nbHoEXq10TrLyZyUI4zjLzKSZZrQ2pLA268NEGzKJGF7Z/lx3V7Pjtsgzm/R2ebCUkjR7FvqX2iI0AQRBcBX+LdGxNgYTUX05ys/UylHA3RhcEd9kSnBwA1cq4ZJdf1ChUDtNJgmEVCki4GlcKAUyJDOkQyeElEx7oAbv2mWJ+KqcE1tuhDi2NBPSpu4JyWXp03YBbgowTJi527SswpBxTgMK1a1HLUCni2TGTYW2lq3lr0jIWUxXpSwfwnjKxJmt1jQIWB3Ze2itciS7qq9pZh4o4y+MTqDrlj3LelbyTkudIDaU1QefZOlncerCKpsM2iD7nwxd48d7l1cYZYxs1JUGprvl7blXXetHwJ0uEaT6WpFOfOR/lggd2Q8IsacXUCR3b8tq/YHLjgK7ihWXomGG9XGBI8idG7PCtcvDTsEprVrqXQ/tX8FtFpnn5ShSbXVaAMlpN12iupVXXCLSEfYfw7tu46rVR7vK4O6dL81n6gfsHh9dCEP3lpKvw9NclIz2pNNnolZSQsdVoZKSVXJMWKn1/uh3WYi7tuVgpuCC7qFQd0VgFgDOnvkBEg26CJcXlHjiePFqc986ErELiTFNtEL78uyaq2SkY9z2rU2RjDDsJ7X7ZIcfstQrcBuDYvPbdvh3gRVbzaXGUSu82WTuHWaIymLjBzsNIVwLh5cep29HPNdeSok1Ss3PZgveu7yDrDguCuJuqpOaWk/YeL7IRvdu53N7YbPcTCXWw0FkxwPRYcHvrCZXGq14PfaaMBLZ6MRrxYLvUklynHHMEB1W7jPuw/68exKTz/1S47GtL8FjozrZDDB1Sa3UsynZrUlZghppOrM0eDfgKfi5o40TkaaRhWLR3g5V6ymhcbzhp55PU12py2UBFcQP5hvpwqDZKN9/61P3nLTrKL204GtUU87pt2w1slwDihGTsAPAnyucwiAj/FTwJ5BoLXY4UVLPU4PvB33xLObFgTyt6hsyBSsa3RLD/n82Pxx5ah0BHc0j/OqCyr32xvDCRuk7KvJYCQYRHX/7pJ0cWlSM+ltcSAbFITSSo7k+l6edoI76xEAmdqT1gBoBi9QmwyGC/LQyuU9TmG7SvaA1aYX1aPjZ5q1NDRNy0s5xXr9tj0HmV6yT5SsYACuybvllcRwFOWeKLLFO8Fi0Ui8Y55mR3Rm287giYVD5TZXBwqNYrAMnHwB6Mg31XqElTXrYAaOanvKmL/5EWZNM9cdnGWZ4LavfRfqFx3pv041Xiqoom5nimUbMfF6PvE2yJJXgEW8J3OPeZLhR9Y6Z00rem45O689S4Npgli2r9quONNEf6drcOFZ1F1e2cFylZhvCrRN6P6zyyZnWaeWMZmtOY7BO2m2iP1aVuQe6WL1FnXKumswjpoki/Bmoch6ZIlSs571hf0jtlIhG4zuHbI8m58DflANuv7XfVaggDuflZIfWZ2UgxCWgrasLMqZL+4jQyaXP3VmaKkIP8PJrzAFQG4Yl/CgSdj5qaeIqzq2jc1GnunFPo1AWKjwT6EwSMAop2Bvhv2wm7T+1YVe3ZFwRsTSyjvzKjeVKibRfgsscvYFwMAkzjT9xxQ9YHGAIEZOAGu3iFMpQzZwyohIgAJ+OyoJaI4V/FL4KX0jSDo9Cw0wGI9t5dD0dIflttg9ojabz9t5knEF4ywRWOFBld2CtWIIe1Am5MMwzOHxaVen0ssT9YoNI75FbVCVFiR0zNxw7Qc0Syvqu3BauBog+wd/Ur9+Y0ATgvobfGVNu3iuaHxshGOdF0nJF5gXrROmX5Hvk9PKRfA+p8ClCb8OvD098ak8/pS5ZrMGJUOJDwKZI3raYa9W9ugWfdDuPpEj3TG+GwFO87wZnmVaa9aDuvTnoqpsrw6DzauCUHcx+Eq5ZN7/0
xMPYUBlZHG//TSi8T5Y3WmK5bK9w+ZC5KnW9LszennUSmyioukx1xcwxKxq57i6EAZYWm/uKjwg8Yy9oZn8tw2sxd2HBe5YX2hGM9Mkx07ihqx8QHiVjE8GgKlM51diNnN3LGRAfiE8QumL7HVmFVOIxBoFAneSi+xt/TfBOr6AQIUycQfi1TD8dSd5qM8YHVDCiRy9Lq4oOYWdgAdo+g5PGbt/JQRRZH+YkqXx+4gR7T8jSZPxYHdPF6Er8gFA+DwdSyDbFbYP/3bWoVnflCg+8liGk3WKkwDXX+zL0kuvzJGKsuCw52F+UyKHGuEA+0R99dydoteE6mjbqP84jPhnk4uIbMtr3jOPphm36hMQyPpqrp7T0oupKahOafpY6lkgOQdb4Zcy+GjiR8y2vLXFe7KTm/yFCxfC88adxWzJtBn4Es5cymH/RysTfjYKQthqDRGA4Onys9YHuSW8Hp/0mPvdsQlZDAO6L5sg9xwQGMlyjIfcCeeJmfpuy3Pqmyvve2udbiwjGNdmy9pl0UPhUAdPywYu8Za80eSaIp7Mnnv/CkN8AJ2BdIS2bHFgN6KSew4ZjPNYOCmRKxwazuSwZuJy6UNtstfT8/1FuucCUKwLuki04GFFKw+e88mIBYPApjcEzHInuPhaqyNLrY2KLB+QfJw+X8Inx8hhycCWPT3LIY1PEk/nB1ddYZO5XX8x/Lb3EwgTIXZ0Oa9ePQuZGyykG8+gVOmvoBLGBBTYstTIz7/pBokQCVU4WM1OknDEQFo6k8rzNN7YT6Vshr2CHhwwinOvYfI5TrarmxdO1I6zI+G0mh5w4G7Fzzo9ozQdikUZ/EdJYQwJdXjT0+3F9gU8b2ISRAeOMYmenHB+zvbOMIELst6CWzTJADcp+HA/FsLO0h9Ta9QHMKf45BlSSOOoL0LG3SNEhINZipekuHipTVMzHYsiVeErA+KCghIv8rE3iiTxEVbjQHjPHc4z9ot+7WOC4JZ387luApmwMPHbxvpITHcL3MOm+RG1SJscfbGcWPNdoVK1sC5bmMTpzqswxRlsduEwSkE4Cd4iXJ88Skk7lDS10Oo3k8Q1WPz86/+UhHEs96/IJ/yWo3VW3dmtS3D2F6cID1SGp2EZZArFmBbo9I2xjIzzrOswyaRE+Cj9dHryJHv+9nh+FSKrRa6rR/rwbLy+ktfB0cr0fXNmBlGgNcls1SJxtD3s5K1DHtcwA/WWzV9qDxjKB81FO/WxWkbTvFRbq+OW6OdxIBD5kki95qItnPvzSlqxEa9G8BWj0v6yud6iC3EVHtd7fDjvt84dYKgbg4zW1kkecBDUuuyvA3UqOcQHOqNEK0lMbOZtaJUGNYQb9qnwFARl/KPmd8UryhK+SpOByGQlH2Yv6NrMgQiisi6E2ZlHS7KuhygxN3dTkMS1ldhOSniQF4lG8LdMUkaSOxu+KUkPNB1KcPbNRLPODxfMzbgJJP1YySuXy6Mz2UrU4aCUY89QvfN1+relDxGE7rFux0dcfWvwmdmpTWOyGdl8aqQ2musI7ujqPgCrU15MZ7i5j4oGcoXBi7NUPPnUewnZyKd46pAmzhtUlcIUqKV/2OfERtsytXbBmkjU4P2Tr5kBiepMniSmKP4fwH4yCOLcv+KhQFhM4VCQ8ZlPoRH059EwCzsYs7A3SuW0ITFxCZSNAEzgjP1H7DWWl1PRSSAWZMBOB2X4VxzOlzfU2VU7Oykv4ZyExrPG/OF7Oj1FjCjxVU3Zmn1+fxPI9sIMLfSm4FhUEAkXq6P+gAE0WHLI5eidQq3mEdm4vvAfjvtX6FpnIUYyTbn0vb4NQ5sF9cVhwIPCQ6pzzPD8kjdLgYYv2zfpx5zF2sAVTyQ+mHRbSEQmCmKauzSbZPBGi5MR2fJUGTu4TT2DweccCgT7p72UuWKZPu9s0w8v2FiO8wc/UssQvoiEIrSJg7t1DQGQDR03f96NQrommAQOmc+6M2wxknsKudsr4lBvk9rdlMBOD
aMW/b4XG33mLh9fHGkZ1RDdPXV8YWQpnecWlwv7h8U2evUDGBVI8Q+lPPDPEqvDz4/snOtSoCqVutPcf3BdGqGlHvGZXMtEFL5gTs5BHCgWnJVXCM/KPTQa92V8ePUTmaqcvYUO4n1d/N56wVUnjfxUMkB6NSyOYedcRGpsmX5XiTSCc4aymaQ1S6PfHcDlLI0Az6gtchMY1xtt7psuUqUswwxzbLImygIp978jpux7CcJIpA9AWdXz38Y7x4C93Q74JspNcMYjmPabknQoE+XRsUdpRBnen/XLb37pt8xGtNs+yc0dfVSxg7TaIctPdqyTRhuDPDCtUEGifsTf01PdEJrhgMDA9WIQWA+FMwokc4sSufWPahUklvKJRl95rZ7EKTLwcvun4to5/iQC3W/u0sSNtPDX5G1XOo0zxw5/gGPYZtQPzwPf11R8ERX3HlS7HmtW+exGCDcBeCd5qjtH/eVHV5+AvZ/PL90KQz2+Z0RihYOgPV1vZRzsvFs5X6mgss5whmCknKx+ucr4FcKuYEJt0dqOP18Vg11GG80ustt0tRiFgVPfgYyew35YWKsvd7KGD4SaNOujlcoJ1pk5KgfYHwev2XzpzVLzG+aC5GcvlJv663jNrIyfglaqPMw55tKG02h2uc/KNJCgAyoiuWG5k5plPe5IkJAIkNXof8SKIvq+pcj81UQrL73+ZEUG94p+R7MG0RwL+TamEwqm1YOn2UGCkHknhhp/A6Dr3/0SSnk3fE5+UfmBMMgddm9hAU+neV57myQ566sXoK4pE1Sleb186fPBNuF0SbNKPrR+HPqGKr3NQxZbuPFBSHmz4Vc/S8fmHMp2/4itv3syi5xlYKLBmWHm+bblQhok21Gj+6hy71LIh+9MmDKqg7p9fH4dFQTTX2JrXK9Y4rfGJHE92WF8LS0qnRUSGbS/Awwat1H/5ZKGFFRlGKK0+0S761UIIkbgh2nWaEandd0pgW1hMfD+jKaCCNKQGBrilpdz1tmaqI9hXPLQdB2PCVgsr2PdVDAnkoaBcGdgGP0y398jIhgqLMRrbymdn6jEbCi3k0mJV3XeoGn65nIuZsQS3aIOOzO7Z82ebtxjE9TisAi1Z1+pn4ocR7Tk/AQzhcaW/BfRAVD06u7BOJT8b1MdWTVlEGu3Sj2JPA/w8AO1TFx//oDnc1kncSMBqJx3v6UjCg3RW7GDuYzzpD1GEAotr3tOtr9eLedfzDxBMm5l0C62nt1hezEnDgRUCdqhC/8o01pG4w55OWwVcMh2YiEpaYyg1VdYx4dhiLTxshFlORCU20p8mJHfovGnwJVkLO4WOt4H4/xLOyiarIF7FCGeUQrtVv5bBClF5fxCylhau23LtH4eXnQDtIOtojBtRMgG6oHAbqvOYFi3ZkxE96WNPStjE3vOVpt/jZsc7CGWmdaoq7EK7La5vCAYF50QbnI8oqmThdgEf6yz2ojc/q0LTgs/SITzl4CgBBo919BWYSp0YchCb0yLomLx2CcEpqeZFdqM2MYT882CCXheI1npYb9Wj8oF47l0qijb8dc1lngCeZxDeLGkI6GTQ3sAoFzUvHvB5zDPBA7P4cKKqSCEkZLlj2AbaUkEAcfn5b4DFHV3zoP4RAc4zZQs8VCDnHgKqQYVaz8rzHt9Inqyhyg1ECCIaCEOmFZG85jqLPIYVufXysXkArX1Yio9itWmmwaYkzguVqZ2ranLuZPdrG47sDhI7E/7XnIVwCFmTESdAkppZnJvB1OOxOGvdoZNWOkIYHskry3bLoKVMoSdi/wcLXvzfvBHaDn+2hvEPQl6gjzP61sNSFrbXn2RuSRl98juh4/eEH1Mbd4nnLpFgqSnVHJQVn0v0Bgo0a+hClJRAhnZB1hiDyRUF2Ms0gyM+aX7cj6mOypGLfkLnJp5GbMKnWRiumocryUcQiUCgqa8NGmXzPi4AVeGjAq/MvxCsaB4XR63vosCsg09mAjCWEBWGFxS+9DrRphxW
4Ol3BXn3jIwVDYu319QE80iR4yFsG4+ihUViXGMPnl2s0yCYU0Ek/p/ye5/rxIhJIx1KKVTafM5wsqZw2EjD+n0YWzu/xVKbZRWyvwew0ANzL/1b+0zGyYfO1jgaFwvSQud8gHh81zBNizipV3PC8U0bDsvHg8b3hTlPKcFZdnXWrbR4KWjx62UTp6lxGVmZCPp8pKNKDcRSuAn234DbSsl2JgeQnXcfPd7vbaSd5d+iOwc/2LMywXz7GVNGWLNTiKKhzAu1UfFXPpyQZ5R4DAuOHVb5Fiq/dlkHJNzFd3VcsyIrlfJ3txwvUCNfqaUx/Xgo2ODzxeoE6eUHpltYvS42zs43I6y9erYoMHZMsvTnwkF7RQMw82lw93gObMVecRY4rJcTXYXDavOqR0Cf9QXtBfMigcMKWIkXgvAEf7HtkHqx5bO5Hxy6CO4IkZI/YN+TxfyPs/RfiXgz0Fk1VAPqOyfZ8b06mmCyIi/ZQCdjixA6eUBBPBEWD3snek6Si3Lr9OYAvPS2ct7Q6PjnYqjPf95Bz1pFlgq4ayCyU0Y3uY+ObVTUpikzBPK9GtqZIY4C6WELx2cTCaRCCyZ5BjLtWrM7BJRRCX80KZpIFqk+zKy9gvuN8Vm2S4SLTKSvalcPwZU9Dab9ylzh7SDFuEJVXvj5mKtcWtMnKN71hq/P6zmIFE0rIw8TevNbGsleyxBNTsnSg3FGcSdLSp1WlTHR84qxaFfyvSjn0fCU+Vn17Hr+6o2NbZk1n+ZQ+FIaDLrDod6GZm62/11Vxx4sdOoJo2dQO4ck6BfODchph7lH/n+5EeBDSCn58XJE9ud4uVdPv9j5/UzJfUIzJUnejLDA4Ug12B94E07nMkLpt387R4RXoHedrVnSwnMofYgiz8zqXtCNrx5DVm9jUOusa4lNNQa1sMN8Zs3I7q8UceIGc6vTexo+i9b6RT15Z8IU9SxeqNI+OnfwUwNaxIePALVy7nFhjmdw9yHIXENq6NnUfJIHhxbYV6CS4fmR6aTjZgIrv9UGJTMwQ+/ps9VO0tzx+TTJgSJdAXpY8nf+M39IAVHErKgwvBvhPJtPh/5W08+wL5KoQvB6mjjLz1JGmw3zaOlah6w0Mr7frmysX/V+bUpavmzlawSUxd9uiACzBxH/WWG1Bs8L2zy2C4+B/nJ8NUs72Z+NHKVC313OVKzpfugM60kGaS3hJPu6OsjbyVmlTo2w4MFYtjmw+I66f5Q+jlEYnmL498O2XNuqGV3o4dnpFLx3zPzdLIgQpojO6a36M+hrE3HNVrg4VGB6OnPkWj5pdZo1g0bRi6fJajkqVnicTHdhBD3lXMKBlCfGze6oWqjASAj9lHLPTK+IMFrBnQy9zH1vaL1rVSZtl1VOOCqfAzSpsFJnKmO4SrlTdpXH6wIUxaStMXZzui4SHOuITNQyakKmYAd8igk3hvcrY4UhkK7Je2mbeNBz80kftVyfri/92c3b/pa+/P9Fjlqnt2iNKmpWf6w+01OGS2xBRy5kbRQUOo3dqNcHyvgemb0yCTySNy2JEzNId9ELQFdATBtFNzSs5xMmGa9Ggkn7rhZgjEoKpk37Z9RAf2iOvPsyYx80BY3EY3xOET7Ipcoze5seKt1Cfq5ozKYkWapWaJzFrnM1ra9JBvLqVM2O7swjlMUTQz5MeyvxLFNn15WyVJQF78YneVmQWsk0k4Bmaya9KkUtj7/X/qaQbrhXfe7JJjxlVnD/WdJIaWr/iXn4osKiirCbOMG2UGr9jO9wRSIX4lrUbh3JcselHrtu9joJpd3fDDru9N6uYrECS+mk5RTcLQL4c3GClNSufyo5rn6pOgqAjikjLhq1vCgP8epVGUUCu11+39r4vX6fketZPbnuSzfocf/Wl64JafgZTHFOn38TM4eCdwJ1+W/3F/vqMX8r41EMaCqBhdTQI8YguJAy0pzqqIJIKWU+PFchDvW8Rve6mTi/vEg7s3AJQrqoDtfgFbDfRdvT
u6yEcnpV1IBeh6oOsT8SUJ98vBZce5EPRb9GVUIPv4RXML9OPZ1X2OxApWLDivcVTIbXZ6MoMTqX1aXLuj8kN7fw38LSiyTK+zIrb6UVQppeJ+X70C4aGVsf1cOB9+OAhqhUx346rAf/f/FdzECZvwC5baraUglXuNYmTVM+CJH2KMuV4anIKnoZXKEtbkuqJViAWRd17/Xk4luF8Q2Ro1KEzo71GrioymCcNbkr5Ux5IkBbzxM3R5pbt0dgvpvCkl0PvwaWctdrXUos9rfarrr5PXRvm7g30H25hVH2VUWCEOs/Q7SXLR1fxLgJ99+7urZDNRt4GMX5t4nSU/ugOkW7FzJK8wtHwLm+Yx1zpo3K0SmMKP+UqFqSJTkTfB51vCNllYRI1BA5Ze94+BNsLok7DcYEDsRDR0AF+MWxJ7caZbUxXWmb/zKxTUdx9o2hZ49ejbTM9yuvsSIjgkQmKsXlDhBV6nB2EejHU5o9SKGXVaC9M1ngfP0EKGY08/msNYBzB3fWZWjyZVz3AyCUXdsKBPU4Bu008sYsMFYDl47qqIYpgmCyq32yoKFSHwo+qnBFYiiSBA5xG0Y4nmwucjqJsOkOcbTrS8jH7EoR8yZUTf6LenLjkqywG2YUQhhhk9Dtio3qgywH/+ksTxGLcH9/TAdERj4WYO2Acuxp9TYkI8U0slhox3S8yutZpUwn2cxyWrM60n3BafhbTgAn3m/IFaVvzaclQx7bCb85Z6ZRwBtTPM/eFsPDSXJRbmLiS2+Qs3pe4HFo2mr+3ksa+kLIVrRjrZPyS/+/sdV1PaU0Fd/ixCOxgWkM87Qe0uEaTBIZh5spZiALk2pT7rZ+XrfEfiPUUy00GgD72oiQahf12RbVGUDZpoQpZflnCNUvztCZkF8ZTsZsypIxRrrlWsOvxCLbJplKpjNwqh9pfH5HFwocbmqelIxLBCPOLIs2yMY7kBUp1rW34wpqpcYQdk0DE8z7/3BDmODhLuBtk2KQg4cT0qdHXcN6p8+xYn015uywYvwz09HekddWeNmckQj265XhFgqWxaWAxQ1OFgLRfnPxplXlbWKKmCodq0oa1/SvCH+YbETYr5l6FS0pS6BAySBjawCs51AxTaJbhIkl3moeYAwXIFFSwn5DlomvPNIcoHuzXHifOa+Ym7yA8gjkVpm5ijihbxXiUa1uyiMVLnIPvklRvHDN/FIu9SG4/s3GOdaPrJu48Ax0bgtdMXj0CmjiTn7Nma8l9sOXQRd060eG4JxdO8aBkVPhr7FLajZK26lviAlkfU2aOpNOkSUiBj1nW5G7uiNBCfFx9eE7uYfKsQK/DNw60sNvjUBOCxr4/quFnV8xhTbwIh9je6lkgrnwM10glMLrfhupAS2oHh+NLA3w9/NpgL68Ol3ItOjNidcC3hswO4urbG3LpMtFtD4HivGpI8H/tje7VQTiH9eIIC+GTU1ojicYCFTQ+WSQ+l2kOQvUozPKsu3UlITWXWRr/sWv56GourDoNrAZRY568811FbO3woLXWMmhf/9zApa6A4IGBJKx2XYigr5WAVByCEcJa/cd51/PX/L+wGXJIJ/3j/wNUscZQ3b++yNsI9YnWfG2i+W8ZaXkHEHVXMqT4sKkUiTMVKfOx+UrCbswY6+2PY7j3cM43Bp+dHpCDpjyURcTcJ4Lnzg1qIDubbRAuUjv27buxnxOr2fH5cViemw9XJ4OosaL+A8LR7vxH3uGS9cPgW8phQab46ErWF2gssMOYtA8FRkSyJkLibc3mV60wKMooOK0HIxSeADnXMSTCqVOjZzyLTMSlGtDVzNXcewp6kCx4g10ooJgySGut3RbrjQe+cDieAgs/TL+AV4lLOJcrl+JknoS2A1Qp62KVB1D+YxtflF6hhd56jNSvORyHFel4jpwiDgjod6hLhnhmt1kqsvACOCtWTVwlQj449+st2rU12IQ0gLt+6U8Z4NEx4c3xoOR+Rk14WvCq8oxJniWXDsg
YruQCp6+RTixFH7zSbpLfidlG4H3Zsav6ufMANV1i+prj/pJCA1r1C1HeNtT+fB2Y9ha8uhcGrABwNqRE0b/0eklUVc9FtDOwVN4ADy+Snk58EXydp2c1FQpvqwnor4UW5CsHuUyLDHjDiZtlOZQ1bUGUmer1kqXSUEzO0suLfBolCRci4LtcFIjNWSUU6ZQgUqKf7ra1AO60cn5GjY8ZicbIeothpFLj9IUX0eJNQIFTIlRWl6xOMrVE6wXRf+yXf+TgzkcY8RD7SF6T1iY5aR42auUj5FZRgUV965aSnKiu6Wsgf6YId/4o9Boi2mdTNrJX6iXhxX6Wxi7/Dtb6dSc8fNyIO4796U5j8s80aJjzYZ2H0/3QLDv/Mjcf88XguJRG8QlYtbEcta1Lrz9ophA4iJPUm7FDrWMoz1qRLdnVAkHZqTl29WLordJke3/UsJAuYmCiHnuDuS6JW75s6uCzylrrixNup0LPgUWXOc4dbQlbHs9lbuz4X4UGheFdZnFyLOhaoxtYlS+3Ik03RvBcI/GTQudlTuMWzzlUGXXQhFv86XHdMt76PI1ckgxaBE/IJ71aGUyur4BY34cM9o11Q8J6oWFO6GmD2SySQOAJ1/SwZenVjANysKgwObpw1Q6JX376LtgbK20JnR3H8j1Ilhq6wJGOJSAlNNOCstJqfGv3Jr8RYay3oQY0Wd69CQHCZtcuSR+wKPyWSn8z+pXOxW/iaz1rL1CKxz2YS22bOJaZVivYIXi+wWg4bUBx6BKov6ktg9r3Vuiqu38Rz9Wwy87ymAgXX+1EEoMKeEvs/zTSXap6ZFnA0ZdeIC4YB6v/ZQR5EQbZcRkgqlQ+TSP8f9IlhXaAknZKC/uUC5juv0pZS/vhwM7Ac6tiEPoe6uh0TeFg9SHihqAqXNwWO1FCjOTeOH8KUdhKqg6Xm3EUXBh5SsBW0wOfKRFrR+byxQBxA5WltTmqeP9OJ2ofcdTeHrUOeqacWwhaclD9qd6pLk+YX9HjCIN1xUodAuzIzbVJK6ejBXC26KD1dbKmHN5ziMebQvoJ1/sBzXM3E5nX77B0HNXwBtFnlFxwnfQfm3Os6XvqcywzYpow8946x2jO0+yozyXgFpGy2slOMLINGB3oMCN9xwpTkNuJaY6u9buVCbTr7jKZIvykfM9ddQUTfgbNXs//7rQYdDZefzdkJhZWsR3IxetNylUIfVHSR+clhwq+fbPhXlGcRekyfw+uQVCH/UCdvnyHy445opnOyKVTkvaC82La9u5eFbKC/Ev2j5NCiDWABJU9y9HF4kmxo8b/i1aUstseveD8QEK2Xv0Rk2QhGMqAIUU6FuPh86mbwuUji5PcRV75mRVS03gTtZlrJrr6Y2VrMppvmXl8BSzwATfI+UKR8ZB4yufkJ1U8OqplQWO+RBqYeYUfGL38v82atnNWtouorGx5N6CY55qc+dS5Vu1QscHhkO8EgdnYxM7otuDr/SZnbJMzBbFsO9i4NSJM4XAjtfwZWdfRzWtU/5FnH11/Byu76Kf+qJE6rZR+5pBOkwR+4C+X6X2LthfmV5KZCtZg66Qel5K6couNtS1oDAnOuKvbV+rBEaCZbrtmfeBbTv6weTYyR6ZHiHNK1vCEiCDELWVT62HXxNVEZXsCV5/h5/bPpoNxUivw1hj0RG3nLZxZX2O9qvB5HnAfuGCkkUPzkVsNSaire/TAKKnKJqS+vtebupAMjPnDvoaOu826X7vCzoLotirOTu7ZdPmgTfA5p9c3axfIs1NZ31BFA9ehCkWCXVwzhEyIopUbLgSPnlSV3/pcapjUa0V3Lm9sJMcxjFZZ9FyfKtsP3QRSZ2r/xz/ZzXTetCJ+8wJOYysw0vzAtFvErpKv8tsnhjTFrGFhHxuU7w7qgVeU3Wdlfjotsa63NqEu0UX4BVpXmJEOlac3MfFcx2MnJsZhKQHKwDJtlwfyU1xJ+qctjyBZdQHucoxR/r0WSeOanEcin0n5ZsP
x0FL8V3Fy6zB+r+wGE7leZ8abPBCYlqQEH9JUOGiJKqo9Fa2rvUi+gMl/OWHhqG92jYPUClP5PTEnMofOKKKBlk/XcLqxgMSmZpxaQd7Wdj34dWE1Z+4FVgmUWIrCEsgAFN9apYIqHtyn7VYrZ8Q2i3JqBezCb6dwoTvs9GfgW9yzNChJGtK7aDCG9Fq8L5/K0NpcyFJZ05r1YORrr8t+v1jixq5o3MfJNcSU+K+LsBcHABRiRMZmdbaow8UQXvy9/HIQ85dsW+YQDmq0czjEbicJr3/oqE0ItusPZwJZRSa24BBVFiZaER7G29wxuFqVJArUtPxSZtiqZKQ72oZsX5LpcQ5WCjvXjXhX1P+a7xRzG6bJ7VaeMfZRtF/7Tp+iai1S3XkeBSePF9NxBds6K+Wa/ZLscBJPRS1RMu0DIcXAPpryVA3hC5AXPkX+ZieaZet5671utbLiw/+XSEyb/kE++gVgRnDvjqrsb4Ccbn9Av99lqt+LEccKLMUfGp0fHAZevApU1OEx7HZ3izJkP0RMp9L3y5pHc5rKmk4ZRnW42Wp6Yq+wRA0Srp0IoMF1Jv9doiPUiCkpbH2IxcKIF40M/sezlA+UThVYLK03YtWjIvRIn1x7Hous8w0RdC+R/chMcXtZaol9HPHFOcsAcnBOwEb5OfQ3Bv2a6ue+yC4ePBaP3Hm9PpyQodnG3xj4RHALhrNb6ta2Zu99KOM1qEbt/XVDrIQCpCaUmnf1poc2YzQsgEn6/i9YxaYskWLnZtD/bx7WyPHobwahGkik1M7zBxv5lQAbisNM59CwK9ahin/RpMlfeqwqgRUza/o7eorecQiFjrvwKCGaXBaVB1dYcX7VfOnbfWZBStrbTOrWGjRlWHmECxJF0nlp/fOOM8ne1mO+j/Trf8lIRzOtVSpWauuq6sc5krUk7UkfHlmsGjc6LXvoptFtAvvjuNA1yWz4BdbnNYoKhT5spGh/8eYIVaE4QmkPQNp5fXhAqCtyI8ObRt+ENBA/d/qwQ/h8p5xCEENB/DIjuRhIBL2rQynHHds5PLfoE/peFoy1Zt+RUgmsgQRESRpRHCf4MEjN0mrgEI5cYs2iB5tnotdnn63LTy2Jmoh3gK3F5pkrM4xJ1FbGpAb56rC2GVTIedmcpcWC8JMNvr2Tif7iXRHc1bZkrDi8YyVHaer+tPfnsuHnr0gMVjKtfqgsfcxjxfbNewUrnEWegsuF9rUFOt5WHwH8tT2wmUBNi66yisOqhHrfk7QwmO8hADbqeKBVtXC3N3lFwUbTYyWTbSxfNBzmCoTtzuCCUk8mtsOVnqnXt+oTkPP9W2j08IaChr56fltYT9vP7z8sZRNGQ30xSSacoZeUfLLLeDqLZmoG82taeGX2d9VbVvtZbdr93Zbu2GPlSPAMsRd0mIMJoKPgb1tsOh63XKFuWC6dRgoFnx1+vlzIgzs2O4cqr5kjaJUanO25BKzsT3ZZW2KFauXuXvxX6tSljrr05fZ9Hu81nJlunUtiLMPMc/+XPNYNz3olnzS1a6Ohmc+GQdcsI7Kd45jZ8U+MFokA4l5D80ki55qMbUXb2xcyzzDLLozRGo7J9CgLaKMCxtx/3jH5r0YG6S0sAmfP5rn18jbiBFI+xBUKRmvHYgOQzfyul4kQaXL6iw7qCKG9hh5DQsbN/X2g06COOvGIMxypvqztPOEmHMtrwJNNSz4571VIiW5YvuMq/S6t1mybgXx7+FrF3ZIKxntwaJxeQz60py5mAtbtZFc9KvI2sj+WY3gGkEfpgy/RMeRgrsn2SlNw/L41/V1lMv4rm+Lsnv+ylRQaqLEJRdBEHXBgt+TNfK8yFaLUa6nuOeUMsq5L6xp0N7DU4vM1/JgwrhCmWROi5IfBnRzaiWYsoovxl6kgRIE3HulcOf21LpOWItNvhAEvLxGh2+KcPMOnT5G4yKV4W1K2oGltQ4uHF8i6KN8W5/HzZBFvRXsSbIjQKKmvb5Dp7Ku6qVL
thRBCIumDR6/ArOx06p4L15r0FsGJDXw3d1hRXt5PIr1YeEn/o1blWz8o2Xr3AjyNe4miUp+V8tFaSIhLMAr33oEyW7+MN3E+Bm5dTMHqkHEq0NZxx4IAJo9JZKiqOxcJSg+RQIyC5Afqu5Xz1NYMaiQjvLCHpMXTYMgFX7nqPzDv0PAnfBfwdM8mYC4jQZDquTVorgyD2jmb6mJMLBCthCXc40STdIjtbItO/PWiE/Ew6N2t3bVyj7AP9IdacZr+2iq0M36Z3w0vQL7AG+oncSwilxsPlFdRKzH2d6TdddE+ytwAEhnF8559gd84f3hYFMrYLz82jrzqaKWR5UL3lYoG9yP9GxR50GCB5ZcAo9fHgdwfdcl/s2FRlB1O49TWjF0Lbm2HXXGUCm36pk6UNIR4JNDH9eqhMPLy3DDxR4XGV65WUjRjcRaJViwLXEjXr224jQYA2gmfEzAsv67sOcy4UDnYRRBIow2YfPVX3vyQHEjICpnTdLTRqyye5/DWeY49cW/aOZHLw1xOgMGahcyrOCBGZTjx/e7DY1Ab10twCqnNUXT/HBQdaPx2oy4H9UVaT9f/3/b7kcoM89GhfpibPW0MKqdNwO0NQnDO7GLORHKfBF0WYHbrGdDYtK5fPqsjfTVp+MYq1pnc7v7e6AD2h/4u20v00RnGAUCvjDQgDdULiYRehWRzjd42CUiN/0SSNMVzsB9OXl/UZ/YtcXuqFEoyYSQD9xuPzKXkb/IUB/8HP317nRRw8zI/9WENtselNgZT4/eZtkegNJN+lu4Fz2RaSrDVCmkMM8X/rw+ow8SgYdi2M7bTNJhHzqkR1pHviUtbtQbVrGnXn58E/+Lr+StoQraiP1bshXyzKidy2wrOI9Yvsv0Cr6TtCIUnihqIA9Ttp35KQgJJcPpfFGuIC7ghtz7UP6iu8bpLLGYfd2k0C9VBIo1XwioV1ckb/ppUO7Ll29G3pFAN37i5BOCOlkFCn5RBGMdsj3LrlX8dLlMSXbp3J2ULuqZy1b54rfmzyWVuiQ+hOB+KmNDYWrBpSWOYoafGyUpduTE7xQ9quxGpqwavqmPPHcOxqF1FY4s5a7UOXl8/m6Ri9vgdJj7lWwSKNRsW3L2p0KJ7ZohyR+9cgkrd8shRCU+4pZa4TREE1Xo4OIUpOArqUcYtl09883SkLsolirLyigJtva87f3smjAMP2I8kmFQcD9ifmIKPsLkSYOlu+gQmAYDR2PFF8X9fM3HEp0DcM+J4vXFxNi5xbfhxTMeOp668zn7XAOL9F/KAViSsNFjILBDGsdrDM3VYwEVw8DE783h/MdcchPOjHte1izxuPuW7KDK6S5qCbNWFfikCJ7dl4QqjnM139OFLT2cxbUfaXXurqBoxtxc3++gMbhFQxUmAsMjfRTSHKBt9vQBrq/YMa+wmvC2gmcrGdh2OIplKIrj88cHFFnjVW6TaGic0fFpy09YSLiWo/uct/V/6HFQVTv76Q7OQERh6+4hBIwwVTcKtf+Hquc/PyX/saHNFqXNWlxRyjWfSvIBfGP/ClYP2Pme8GN+Bos2wmWfJxtbP5XKCJRK8y0KZxIj/Loz7Nk+yBPmNUuY7x2k65VTx/fMDr94/kZuWyjokww3jSXifi/wvod85jK9pOyhe71RgUaiPKzN7Z461mwc6ItJ1V9egV7U26M4BBPJJarD+d377p01lCpqLaQBQ2EigJeUaq2ITqQzAMobl9TFd4WjjGbB+7wd/u7/v5BHDSQyFwdof/Y/Om8AHQbTiOhNrvvqFHWvBhC/n/L01lzwIWSWq9xTH3D2Uu7la0n8kB6/FOu8Tz3LdebHwvop16l04ovN4K9qtJHqELrB/8Y2Pza3c4EUV/d5xGQitThNTMtltFFDj3bDYSplAaoRTB797gm4fdj7+RS59UdzNYhAh+W9ahZllpt/zG3NLrSYQPXAOvmt3Y/wIf2KR70Has8HnY4qHHmVSxJLA9UwZj4LJ1JAWkkB
+vsYaGKrZtFW07Y18ZmrmTCNB27oH6pEPZBDdELehPsNuabXE2MXIhZeEYAQY3Y3hzY6O4VvXV4FbSSkehE3sqzdgI6l8vc3t++b/vh9cZtQe9RrswcGdLVkMrYQzfEnjDnmXX4eCmPmvZX+W4j1Cs0ObnIIc/NGl/8DBHp3rhzBmbvV4lT5f9Huj0YBmIAAACMbdu2bdu2bdu2bdu2bds2PlaH6CCHXswpaXHZyJ8qVKuW0fM3giMRTNSt0rmICOtVWj3SYFgChTE22QPGmaoNHgJfF9HabVlm6CDzr9DymEapl45f2dDEnrtMrcPebz4JwS+NONGpG19AqPdxedRgRxdSzusjHM1HEHh0Ri1g5ZtFiFi6K7hhY0RTZzMLyuWwln/mwHZgPotM4roKVo/vcq7AgaAo7vroSu4O0reqhS4pFy9phJDxmFMi95pOxOXy3PtO1j6+v+j6IdZEvChzcMt9eiSlDN5RZtSLW7h1o1DTGtIGTWVQMAzinRpUj+I32/nt2tLnnYYhY+puIPdsFpdBfo38SwAR2495wkRomIUaYAQynUB4SkGh3kyGxkf/beMb0ysbjfLsGHe09Ny+YdOvPe0NMdTV8r0X+McqkCAoRHeEgnQGdT4lVDvqyNXffA0wnRmxEpwWxpS2R1pLN/1DvlTOPAXuvejteW5q5Oy2RxmNH6wvR+BAf7zVqtp1hwYiuzAk+GeP20aozsznHZLCsyZlLL788iJy/2rYrHRTn+Lfsu6ezNVTRW9DoWYQsvsurtmU/UTZV5gmL0uRQAniQhV0TCfSd0OyMTlmKi4BjCNwC+u2Rp6a1bz+CaY1PgY4GqlypwWK/0JAShFwLhQ7dQGeXdbKyb5aVQb749DdITL74Yto4w6IKS3sGCW3/aDlsDYT/J2xeEG/cNfh7K88sw0sCURFYT8yVcWQaNSf3XdDFzoJ9nJyXa93A1YQr3x5oaev7EtjEsJiR6Kf0VDME8ReHBsApnHbIBxCLXkiqx5sIRPrW0CkoELzqxwETYqdsQR+ztUEmAfhNCgM3/s0euvUDtisPRejF0/QPc0SbNgcllmdD4uSBckxNr2m1pnI/cs2TewSCTlfadPrZbJoC1lGgyBqHhYxrLlPLiJTrCBOcbaO70+lhCWlxXtjTscvcGs4b/VSPz+ZBUtoRqilx3H+3hMWifDFMwi0rFPgDqM17hJ0xKmF8W7aIsZV36hDVd7JugNdXz0Msr/Xl8BJRfV6fbSgDSHh8xbkOoAl2ZoU5sko6CeOQ5QyXJ7SOPdd+ORGoOEEAoCuuhENZZ0lzkdmBRdieGl2hATi4UdF+mea2k63LQcF32w5jy0rX5RGyWO2OG/ngL+tjNCPTto9N+8e3W0IufPRYoNrnjYjwGKIEXx6riVmiu1i8tDqdNGV28nt1RCZ4e1OGQWmiLGrfiKgra8YH23tTyrAeFjlAGbdOE7JP79A7pw9lw9Y6mcTp4cUGl5MlO38Tf3njIC2fbQ6Pw+t9RjhvAw/yFUkRie5EXaeTzmS3oUsr77HIVF+5KAHXBf7iNfaY8gj7Au3j+kIEVJU9ZZyKnGu+gnlQGslEfogRea5fWrhGyRYNTerhs2zNZTOFd6hEG/UgFaUzhcFA87gKAwcUUz3nNaxY+vOJBlaP8IX18ahBEKqxnJSCZ9TpDPi4/FqSMXqUQZ+8B8y8pNmhn1/mInBNHvcFjgx7v2+yFeLgcXcwaaoklzh96/RwQoiFcorou0vg7eZij/iPGnblYVW0C9v2YcgFDWD5swqzjgTApXPkfHLAVGJPNWf7BGAUrmp3FRsgerPIAjkSoRAC3zdkVtlAzLhVr8XZe9BmNfnRECNmFb25FWo026r0gZQDCttmLV8hhBcNsZyq6HgK9Tq6vJZrzwLRg3A3V5IKXITzjDKKIAfC1+3XyPGhi47UQW7ybXgHyfqVFsRL9ETEaJd1yhzPI0fMZ1ginz3IvH3IM7FYP1WAOpHY8t9Li+1L
/vrzxy3hIV39zVjGXPOUx7qekY/Q0BWqr140JHdB8OqT19MC/oDoM2Pyqp088EmaJZKqBEt24aMgOzOFVMQCAC1juER6sTkr2rradEvIiQSf6pfy5GPC32GgLVgpEQtKOaAUpKe8ol5ZROpvzM8vZT05hnWf3VrtpZe9q0xL6YEVNO9Xmw6mDt0kXtyB2DvV7dZxtjaAtvRbxbac07gORA1vBVZyDM5x23K8zPUn9HSyeiwwT4e1IN82m5r1WSGsqtzlK03w+wOSY0OALvRsYUBE/i9oNl8x7HTKRoDiSeZh1iIhw++qoI0Att9ySWqowhwXLsxsU8GgRPpzrcJX3lIiCnrSeRP04QDLqerbgTkt9GOnjmu5AgG2ayOLOpdGN1pcFfr9pb0/cjsCvcVJS24UO74BmlYUQONWNmjbiBYAKNKuLuAviPxd/5auT8DQKpZzBpExEubz6jqyTf2YiHStkMP7OTnXmVfszpWf5awyyDWsCh/JtrzS9QKFXoNFIJaDekrsFUOimj4FDC/k36PwipBmvdUNThI0mIedCCWPRTVIgn2/hcdRkKYl6lPKAsKvzVgTo53Rp8n4ScSxwoR0lrrYbqoot3giUoMvMIDg8CL/rm5u7SqiFVZJx5KNwtx6rWhuZV9pH9OxlXZ8q9y/QRzCpaYIvSgkDgDHnexoXTpGVItEeu0hk53rn62/5D43uZTiG5zL9P/7dCW8YUxPWmUksW3rWBMrogmDkR93ed7qtypSWKr72t7joOAxNLUaeozGze/mnzovDNBP7nZQgkpd7LZeefrx+0mOarMCaqg6ryOU3PP/JuWG7zLibuuZkVVO3w3fDBCZFmNqQtvn4PpYuZnYgAm+Rpgj1RPax08EbE602y4CrK+uA8aRoW2sltmqAFvT8M5lG1X9W4nXHlx6YHDW2daS9w2l+2I8yjfujLQbm+L+agXkIItrrcWhnBAEhnUGrDpiV8PsW0QiHK5/mo6IgIrjQReJVK8+ivTpkZ3exw7dMZCj047/nkhoq4rXdG0rREGhd6yvfkK8mv7faaIybT6wGRgfFPrfZR+M1YrTDHcULjTBH/aCGGiCNw0DBxOu1i5KAS0PGXeDi8ZfuA9rKOsuhE3JHxp6PqGh+cbH5BSKcduMYdU/4cq9RVUdU8VQaSAQGTCq/xQC3UDDun0D7FbFdRdGM5Brl8Q3zqhoUzHSr53FdSLcmlIlKlLKsc9OTavnUDL3SPrOtNw7C14UFshL8DA9yg2kL/IlIxksO8VOfjm0OFg1vZW9wo1eab9zjLs9vHEY6vB4Mr9wSFQn0qcWKCagSGcmu/Naixi96IhwNfMgj/xt6yxDXyRuijAaLCgjzHWTNSW4LUU/V/K+gr3+QHTlbFocF9DE7BuLBb4WzyBI/JNLU48ifzXiapevcEpdGbS3Uegmq/rCITF6wjE2NosEEN9ZtA7FGDu0CIPWDsqdbswqdrLjTUb85030R/8r4I0k1U14+TRwZCHfw3FbGQcoo5y8/1EEctRxJaQ3ESXEdv7VPCVDjqs7S9s6xTwVosDnLSYZg1kpbXiw9143GVy77pBQXhd5lz4lh3BNsjTsKWMnCyfoMZd4/NwY3FXjwtdZxSXMjvLo6WbXixy61SdnKuyGEl1JqXFkkh2j99u0iDIDB8RknOetO0q4UhJ6x3XNGsGUrOOSEuMOsegPY7MM2M3xaRzuyExpUaaqydL7KcUK0r+05CshD0dTVzaEFSRoXBUzCCjy57FrS/BKKiVakJtW5mm+p47Dyi7Kx+YMepJFnyli6BlcAU6TaSpwDAVz8xa+sXRJzUEE+TrfGWzuJ+A+7wnqc+OcXJ43ZOhpb6LAN5f/37SgfePRqE989ebcP3IFZ7C8cf10Z/lomwkFRHU5jXpIqkXq7R3eybk+WeWkWA18kBqcMm4gpuKtMHS9v5XkV9cCre2M88qNiGBTIB+vl2i2ucYnInVU+Wp+M/xZBK3luF1PNE3MbkO1
7JUoaQW5jyw/Eg09xFmr0lw6onRfD67tG1YU2seHTba2OzJSx/FMTVYoVaL2XBLn0U1Bqb4feLiJD88DgxUojK1PRPW08u5jm8EGiDGEGWyoaYM152sBLdbzjbffXMZ5dijV+wVGvBLm9ZcVkuCFS14cQhj8KNu+JJ0HdJMM0ldVL42CHXZvep+5ERbY8Bwy9sLkWCONDssvo1lYewRc18bzi7kHentW10X1adY633jdWdZ+MLzdiwM/iAxFaqGmEn0IQu27FWy9iKMlnm7VpvJLWpScEdpjh7aeghuJ1G72hT2QUvSo1Otgt2vqdQRaBKrh5NkZFQWlFDeEmJ0kmjnv+QotnTPlJxqmZlA+Y4igp9L/txIBWK1gc6PV+T7OdQ7UxOs6AYk4nfXMUhVWTcmsHNhvuQ1mIhZrzJ/z2Eitz5y/S2ruVBpCzb1isP5Jg3XIy4Z6PbfgVXdcPe2DoFJ7jijeNFFjrr2C9MOE3iI7TrJo7quGrpPu7aFOcnpxx3ubwQYbpzd3ryXNJJYnVKMc6hBxQmLoIKbyDOx2XyjSuPY3HnyYm9lkyi/UCNCJ1PktSyqMD6R4QhQ9aV7LcfpKt30ZV/y2zbWNXTCwNFJ7msYsdKI4dMdVHsZ6JEGhFd49OJS0GNV5g0XSTmraAq89uAiYaMN4aQW/7Sl/GqF76ItJAZ1Yt7wCNpjsXMg1o4NAq33YfHlxyt/EJaPVDFhawGhSeOq2FLkkeIh1rshv2av70s26A0H/na4gKbJf88CJf0U4bUWqK3GYKm8Y7NrVSAYGpN1U7YygH7cF3ICQ83v5VCy61VHAWn+OGYQHkwWmPcDcZwjE/86RZQMGW9i3n9I12GFHKAPHJJ8OMnLj5eM+mqzbfaHimXLjaSA23TUrUjsYRFE74vO7yMwEfTjrv39bxHCCt4WHwMzKa1t2npYnvizARl/BAB6gSgSNBAcbm+LIGJioo6EbE4WgEIT9FIvykIOfmFozXvNNl5YOo1LCdxFonRo6Tsuz0zlm07n5qHCguuBR1MAhgmp0ItW1rGNC5T4rK4PDbU6OW3uCOx8vxv3cdgrG7ibQE+eZUiLR0ZZwmkwHg31RismW/osID+t6kf7o/iKc+c7eTmQ24NClzgP/miOEeCfMcuoWDvF1LTmJJjyNyhE8ivBLIUf8Fbxk+Siyd5yJgmdRTrEPYaxa9AnMauDgttloRgfLOdt4QF2FCPboXDWHjtXZat8sC840OJdRpmtMYWkwE8xKazggsd47x/2a4KYuuDsqt9T+RsFCeJur9nfKeAGIuYZ/CboaD/fHVkWnPDSZ1cTte55o20AjPkuc50IahngTMdY9/9k3CwH3HHBtkO40UuczPlYEH3LBkJPVmTJ9TX6Am6WcSFg6rjm8SRIJVATxRyEEilv/mWgWHf+w1BXxAUCl0Qj7weRGAhTmdKwulaoO6rZ/IOTdCamzJezuQdCn/EQzWKFTy4H2jIWdxY2+MuCZNYZ2ndh73wimKVES9D+EDI6G38Ulkz3b2KWSSFxn7M2xOzdgMia90ORoNg7Si3ylKTWtNnQgYPSqU1LWNQhAwfLE+UV+dQgrzdYz+TbT/NYdqkX4M1eU2Kgq2VwnLXqnLhypYFnEu51vWeQMBh113EIrqj177adVANsDbzKmOjOL9Nzt2iOcicYdpyvLUrjz0AlxFQGIiJExsfrooaweU0QZCAZ6zubf54Axv6Seoes9aAA0suUBqB/gL5f+Vq8i+dPgWgvqhq4n0VCuPFR+YmWLc6QlQOl3ZrvduhoeQf4LA9pXSKMvfzlgvdLstESd2USkmIqwyR7P945aBy6aPYrqYdp9+nRd6Sby4GFiTUW8JbXgiE8mpF+6PCbvnvRZ66nVkQjIYmcF2l1l0TKf40ifHwcek+lqnWBtZoGeH5dqcd+Zgh+Unyjlag/lyQmTKbsr69uyEF+QMoPAWn/W8Wfk3LPEm6lP4i2bWH6xy/J9YiBPa1Jx1+RI
VFxm+WVtMbdQLAeTQu39Yne/6gIDmVwTtZ6PedDxPsJzUwkU1hQUNaw1ymEMZSBCzMtEhKJ/zH8xBevHIuUQutO4Lx6XQj3gLB3OfX5xKT7xyCmP+h73+x6Y2sSv1O3o49v1Kh3ykwyRbbToJUx2+QhMiqR0pFDhd1wARQ18aSNG3hhiSoPLIetzDWCYe9VaMQ7rDDahIJUnySifwNmWYSHcNNX7Z1vPq0OhxdxhdOX3+wakSHDqnEbvaEapN338zKT/uEuSpm8ALpXgYKXj3OjKtBQuVw9lF4fPDnVOKy+M8cBqlFRvtX89WdTJulsJVwX4DaPNAmUNLc7TpiV3BTqaZDsArL+yH21xu1IN6KgqXJ7f6N4YVrBYommUwEOI12SH+ccz9g8qxq75XQur6nIWvn7kYyxW9LjZJGFc/yJ3w13niSG+1CGvw1/Jal2ieqslB9tgpQl1vXt8hwX6XaPj546LCoe4v1pAr41FyM69EK7rltLlXxJD81f9lSCsRh0adSqS3KRoUScAOxlf+W5hfqdtQenkESi7s2Ynoz83WrrJ0ekCnd5FmByu5Uf++SbVLuCjvJyFwGsmdg20nOsMdblmaWc1EFf8KOBiwtL0Q8GbJlfRsAzk+JJPkmrzdIlVRmud0T21TrqK9MamZZgJczrdX0HVTzxqDa1CZtqvX9d9p5Qn8SJ3clnjt7fJtzYy56XSikQnmQPCfdJwsrQDW7Fh4bFJSOjzDpXepl3TeymzSKy2eN9BrhlvpbYM0K/wnUqDJdD1MP9vkDglqNEUmD+EPAw12E1MQxDbFP4mM1DQ+Y0vTVtECiFvueywtSKX8om00XzkRdlH9mYGeJSChHf2Es7eOS2Q7T4Pa9QpmL7KKufhslwJedmpOOkp2zQS4+qYUNo5TsUmyEZ8gfjs2HIq23/hvu12W/EOd2t1QDJbyLDvHUo7SupBRLnd6ZmWexowyWIzBAoLYtswuOuHGyvUNGWunMs61InMtBMn41lqJhz8yMRR5P7rh8SeeonezDZ+9YTsFoDwqwFdrCauY7CbgpIK356cqQ4v5qqVVm3adpX4SJjEm+qjXK7D4x+/XSH9KysPYWrDIbpAj6f1hafRxz1oCRYnjcJqFfirHfK7Vss7b8h7ozDnECvPpTcGLhNmbNJaKjPAH+AoJSwThqzyT2m+ydUzDJzE6OYK65jb/F8Oo3OLJK1eSZv+40+25Mgvzom32v5LMmWZShOawDd4PtPTgONHWDfWUywQU1GpiWMU5qtBrCUXJLKQT+kvu6Q2cz41GwthiMfwWph6ueBbK7RxWpjwWGLlDjDX3BQ8Wnk9tKBTVBR9ZkbqRCaV/XMzlyn4xuDqjEV33slvU5ZeislN86wSBaYXzWbyOPntgf4jjHVGdrdSXRj47Uf/ahIUjzxpE/jRmDCwC1ERCr3q7ZVLE+wrWVoJj1PsX/Cap2E8pxwfFw6xvoG8Y+2h8N3RYP+saWgeG46CSOe+RWKsgXmIGd9Q5ikqjMMdyWKKhBjTzPUxEq1G1WpHF1JihXsUxIMJry0RQgJcp1r3XWEv4gpTeI0nOsLucZglDzxNg80jf0fwndd7qxQuqUb/bR+ah7g+rccyRrTq4mNWqIBxwG6MIulf7kvFhGjVoiS57xZ8QVD9UcUMPE8RAW/YYW+VEULWQvN6VLAvrUuaq9F6naNxwhgW0uMzMHbo4EK5Jbgv59tJ4hwHaXa6jznWF1K2zGD/u9z+IDewsguFN5BHSEvvvL84NBNVRZ9Gd7bJvOjwdRTRVQySMndktnrIEqZhYOJWyVooHgXNYl0wsJ+xF9UdivXvOrmvheIg2uVd/9rScc0bt6N0XOc/uZOZ9XLynUmr8HUYQBQcvehris5nE6l8kKy7HpdiQn8iIYG22J84GaXEcyvmQSVjdVRymyixa9qgULQObLUevJ14NjGvibm9UrnCGFCqtmOSuZZGGo1PeY/p2U1xc3iHpBhy3MbDM8MecDhH
rq9qjj7JLfDU9wZ2cHPe/pYvp3PSZ2fZJB6vWu8/FWxbLzIhuLMNtxtbmdca5zux2WVkvLlnDU+YE2B+3Twn/LVKvX2Rrs7jrzoDrm05YSFhPmXwT47wr4EwD7LTfmmt+cUYqy6S/nglVRFU3miWbUsXOqaV6xUfiA8sH4HZts1VlPe4ncXeN0DdQMs4UPFJ6gr6N0uY0KHKgMkOmV7yPSw9PQksyVUOXxrbKbmTbro2VNkv9wZYMDQw1jzx1DdJPnMJdGxzZmofZ/Yl5kB14Y3C5dhJi13ILYyMRoVz3eTCUtDcShKDj4rhANUNQxkfTJOTyiyqhv7MIXL2WCE7byvAbiA/MdjrtyR01RgFCSP9mMULDbXdS1vO3fTVeTfCQOw5Y5nr/fe15M7MNIet76rihw0TZiAStsRWOOoLBFP7Id13DtxZTDBXL4KMQ9mAX/j8IjlmZZ0txCLeaVaA00PF34+qutMRI6yrxOr9b/FL0TnfMgP8R8ITKvAGPEbbbz5G/muRDFDFBAWQpXtqZQGznk1Ho7uIKgPv3Z7WMBfa0ePGWyzhTPrnhoPSoqVDeGjiYRDb/vrD9TmpQTgPQ4xWE0ZstKPZIsESW0NnX3IayM95lYx+OSSKD657RJTQpu4YfjkcSPJ6CeA3sPOyOqiMViqRhy1PpuGpFV98HtUpw1hIZNCAh5oal7Qwok5BcKpkH7HCNCgfL0A2kzS5A3MHbytFgn+3xbkFQ6KOGFkRsh6gYT9GOTuokSW82NpBVQZhWv8LCKTCPMV8FxCASitRBUjxmtVumqu2Bsfj1ELLu9fYNF32phtys5ZTbahQJBacLWOYyCTbRNDgDdeitKsPKE86Su27n0Kp/NAItz1WMQigfCyB92pgQ9S3zw8xbHHGaE3YVCuFaaEou1wugCL/LqiYha1Rp6Al9FYMeFRkPw0/Fxx1cBA+RucdG2bQmAbjr3T7nivVKSNGPTZVuzbB26oKggG4wPT8zRG9e1b5JxKU0dY0LZ6GffA4XiZX5hScUknPgTUjI1k975kiZX8LZe8611Lw1R93ShPStUzceKi2xf7/m5xoAhcsK0KGi1iOhoMus8u9wO51WJZ/ZAX1sUa7Qv3m4DR53+ZhK3DrwOd2QzkdyrpufMEkk8r6yT3QCyXv9s4dC2sEO4KyqQfR4FBFqBEUa8qwKwWda5Xv/pdL96nJqGgHV5nAR8l1Uygg7bRDoCTR4jDWF6BcnG6+OUFHBK+e/KGwNmHnbPOK4JHXens+rnZmXD9pO8qeIZh1u6r4jL1i+w9znFVOnBvbh289GFsff/g8j3dhAt0DCgfY35Fp89xhzGJozU3pEPoJmg6ItYt+uSdqCDv4qxTY6QueAjTtOOtatAcTpA8M6Zqevp/CsPnQmW0yexue8Lu2Jk92/SXeffAjBBBww3GDhN9v9dXRnDdSTWixMixkDVHRuAK1c9Fh1MeBKTqbUQKPKb1YX0cc+iD01/c7pJ7gaWviDCPq4cgF32eL8BDIO0vdKUZ6vOcUXmaxPZv2KiufApWWDtqFwzQFbuubC3UN0D1vZOIJlobem6sB+R2AdCvA83z863cKSRZ4Z1M8jGDu6xlf/AM6lVXXsAsfZEm/pjH8omNul3k7lG4mwaTvBXUAlGcds2gA1XfXPThpvECTNrDCawb9WO4271VaFXR/10kylErUKBQjoo7IZvFJ02mp+wN5JG0yii0n/klWChjMH3igi+mgxMPxvXqmd2wUfSRAqF9vpbQzPzzsjyaJVRsHzBF2RyTBn29ZmiENR05FdBKdpYD8zEw85qN59qjV5m2weBaCd7cSG+Y1ZWxYetZyk4u23F72KWvGzl7Jxc4ISf/Cjw7F31d6tOYfXtfSFx4qLHEJtrKQ98YCSBpLs6hv176Q/ODS5e/+0jCmRNBjh9dEOUIpXDbcrAzb5/9NdRllhCSwiwFfjokPqyppwaetEsQv1Y2am5uHuDNRUtl14JMF814J2mQZ
ZrLQTAiT7RwvbV2btoMb0k4p0fhBfl+bxfdPpUM2fOcQwqxxCH+hwRu8gneE6GctuAUnHe1BpCQ3vkmYUZNSxw9NPyqzrchAZRgwVF6AL7a9q3cFXYFWA1UM/81GP4T//QKd2hPgImxWWAYo8xiZ+5nYB6mhR9z5Cmld8pII561uKGDUt/ng3F5Por8HcRXTF+Sgmqq8bYgLEmahC/9soOyY0l+ms+QupBn5pead8+TDsQjRISgZKpZ6oz6c1LoKBVShn9PmiAypoJMb8wE4Ovor3DpgxNRsVxiUxQqgpCqPfs3oiPdCb48ARU8v05JuCWuEcsJ0/p2b3cP6+4x1qvCQt76g0FyfTbh9psUkFSSt5YA1aUX7rqG0ME8mHas7XlNsPGCMrNsV6TN+LAbGwfO8S/wiUdyh0XD4xA1gnM8fCY3MlCsaylmUdw6qWhb9y85LTs4mwXbjSyW3HC97+xtTrukIJKa8VZpLGQ3tCw4a6yiPu3Jd9xtSsH+HTQVZZgJyYG1stXUDq3ymuEsCTPghaT9ee9BPSU+KrVIhQvcBJoJa0q22b5X/xHTmSVDxSCZ5AgERWRq/8gSlIjkNZQngPO1BzGrOx26eaLSEcZL3hjoWCWI3EwrDecmB18IDADwS2fuMyj2kNtWRhA/zqL5GiAgnTZ3X8X86PyOQpOyAl49urCo36dN3Au8uPlCQukC3UJgXhY62g8c0RaY5N2RuEz59XBoN2lOYTfelCg34xyd8CxZgpvMTYgBwXfv0ZP/vh0UR000DG9dg/G+nj9hs268GnqfTtjoXl1RMuC+sYuDIn54KXL/sPrTLYgFeorXeUs8Q8PncfFmnImGEYKQkm5z7cAWLAJzHdvuVXHcGvPzunHdI3umHnDR2RLcD00tr8x7kS9V+ZvAqsjumsle640aa83lUPe1uMPpPCFscRVlaSpcOWglD46TA4bsvM9Q+O53A/cRJuABrbBLKdVgIwsjm3Xuswn1faldXqZux1t7h0wvVHQ1ZvrzA0NtbT5rH2SiWDZ0rpmoy78i09zn8LpVVTj8gfGdE24R4/7fxZxGHSj1XJxhobCsUPO8ErlCoHCEBoCDSrwUecoFS7OYdJ8Z1he6kjE05tdybJfskNkxxF3+tIt2DRiYE/s1MOGBjg9B8OvKNGesi9DJmVxpCWzjrdvqHk7hA/Lr+zX1mvOkz0egWEP+oRKw9DbaXVqqJBDOnDYCpZnauA1+pq4SEpWbxUtKBQrzYopxa2NKGMSEpsguyUUf2yg+I549HEbddiDOVLuv/MBcF1nw3o2BL/f84GKwHT2zObE4ooFJ42KlAgj+2iYKxhdc2JKZhIPfkW4JW6JR8UGl5R9EtGSuRwEcV6hsSACXazmjL9ptrTCfApjZOO7MFi6GHxAgYhkY7/E/n76hbYZvjXpJt01bQ9JST5GyyfRJ5Pyy/aI317VNkZJsVka3GxbM6q9t8B+cPQqqlqeneuNcmE/pUBScjUTYJWtlc22THRoDJH9ZGy7nPzNWHWG3ekV52WXeA3y4ghN49xESPo4zbzW+S7IdEkgNM9h3K0zvoymt5mPlmSoHrJ57rsjnKGFojjFIyC1rfNGR9IDm4wy8hik2ZN0dre3xN6GSy+PzwNVhClnVgKn68miv6ZbvKAWB87ZvbZ8F2Om5T3M0eH1EhQ9lOXAcM/WmAdKURsrEec9YeXOXHdMq8RdBxS1RRwPmwnbguUHZxHSdSTJU6AwYr2afP0eXERRVXOzxnYDDzCTw5gQAuW1LRwwSH5kvpQqjH4RJaE4uMZsQIxg7VhiBubJwPVML2nIp+poWtbHbTAr2s3dqvikYemn59xEQs/mmFXE43zDiZhtWjV72iIvQFwoluegVmRMgGH+hJ3dUJvv62VB3HFThVqGak/LjQx74pIhEry89hd0DueyOB5fEaJ9DfDi0gJtJpvMthXSKWdxYfFxmrlfMhXF51mFO9p1qIZiSdLxcdEVRmzGzenye9
YqygqJHZ7EZsD6/LCl7e1KYSdPt1MjMm0B/bF1NzkaJHmTezzGOI9BMisBTRfGn/mhoN/VTaE+vDLc3F/pL4KfslIJrB1Cmqa1tDtLMaKRm7FcuUVzd6aQJDNiGHKEv3BrnbM1tMcvbBUtZClCuG40tpD0IY1WwfHYi10qGjJHl5BGi069WdID2jIq/NewB4q7lzMHKW4cqwjFYe18j5Vyhrt/U0XgG5gFOm0+Yuw9b+sTbNbUEkaY6i7BvOasrW1yy2hWsfbk7ra5B5KRglsEkGcGhZ3ezGugqO7i3blZonczOuXf4dgRh+mu+0PcWNBtzvOCcfG0COMFXnGYjbXK3Avl7pcbEH1w7gjToMtvbYm0HGUyfb4PwZDyFVFF93poCW+gmta5/GrxUWdOtKit7l15NulZcKHr9X7V2yjl3FOyyhvMLdr4qsz21GgPmAGkaHWs4lHK2G5qhhxG3jlXJcP/GDJNSQ0lFppbDqMYoBHTXci/D90+PlDrbdB44e7j8RITThyW4xN1yq/J+IiPb9C9yvXqGEzQURY7jVT6qP1rW29XhD9cWVBKQ8ZMdcTXAdmUOYHhOT8QadE/Sk3nukI6gDu1Lnzu7kxqqE/MtF8HSukV9TCY4wSvVCMhOxKXbXKTMQAyDRR2ECR/6arUwmykmmkFShrUs6WmRIShON56xeLJYccmkdjlZSMsmQnEAiqpNiWSDfRunInXpd1CDL06RFQ6yFYwkmaey5Q6U1rOxK1Rgrho9iex3PkdZ+jCfN34jwhVT51YQzc7biKa9zdCWKiVafKOannVxtEhD2fPWf+VjSSrMreq1Q9Z7um+Y4YM76kgYQMgcBhcyrh51gcC72CX5fYo7053vZxXYqUhuim68krXevoiSsJlnopcn7iIckSnTMNWUNyTOh9C0A1TNyHTx13ip3kSRtcL7pe2qYHLZwCU7UXVlqyyVM5F1Nxzkz1VdmFsdQrbWKxFtZCuBKc0ertJGoK1mBwNVXeQh2QgX8EY5NYz7EeufWkG73SuOvxoiDqkLYerIxkaW7kdj/j5v+cnLoz3SLAla+Mxu8FD66SOehTTyqFvWKgyJIZp8qkQpM6AVhj9MTtig4QQ7YGaN4Zc8dHvIT9QHECSOK/1tz24imDfELJ+JDJRSBJpJRHkp6Zmaujsa4Khz2SQ8XMgG94Npjqd2sngvO8ZVl/vS5orrQdEqee1QYw3ZyCHrvT7rHXOuGcuVpcSQw4JNMKaHUgbn1kUy1tZE2mXWy7sbx2/E4P8aSLvp/3JI8C31dMeK19W5FmmCBN9saXvxzlW/kUIGrUX1q7J4ejHx4Vi7yBfezOF0lr+Ju/2RCsYNSuN+PZbYLTuld1CRNq149H68Kvi1HZzbHVCyj8ELvG/D/CL0GQG5htP/UUsRZNt+//AZERAFPbfHuUJcvHDLwnaki1IQF985OmjKqIRsITzrKeSoAGHE3BQztA5GGkW/bYAUTlN8/yYoyzy/huI1kY5Nq1CMi/EZKtzaq5XXNlXIZMBsiUueM1VCVkOHoBXOl2twHaiVt2T2tE3WIwUvXkBnHZ+4glTs3lltoFe9BF1G+jwMhNdzbhjjzSZ9mXYi6/YmQ2m/Q06PjUBnTGAqdfD/lhYB/elN6lZagtXziUuA6PIY7S94OQi3kXWSMrWIWHReXjhBWSps5EtvqP42FZoG4WH4pGMOKRX1AZ7V+9q5icMVr51DKUcJZ9pZwL9DCJp/OToJ8JJGhle9D9CsgGdBtO56Bu05xDxHhCyufZmF0/qMi4+rv7x8fES1rqb5incHmWpSXktzAvpTBcLxYWlG7lUZg6DiUWEMB3LxYNvlmZS3vIKFOPcrvCPAECnVG8GprLCEeX0rlemTZpKP+l6QrmCNNcw3kOnhDYheCVtDz/WF1f2Z/qI48FEyi5Yqt4U5IsU+dEuenASDCRwPGc3uk5zui+qQ7Yyfr2hbCDzEAvXKshaSEgkPjdwOUj+3JwSyF6XbM
SFO2iQAAn1h4w9I3DhXrFAfPXGn742ranTcJtCVsHDR1bzArJZHeTgSltIhmitb8BQmAvUpt4KmfhpB+5YP6CbN0OcbVaDV0BuvuuKs/gxEP3V8UV2bj+nrPeId7n383XpcBSsmyeE8Rlcv70DV4BvNe6qW0H1LscS6qxUx/fb4N6yU02fl4yRHQth3ZtsrouZa+oKkjg4YvfDShSskYJCFDQH1+hoqOup8CibiabpYIfpXD0yVKn6IrSoXKmQNOk1NG9/HyVqxx97wDyYlGUPI4VQNHKWtAm/bKNsm/ajn4Gipw8hCW5UgPSHofrSqzBlg0T2+kSM0F4l3RGORm263Uae+j8FqWqzr92qGObS8RGZ3Hp1S8AY0ILLBTs+z3mNRX/zYq2CVicJXVjkuKHk7+6EOY9CpVTV5JgZApECLFby4fZS1JE0Xw6JM7hoWWRyEF8/4MgDsb3/OQ8JPtvloBNUGl02dOSRE3+1PtPAdt9ArR71wqEsjryYTQ7jptQu6bF56SOK3oYhdkNcTVgBc7CrVw2oI27Hnv04IpEMRtRJi429+vSx0Hbi7aHD2RtkO6FxfT8OU/pN5nLmNv5ltNy95ATadSKle0MMblZZxs8JanrSTeJ05+9e84qr/GNojXowTUd50Cb6WYggZ4JhLg5Wl1cnUsDgGSFFZgUoau89mvO8p9wBR0nL6ok0EBIT8W/8Y1Q6uIyAhqrvsfwpi5I5rrH6qqTEEqvwguUudjnIdHq/wgOgH5fqQ7JTGcZ7VWmqyYcOkav/1dRiV8ljrqNdpBBAa/Xz8QcHLQWtWA+0GYpjet8GMgf/0DHEqLqmQh4PVzCdVxagVAnncPIWEMelUNC+buzcWwjiNxGlh7ut2W8D/g4drqlWhBW5e+hDjKsjSmwbKO6K7KNsZUmxtVj/PW2uGiKdpAqWOtsOq+wRUNNwCjPMoRaFROtWbeLk81Kc9eEYo/29PJUPfRgBKO/g58arMsDKZZPT6+XdmQfwEfflKflUBAb4Usr0Ay9QCXu2E4We4+PJo9Id5/NQLTLJNwkgrcSAocZOGgLQbinNjdAhyC40qc6IcpTqlTt3jKW5R6gLsClLkgZR6Sp1TtNvyL2PEqB0XOPUdTc0TmCkG24F6Emlkt7KghauhNmjggGsAA7cvcv/Nmlg1o6Wb3Q1rChbNd3u85tRQpStel4aq1mgaaGhFCcm4F/rAZ6SKmkCgAck/FIEGbSTgytlNMDmLmvnFk4Gqgh6r/b3MKZnEzrYul+9EQ4mvvAeYYczpqeSHTakOPmZV4dJ2twSQSwLR3wwLcGsZGIIpegaRq1lvqAU9W60nBLBxNEmco2mJKOAAdc5yaixdQzqNT9iWGTtUQDFlNuxy6C+mzInAJICF74By2oMl200MCozNo1Fr5lQ9bjocFBkJ7l/KKMBAuWcnC02s2GDMPyKO6yk2yHq20ZQ/PputDAZJTH2fD+DnaUBiQgILNel1vaebJP7zBXvgBbjWYiluYzyqjaVCzoJaR3pnp2uRYHqRGya2gn6VtNQ9qwP23uOC6XvPTYJH10CnTZoGE0weUABlmLDma3rT1zrWR2NBaORLoYSpjSV9tG1N4OQJtutRU8ROvo0q/duY5KPgmcNYpppsguTNEpwazRrgUGMxFELWNjzM7VVszhTLatV7zzPVjd0e/sF5j8sSKy3tAIeLnEtBxDHLo0aphJUb+CN99QQB5wWuHr2qzWv8VgvdrnG7x4uZUnANSq4f7VMvHCGJZ5P5jE6kGaktlFJE3JvsupwlYzDwqlR1oXKLwCIg/Dt0grGJdLwuk7Tr9a+2XvpFlTzHtHr1RhjlrocxtaIl+v2Xzb+ETP/uxfrI0AO8lQ0fK8oL8Ppqpv6c8H8cIsXohNGmAabpuh4jmO2JZOEvkdyFSiaa2NtZDy4iwlEk2Gan7BFDaUiqlqixyH+KQdojdBPrtlbGensUsXFN7SYburb0FFuezPvBesiApxtFKtbH1
isYkMAgvoCiPgOD7tmKnI1fYQfZa0ZdY4e0ZqzuEO+VsqIM3Bz2IciCn6oU5vpw3c5LaJPaHfkEiIwDukfdlcS2jTi2yN7gMgqyBkfLN1c0F+FSFemSkqLpAffIBYbS1jyIjgqAPAtZ/17/DuDJrkH6hW2GHrf2iiRXiLbAZREGD2lXeCmhl0xRWJrE80Dq1rA+7E+bjdjRDXC+FRR+/aaXWKWcOg8nCgKGcRnHr4gJT/16flmReFLJg/7k/va8ro/bdiUgjeaJrqQorpLpHOdpylGzWmlms9DmmNOwhm/I/WJM7S8cKhtKXJQ53qG3pf7M/84u5gRcpx86T2fvRPV7d8eYphhc4Kn43eAlgEu5pYoWzcX+mZGj815moGPndMGd8oPCzq9BLpzBpa00dVOq+OhEP8GcKTUybeyZt9bdFqTZn5nxE0NkPwND4pxoMzKUhoTulLhVBG3XZjioBWZFi7oAH1R0FtDno1UolMxIMwViolmpx0WDg5se62LWPdoaVa57aoThdyg9jmcSkyKgDOyFjib9MlYF8F45xDStwCYAskJPfi+5I6eay9b7DZl5D7DI55LxTGavXbYErf2e66lA5XzJf9YgoW1zieG1ZE5zbnG11h2Isn3ELqBxMxClT7R4aBdabUybciTca3R31aqbD7hPE6luwyLGO1Lrwh7ZKpOktvIX6j+D1SApKHP8+zgooSMKkF+ArMdYqfan38Q/v3s+a0JS5+Ta/ro4AXorh1YRGX0bVR7Dirge/m6r093rRtBwdwkrg+EmHv+24/UepDqqXsLLvCphvz7Fqgzc6H6jXGh7oxdvmvrdyRx6T5bPeiKBc4KiasnnAJ+B4Z4cTF7CKQfyjwuk1abcUR3d5wSBYbezqSyArKAJ2Q3VYDEi3GPEufNk9uur5wvGI+IQtqwUI0wR2uQ8uUA2/W4DOXIVCI+rb+F7p4uRElrmNdQfkLIHeEy2f8eoK+Q8mCf1ugqNujGn6895Pvqlq/K5olwIhYOkYENnNBGIJVn0mLvEZqknKmGcTsv/arGm6pG7P2TXx+dizZEJ4gE54RXpUer9qQBEGy3Ljm9LOSc7pYbu25O0r49A879dZnYpdsblx06+zczQY9QPgjfrFtes0lC5whUX36li6v+uqmTo1HbXpagoLBm53x2hXx0vJTA5ZN3Glg/79UkxqhMqOTXh8qxsl+ar46FJOWIH04llG88POS2gDjcxWK8bBUdXRg9avA0RMLhJSggAkO8B11aZv/5a4XzHXhlCJsnFs4/7UdJmmYB/RnusNK+d4xmFLzVmIMWtx5GU609fro+LCig82+DtPFAZ+2GFGLXLjzo0Uuz8d3hsVqRtTqHF7i9aVkZ5xxIvumAnZjTxEA6tx/R+1UAQw9S3T4t2puosORqeqvuvtqI1w7nVuprGBsnCpgjSC7zJKx8WP1y+SFAB6pJAswr1xpyh9gFR6JqNr6d59FOLHMNo/AI44ppqoX7zpmcbY9gMMKlilspWOSWxklrTSp5iE/Asfkz7eKbsS8S4adgjFQD/MnbhFrypn8RWPXXj0BVBBZNulC5/85rhT3iocoljHum6U4mPuCwrK5QwHlMzMzvJlIhqUDyou36aSq5ngjjGPfxK+XasD5hW6xp18OFv4V1sAAt3FuJLZRAYTHFak5aCWlniMUn7+EF2S4G5h/Po2xvPUAE23bLVh5QggrnbayCjRo6E+EItaBiTShbDOd8D1P+5FPSZie0462W+CKImQ3JKq3/lYJCt7VQ0Yy9POuNJo1JyWzUHaEgT1Q64fj2QgR7hLiURN/+FXVKMGZRFkTK/1kr9GyBlUk3QpFxVdM29AANJMC6RlY1qjC062Sp0EnlFijB8oV4Llrn6Jg9Lnb8hQ6dLIkAfOs2ZqX22dNxGd5hC81JXTGQIif7nQJcN1d4s7kOO/l8GCRFfr4N87NB4RWj7H/qYeqrVJu1W1NvAlFBlBZlBp2Uilzgr84DlS3FFB
lJcSe1VmWwOJYkTy64RLHtB0qOXEaPzx1DqKsN8pjYR8xsH/8xU5lXjuVm4w42ytFugyGQnnMCDIAAEd8TrkvnWgMTs2Kcnwk/ZNnRhiOGjHTuJflJj+QBJoKwwmOnXNmT68EioQ1TCSvC+/Gb9SssBrLL/NBQN36Vph/qQot9+mMsVPbAgMrZo0BUUpMoyV1Fz+foTmmdsw9FILTfNQPcuAoM7cCaSD1hmKgpI/FJDNiHzs2rUTA2CVUC5xe+Yy+WZZQzeYYOCPCfdH66Ch4EBcokfVHS3cpjzmjI5OkjIA+sO6qYwGaJ7EhSXlDavl+YnNsp4WxwxZ/Np1dUK43mnbBJ5BAO7qVvfA13ntTnBKVUt4p7lxqmQMXeGk9FL6oAhHbT0GYXrLbrCYeeydXAYl+vwpR35RxIwYM2OSU0BhFn9YnTUtAW0VhosCNzMHCaaMlB043m0oIzFxctgUshgkNy6XwNGJtu31k4pPgWVeLaIEDik+r2qhRhz+D4fHfGYEG1c4we3Fuy33DUubWU8svNw04MwgNwp09lIHD2LQGK3M8utpEw8bkIh2K7Q3ACq+cKBAXAqjiFpPM8UdDP8NHAX6EdJoSTnL1RICLfki//tyFzi5SKKCH2f6c07dqOhPeftwd7J6LlpJLxMmo/O5zhLFVwO9oqxAdrg5zc+j025xqbTOGREfPxmwo6zuqgPaNtChi8hqY2v67r9kPdbMV3HbZ9myETomyUz2aEBZM1pZUbFpxZjDS6IYjbBGpbZU/wLvVacNpcS4jlDEknQSF20jBqYU0Dx5kvJcRBj3i7vRpBACg6DG5FqZetqNm6qGdivqrWJfVeykOWrDGx/3MkVZZ+DCZ+694Zd00ijZfHB/FSTxzArA0+rGqYrYAEls1qaM4oWxQP01TFqTbnvbKPoxOIqy46NOKoVgzSKvEkfdAi49OTGeqbt8IvNW7g1dbfSGGK128ysC/FwEcm3/i1lFFX94ciZwIXHe1jy/qTT0/mA81nAL9ngJRWo7pZ4Vnwp77vge2KlXqP2NbBW8/E4GRSzJmkki3gJu6d1tllGa15z/Nl+RkPC4BZQ18UjznOY93eYN6Gk5HvP+5mndVLXS/hMZcTPuw5YZI/wve/BhDuT956d9LO5bDuV+YgXBPxuLQPzy2ahcasXoM3Klh9/6qCvWY2B5d90h3+iMN6cDqbcgWgdToZtcbVOWPO2knHdVS9Z3glJDWdnFn0UJvg7+ODX+WoPsn2ohqqTLD2DI2jb2OaLzP5+WlzupSZEQhPKKAnZvRkV0ZVjhTJeZm3T1q5ZHwS5R0CMlaMxPZzjAmLXFE/YezVgu2ADuhsUq4mmCs95uhKLtMvto9QdE0elvIhi3/edWHWPmnOlBqXlE2Ivz7aRHK/6vlQKbPIjjIlCNX2TPUagdL0wmMzQ1YAWavefpaJRsYnIZK5SahiAyCAXhFqnIDpAeXGd2KEdkHWMlISTcGmccre/BY39zZ+QZwv8qGjZaTZcwLi/mrp727eUaeQDbMCAVRULZEmZh26fW7WbBExGuIFtVKcC73KIFaH2fOvbikF7v0fVXQokSz7J6mOXj+IcV86PScN4drtCzSrinlI9vbGhwvKy3MWoXVqdHpGKeZJw8qBvnMqz2IYWcOec6oTPnv39iTTb3PJqwQCsKy1oYAnzgVs2KVDtvMTr2gRtj2jtVdtIBzqmuXUpQhlIr6ZOjqIzC9TYJHdXxSSdLwdth1S/qsNhABxsjBPxQWRsydMqmoYaguiq685UWaoLTtRlixb2py1S3p3FKJt19tcGVOo5QDNCP1mJJIZD44j6B/glH7/P8AOBbczNoYeb7MMv49CD3F8FLRkszgVzjxMwJv3KCTZrty0xUJ+zyJZY+/HVzrmYemgpIaByyM5IRN/fGL9xIsJW5uygLbZV1TCT9PYhQHzMOC62Qjpnd1R9RMM1q/OuQkygGqBDiYOWsESJErMZnLSjAvvvTDE5xhLTd
C+dVpMAn6OpMkQbh4V63eszj26cjQd4yRixMMr2WAcXJwqbEi83hYg1UqxZaq91kOF+zd683tm6ZMmaoHfudzjG7dZKrsw8gUuopqRXzSNs6z5H18n+tULLWLlOigx1O0vZSPBiCLrJtydWSBA01H7Sl/UwZWaunhxY5sFwMchUOJLiNtz4b1NJVJ5FcH3UCIwl2gLw7Lefs98tjVts8jN4WJ4RccQE57OcvPT84EaX/xQK0kznXL47vzgMQf6ckUGR+pC8zmbQ3vYV9IMEuPa0fulqeMo2tkiq3RUk8osoe4dMauSuwGlpUkIC05SZoHofMFaoXMV+HJdEoFdwmxeO3TWQRFGVIXtbUWLGv3/An9fnsA17zTekayKsYoamEcuG2H73Z1J4p//X/xPyeDnqrK4oayx0f+zAHBCBNELvHhW/hArUWkIsqKarSNvWQmudGCk4INeCkWgXJnRLKLw8W/nd7j8NubK0aDzh6KkEMrVk+ZvV5d8GE0vAwWJ1uhjR+TxEJkfqwU3418h/v8IH+4vFU3dqfuLKUC0X79Vck/0yyajqevPqD63cd8y9HrryFjCkih2kdSovOtXxF9/KYFbyKdMGIOaB7swwUEcZlM1k91vJSrmcLPVm7rv6D1tZWyAzoBTMz9iqK4iR24IBjZrF7r1eCiF0j8/hJ+Bg/q5OOhzBTxeuKOP2wMH7k6zIuHZIURKFq8PeZkpG1379HfZk1oczQKaFdRwBfzGfe7Y4hx5QQe/RdetLOuB7W+tvp7eE9v4esnM3Yz1bbSqpxP1S4MGgYVV3q1OPafm4jbgJrBzL9Fn6EurkcamjMSTr4AOt/YSL0EAQGMzqeVLh2Cf1wsjdAlrYvsCGHnI4t2Y+0yva8z+dOhTsHlS943V7YCEuoUZTXGiaZ0uj+yxC3vAhGj7ae8vCe7E5+rANhmq3D5S2ol7vZkm18E2aXZYefZr+VLw6JRtZS9ixH6k+uuCrRtXunxhrtb3dX1nclJcNycuZXhPiMCYaWygc1Ox4bGQaotwjK9ED4FZ1iIflavCgbKECgxc7ZqGfIKfmN4eEOw5SCOY794ig6oUDK4EVeYENFXBQdnbOug5RCWqIl+S82It9UTbgEJdop5nIfeUkEZ6jqy7W0djRFqHjhkvJzG8hoWdaI2yGfcxD74sXP2qT6DiyDXHxcTyZ4VdUyj7l90YL3hMNJe7YcQwd4NgZviCWjBK3nHpivLZ82/xQtz5V5jgPimx7Baadig4DJsNaQIk4kcgb1Fy7sJT/npjahH3h87MuAXjDmB1YscDFGKXq+enYb3Fq9vSnnwbFYWsR5274qy85Pq046FmF8coT6E/3xiGdlugO9DUXsHt2+SbNHlVRm7/H+0EOqWOBR3HE7Y+IVCy+pgCxlMUpsBO3zB93r1GYS+xPpTeVCKF6qbwEFDLfEekDNmcEFTw0WA6Pi0hpYEIHm0R3/crrD7Ulmxnm5fekx/f6MQ5sqKLi5KB3k9hXSwF8mSiBF/5dxZ8iSkcZB4NxeyAOk5bAYAqJoqqb57keNYNUJx7CgBA2/PioWCII+wiLlKZS+OleYQLk5rLWi3Gbp5+qYCfEwfOKcphiVzpgYmw6gL9BC37SiCLwz9su0V1Xky9k2xrp3YhZ69mz+iQG+khntXZPJWv4KbY6XpjrCSwvIsj5kFN5/WuWi+1OVioVzAo/Co5xqB2j2X2Y4eKG+FrlQXfJcbPJhcY4tF/It723wFaGTkPpf9tlPmhOwUDfqTpjA9fN3IuZLYMUaAejaqfJGBlJsQH7OLW0GNzw4fo8dw0QGGo1hzLUciwfccInkBy45I/jwyTbjh/JHnDUdGYHmxViij0pNCYexXuPgPYz7/q4w1EOQsNOMD3cRu3d+dtkRfqQC/SUjpfMIrSJji0gtg5brqh6RQGCv70ZuN354MlrLBVLRQFtQcalaPL8VojaJ4/VurvxLbXzxD48yT4IYqpvuBirrRU7DHiQSsjDC
4afr2nX9o6Tqf7CmZMUJbKiKWuuIe4b3UPAsfmSDvTzskaEsi57PvWRHQ0d8lvL1VC3qxVsaa3nQ+T4j3R7MAzEAAAAGNu2beNj27Zt27Zt27Zt27atDtFBrr/8j4cXFDMwRwpy/chS54vtWtwuQiLf5Zmam4CYlRA2QRDlr5qenx1r4fuiyWPsp0CxOL/waefM6Gcz8jqAaxonElVUgIZMFuyPrZJrCzrosGTe+p+mQshcCfwpA02LGmuywZPLznBQqdUoassHQ1WPJOhYLYw3c1dANdvBe3wXbeaYMcJHlbFjmHnE33PRJON4PgNQoQGy5Hi/NflHHVUY42Q1DEeizvw6Oj1fG+MH0p/ByLo002+ecMRXQL3bD6SKHeQcw37ALKcU06vgS5AOvjf3/bZigEML8eAPYDbC4dvdbrkrMCbMR6GKv37uZHjpzqtJkBcNp44XDHmpRtRsjGhuXHMKgQDP2C6gAP883qyfOAzZZo7+8DTMSLuwnM6+J0mFPxHaF20wiSm36nUOlB8CqI5zwq8cDHKm9O97NbCawFoulyERoTO1JVWUw+zGxL7BNvWxDuh5tnpDIFIEdOvlB6sd8JHnT4pjgmQbVprnOysst1z+FxuELolEmj5+AkOI3wCrbrvjqy6ySWAsoXrVi4rYOfrJAoW86gnj3DXWP7SuU4/0LAFcOdakGMGgYgo7yCTytVsO2HJ7Ct73zL9StAJfLG90eadPlCjlWxLigGzp8GDyGWZKdImKHb6e9wz9/ITC0Li25kckJi0PFFNx5ZvfQW8cDnMX/DQzPWFsrt/lXZvg4ez5gVX6LThhrSityI3DARwXDMfYXOfLod33JWob2VHYH9VcIL3ZBTHTyG9bF7/ewZ3oQXrlHpHnPXWXjwb7dKYcrnFdEScI2ZehLF3GjvYdH/am2Ai65LGSc/Txv0ocnBcLjz8/0gAtlygbyCeStBeF0AI4sIYA9zpFu/CQSkROTvjQEgTLg0HlTqoKy6UHANOtuz6WfaJ+SSqpMbjhLVC75gCkRrxvA/Xp6PO2J2o8CsXDZkGvhfrOz23RAv8trRZmgL9sgetXjR2ipoh49Hqy1PA4skiyPFkOnewsxEdN7Luw+MNAPFtvW2wZoVqazZYmlvsuEY8gNtaoz+MT7P4PpkYkgMyrzXWUKmqKsnPyJjOUewIOX8hi7OdpG28/zP430Av2ZSZ7QT9Va5V84VYbCiHG4TLBck6mYC+YAXKQ1HeDK67dE/54o9rmYKShiO7wi+4GtXXWkDrYH3HzFKYjf+EORK/ImVkuw6lzVK0vAF7IlTEIRtOP3eT9PuEctA86CM03Ie1YPpr7K85KRlGP8uGQoVGxn8UU6S/Xf8ZXywuIZ34SglC126Qba4zd145+m3Wssg8Gt8w/aT37WQcILl4AZ09ITMso9Bl65kbiy+f0gQE3ZgBftTFAvKYzcMcTFXdL3m7zKsQS48AzgU5CsvDF5blGL7Tv/t1ENNXjzdMQ7WuVWKLeIiHMk+pBtgEDiaPtR6cjlyldkgzYHYEfltl96Wi2MuQ+OQVq7KJNJPBg2GLZhIUVj4bugkgfw3TaqO6HTVBKaH/1RCAXwSEP2vI21os53nyZGpdSn/Um7utWRo+6zkdwnpepy/L9sjzxy88n75uyhUeIKu/MyGgvh+Sw/n7kG+QMu72wB3DvJMW8uVbT4eZqlzOVXYaep5e2q5ZF+OY24Sz9sWJahCx2ej93HcFwpmTFWD1sNIkV0H+afdaRH+Y7IhZfJITurms64fRvodikRV2cirFA76GES2CjWsD1S0UrB2I5iQxf8LI6QOt54NnXPyJfuf26ySb/1uTX8m/3wuj34EeCbqF9+UZVeYXk2hBUOk6/pOBpc9OmGfk2zD/WKR08IrtnPYESxrsy+SfP76bV5/JZYTNfFVbd/xRKcDx5zYuz3fVNW43pEJEzi4D9gVHqLX9k0uCCUAp2c+yBtLy6njtGFpFwz4ocrepX4
a/ukzNMKHFUXCRwbXTkK/fTCopM4PZ7EF9IoCGz9s90a7wQ3br+5ufbS9PbN9sr7P1J3djPXPvwx2N0zWLiOEbggDtJPVztAt0x49SkZjTBR4io0d7JRMkqs8Pd0B2mBpqmDBvBD2WvUBW1aqQM1nwDq7uGQm/OIqjyxrdfkj1SfV5UNOUdZc35pxbMkv6xuP8aWpYrg4i2aXglgmXSJRv1sBCuwaNYYVJJcQFIgIeog0Tan94iMlxEGZ016UfQkHbWGtvylNsI+S+I6sbGwT6E7tV1g8xt6EPYha70+KuaqaRlM60I5HQn2xgVgcBO29nYzIjDz4hLGnrSqwLx2Am+7HN8V0r2Nd3HURfR5aviTiNaoDkHUI9MMMuD7Sflqvc2p4CjyP3r8fPzWN+y+g6jalhL+COe5pvXOxxbp5zMu9moTnr+FRq2l8tMLf5EzvmmR0UHEpdlQJgQ0XY5lqG+qCUpIeYGR7+odIG5pQw2PiFs+kvFnM+vJD6CBb+m5qamhbuqsQfbTfw33jWNddwBZLHpGwk/YRNuOCGSiv4wF5voOwxiHJV5W+laOgUHTDbXbrusg3RL3CDbH0K/hct7zj7wblaZ0NcFsJc0jYvuPaAABE5ZZb1Ss5IyP2V8uTG06FI6y8GHmsT/+YIYO7iY1VFKW1XWcU28ztNKRbf7YHBzohWYF6941LGLlw4ZbEsO0360n6Di01YeDcesyaeYzQ8zuLsF4s8J4j+KMIlB4fXkbG0JB678HKXBO5fPVDncBhnoZzo9BTS3CVE/Nuw02HM9aT10JhO7cAyh8LL+jf+X4tP7Tu15Qg/y28TF+ll08eQQLZdB4Wlra+Xe9k2O9EtL1K9cU+Az6wP820N7ukbcinidFl94GsRX/E5iYe5CgSwMMbG646Z1U9S0Mtj+yS7pI+d1Bgy6glBxw5uwbqF10yghjF6FwY01v9yninnZ7XtLuiKC3AEhs130CIeRzEEUj6i58rjtOGjWGazFbbnL+fFg5/nm/w9BWQNyqRfVHr1iJ8lh3pJ4Fmi5VZ0rRbP/tVBq/p9t6WQ38DfloRcaGqWkayS9a7NRHdIICLQ6mTHH6R1Sq5mSHOSwAKbcjSmxMcB9B8cuxzHEgGiA1/7rHzHTpSTfyZyyNwxyhuuiXHpnBw6IpAci2vW6mM/83+U9mlD6xgmVUTzqZl4HIUzOqwq/1WdkZALP5hvtJytAxAhI0xgeMqpj6wQh/+Ktwfm/8/5cxjVLsRmU878o9JLG1dnsqt5caEGPtEEZ7Z5cmbb49KiUzPWJLIY+Qa33Q5rVm550Kp3ieUocDtW42qsxm6fH+pMOnPml1sUnAjewIW2htoq8NKqr+RQ72dlPDK/DmTHyVOp75ujTgSE2lIhoveUDN/swwE2o9eVwqwE+zkwqdRsAmKjCK27D1lwGhLYttKIzpwSrEVcpTPYsqsKRbjj7PTO4tX8/XWPE0WGIhNzc23tdvUCqfZXLDlJ88rfQKhnV6rGxwt8GphDCzkDgawv24f74u1jjqxYC/psmFNR6puQeibnMPAor3T9nXh9KXddlyFAC0xotvGIwMLNuGJYc1hSGckBrQlzBhQ87zBywcP68cMvXkGGQfiSsvjactXtt7zg37kd7iqEPMFlAi4S2A/ljv/zDUE8Keq/69y1cca29PBtI741jxU6LvBKLgLL3f0+nB1bbeYDJxcvJwZ7RfDyIY41IlDjEXyphU0DPaEEo9uPOz887STo+OvgrUL8nsVBmIWc8u2hQdTXyYpF6Ml6qzSMBsz7DCUFI7rnVDGcGuiCwlQsmBFJUeg3t3mavwm+28XrL5c+OsUZEfnAUjC99CS1B5XVkrAW+eg707cyrcabCOiD3mrttLaFz7rpwKOFvSwbpq5FTuvYO+idgYiG2PNhuh8QIwpMeVxOpdMVwHQARo9k18pF4drLUk/V+g/2tbY+a9EpzMY/Huy0svPWeJmZFvxSzHXr3HVJ2lRPEDD6WK
usqNKdP2Uq4unU7SsjvgCQ0hHvgTaWqoxjWZAJJpa+/qlfXzNtWLD0JmBqX1n/WtUeYAfsAaWIQVoDftdZLwJbiVkuVlrO9iuE9vbZnvUoo3FlXbm5hRHkqxTkEbjbUtAS3lUAxfg+rJN054NIFaXjpnj6lK/bKSC05QX1Bu4lig9aTpTKW0+WZ2BZbWK8ft+/wRz6FhGwZwo57gBqT1UpD8I+/0CxcAh3UWboji1RjwIZuLDV1PVShoIhtw8KhGiXfFAei2kHQXWQ0bMW0uwR3kjstyxKzBtqwFms6FjkGxRJ3yff67Ci7VMngRLPOxCzjxGyADaxYEETE4lK5B9RkVIQFMWSBVYCDCbc4v1xSuXRJX2aVEEo6TbRZZ/yPOUAyGepp+SHygBRClngCvZxCIU924SeMg10vvZ3+IrBvCeMaMij8XP54fIEIm1tiQ0t/gJGeQaY2DV+ZKuM708AnFkId6eh/2ofBPZm13Jvv0je/do3SG5CqJQoQo/jru0Hp0gz2H2dli6+Lw+81OW4GuPjKqwV/2gvQX33jIl6xBTXmq4LCMljm3QS8DpFeiSXUkzSVwOzJoAOiOwLKHFF3xLjH4KiDK3hmS9Vog7D0zjLgb+CqV/s7MKBf+ZW2dbcXse/grqoOILS8IOUgAvBMVLgHcTmJ4a8+xseiJv2c6GG/0ChoVQji//RJ6xrhojG5uoqFKYxHwpYedX0z38ElmgwqvLSoBs0oxXwuwxcR9EFLQo5gZegFMiXvlakx1fdOqQGd2wFSBvo/rbfFJKLCIO7f73ny0r+4TcLg7aYUVd7UxMxGMn5fnjqrUVhY0OVirCdXJO1QnpFguLzHCE8yuTp8KdtUUdkpJkgIN/b0piTkcGt6ZKfYkD41HSWEfD4FYFX331qPPfDNnZ+YLYUkl1gJ+c8gUnI0oTmuiZpLfDPFnOV2SUfjvDTDhp11Uz3tOHCxp7O1O/i2hM4dIXoEj7AS0/b16w8EMoBN8K8G7w5wslgWys2LJDD8gFdeXik6CKHDI4cfWuhd+FGV8fI40OhScywznt/XU5TkaRyfhXuTmZ4dNO3Iwie1YR0xVlTek5EeMrp46zZY+NwNaP0A6/UN9wbJ/bSfbVma4RMUu87IsVjpiRmAmlawM+IluYHolN6G1bwJ0CSUhT9//NMURlAwxcB9ulLQBkbI5lpn1e482mDNaFJkCVaL7Rmc0RU+rDw+/aXKOrZQ/bxZgFE4wcjIRbIjk3evXbQBeyYqpsW0jkpwtNYE9iUxjC1uArvSO8VeDu20oQTVoBiyNACPZvLe9qkyTAw0soCc8KV0XnYv2hsVSfxXo9l47Ev2+FVVmY8zMBj/FCQguWXIMAXdDbnpP6xj6RHhc/e4kIcYMkC91B5ymhVGC6sBKurEFogNEfW7kRJFhyF1cqYXhShNr19UGWrnhvZ9abo3gsdd2hBr8AzF0MalYMIFXiAPfAFlaJIhy/VHI5uWEyMoZQs/KIH1yjpowRay6nPJzj4dQ1zgepCaTOkPHDAZ9qWQ1sW5cURalQm5r+yuuzLA5MEbXgCYWq6I5Kopz96gNXS+rNS+Cpp8y3Pm1v7RrtEppzsV58BV7vU284BxMxJL2sTBsEx035yap9IcbntEF64zMtXp3k3Er+uLE9gXEfD1GGjqDVCbQmpq2rBkpkMBc/3aDD+X6uQcPdWmplmGtO4NtyLvHKkN4rBilLxoexUuUrpq0LoycWmGiFoQZ9w7wMhJKAqES6b+1EsSxU5m5AYB1z75XcMU99SkPGx0uzTlD99pxIOnsSGtxhcloRAcpp59JW/jDfq5bBLoJ6D4vEjmsAtcywISxCK/9v6le1/U+ucg6e+Fb+EGsmjTS9Y66mgfIat7SRqBAekdBu2R2WCJ0jKMijKrSrkAyfB/V+w+vbD2xYfasC3t7rfqQEaLuJp/ILsf03T1PI2cgKLVsu2spM+8WfHM6jKMdp0jQ6Ns8LWmJR5LfUDOB/WBC
CngG1lGvD11OVRbNAKWacpTsm+IMTdk0IqOBDAKui2l7xNF7jBcHBBcNhiz9TIMKteaAJo6Rl0ipu/iN/hFmeyxK8Z339xLYIJy22mp7Zl/CUxXTszPkhh3wLgnTpO/OEMdz5EQdX7sJgZigEvpo2ilzkDHf1v9s+ZEsYAz7kibi8rxa/S0O+EnV4kMkJ1JYaVAw6MX15Mjb5oxXAJS60vUICnrZzCey77RRIhlsO5GPLol2yxQTGSuatP4bdHE/avvNYy+R0mUU2wAYAjDGSefRsk1YKfJs1ylwqRutzuIHR1T0d6wccvgCbFvL9rlZemJEdbfpILj6IAN8iDq5Ds1Y8xyef1zl+OPlUMZXT1jyNjn+jW3W5vJTqTLgYlLeCnxLIY1hC4GX7ctMYsyGZctfxQ4rNyxeRRUNNIMRZseDKbkPhXoh4aSgPVQQ1mn0hVczUoiCO866oW4UOFzx2VuSrbnaOlXMmERvI4xamEyWiPCDU5zfDLNjlm18hgxMQzk2pF4LfnnOYQHkgm+WMsGmMaPVaYs14aXjJYOKqs03pwB+OcPB1VzJ4WPCbf9zG09tQH7S00GacBvQb+wY8srJyOQj4dhvQZZRphTUO4pv6n2naaUJMFwxhRKuo9+6umEvqyIlJ/TwdhPwzYD9aPJKJQpWMOIePnGdI45G7lRuNj9pTrYderSRIjyzZPprUH1eUBCJjWqnO5+TEd7wfevUid3W0mfcokps7/vt458eYf/Rf6E592jsGRgI2lTqocCFhd/4Mn/0u1CV75bktb9Hp8JelVE+BTN8uix5hTFREup/gTXOeIjSfisNZZfR7QPNRZab/3xBKzBLY9HCqgcCs4YN40D9ncQBE+WVuvK30zwEd/eUomAfyqaBk8faX6+BTPcXcIUhnRIcRfhHtTRtvNLxKGqnMBhNbC+YBhWANypAYBP3WJiG3Gsr98khLcZJYbByQMTvIJisn0cJQ58phU7sp8iyPhGjG5eH/inOMwm/PtnuUmWaQXbbhrxVa9gPP/mBd/YLPsh38K2gkE+wx2HDV0gh6xJMIhdRWdMDY98tUPF2j1L0n85bwldwK54X9K3kXurPTqb9J+w+MbBwuhMYJpmZ4qQYccfB0hiQq36DAckZ5PgQwVhI0Z2wvbdfufEnINPyEIRMlF80fqTmULH3BFPHG3bspJKjtaRqJcCLK5lf0XizVhSNlek2lbeIMnfjn/RturmdYGKLcPrP8Eb0w4f6wlPy/noaNwjpHfKRh7uAYPLCkehyYeAlM8sahs6jIla4xR7cLxCqNggJnMLcYfh8jvG1kvlrlRS/JpqaMuE4TGuDyFnI1Ssdks97CHjxRfKLE77PwuMqIdqdg+aCfikBmBeTPWKP5R6vIliF25AkfRlpJgRbV6gQDmpN/pESqp8fnVPGKUU6bCznW6dhQyo14RYm1Z6vAvlq+hh8LGlmhNIoHVwGWmpXRIiIfiknGEmD/FmmIkunJUc3IQHuNHATKiVrDNchGBZ1NgW/rbHGSNtz9QkbTyZcOMfZg0pQJ5w3EV1xF6sll/mbuR35uFvDorK4VHW8ZNMkcK0GSRYSNQmulQ86Tc7tofTwL+J6An4IFK8NlA3+wEVLJ/VYMPSQhJQHC2z+vIjIepyJhF/Eksu9uJqvnJUQ/ZNhmW+UMHEPTWcK5Rg3ATsicIW7GsRX7X3O4SP5DRV0NtUUyJ79xtsQMOICAcXeswfQqYSdTGWc6zGSP+RuT8g9T9r2LSMQBYxNy2XDoyAfduuNepiineSaO1HFNlczMnUohZ7qfs0sKhdY/4GfJ5wouYosW43MLsesLBHMUQP+xLpBEoHqZB0UEFSTfDSZzUvZi4W/685s3L9cNqV6ABbD2oORK2wk1I+BsQ/MfXjDaUwrursIWVyEMilTCXgNO41pQ/GurDy7DNnRS/TlEBsJf9rCNJzNnHPItOgT445TZohmKIiY16kVqaWQus19+CDuIuA3T29xjY/S
TU/G5Nq/0r9TCQ78SJpioUhiyRqOepL/C1SWvTH7UqSYsVTOUAm0+yl7mizdeVPf8jLK4LPAxks2wGeGQ4gceMrUDzUjovBOLc9cHX2/OkUkKqi4sw7svKYDGW+ssVyF0Polbc76xQQvs2xN5FZqNIYd5w1M6VwoL9dg3ge0w25EWKjr6oZJUORGL3k7zuQu0OW7gPCwQqA17a1FsVt5NlFWrk5En0tBvprw7lD5yDauy2SEaWgipK/Ug2qoA+41BrDsUEQ6iDOkyLJ5tYqMl9Vc7mxPm93OkbGSpSwSdQvzfHI4OSrfOeaUHZUzMUMrPGxgKbrTj08UaD5mrelGWyHPSt1vZrU0iI7UiA2NxxqNAu6zLQtp0FpmJmy9zt0/Cn4hqksxpPWMPtksnGgZP1iANBpH5bSEKBX4z+uTtOFa1RhBvBHoaHXD9cEfsZhPFgJA2Yn/qzl0oBHOGmPJLnBXYJ0RWQ2xvyUhyTvm1SD7+o0RHecA4vQe3jBqVcr+TCZtFgRz0+o6E0sJqv8o5TGjATIA9Fka6mhAuAutoca6ROTDoLK5o9SkTzdFpCuOEr+CbRp1lRAcQ3Xnuj+S2F7R3p5npUNytkApLYllPCtVYRjGHtAubXiwvt9W1Ql+r7QKoEbFbXf1ri86M6BWXHBmjcERFdlVylar9Dd575kf+8B9LLEzWhFmvF1/a9tbH2/ZNAzcaBMvENknz60Sc3lFSbd3XNVP7BGEtgDz+ECV+z2PaCiHTtD7Jg3B2PONVASPvkToGG9DaLk72HrJsTIUKE6gAUjqdXkvf7b9l7a5ZUgURt8WH2h/DX3i4PEg41EuH3x7eGkn/sNRBwrQAsxSFPhnqco8IstsHK0QxlS4YpPzzWGVWVrSP5FocSwDOUx3w5DMMLyZKXU9oeiyK7Spwc9hKa4EmOwDLZOKIT8Kqkwr33IvrPlfn1PDUi0ayTb3zBFDv9XfQ7iWDo/OYsg2S1M7Qwl98K/TTPhv0H5fUcYbdoNJWiI2qrcexj7u/29qdmh1No7555eqs7qZRC1RHDZgOPxFDu7ivhLC6DSe86q8LlYZO92Y6v83cQxWDdj71VEryD6dhdNI5XQeHel6GsxfhWhETpz7VxrlzQDu96bNtjnX252nbVFMUpqJAiFHV5e9bmP7MgCcB5gj86NdLupmG24kxkK/aPfFK30CuKyKO6qKyC+2HowPg/5mVN1zrRRoWbNJFeN+xSptCQApxCbQ3tABNpZlJWKqU/uMSoDFnvbWR8ib7mcHMqPED8EJr8w6yQTUpVP/3zl1CgxpCTYCQakW6uYASCjG66IMsPGZZDLJkV+svoMC1JdzLQOFyV5+98en9/svA2NDgJmQPplbEOAzTEpJCH+SDT7Y6T8YDbXsYK5EVDgIP3fzhl/fX5bDQTeIS5xFjaUomY6WWhnpahqDeE7Nqltl7BuRRdOBHDBD/bvQQOgmc7lecdiu0QlvMVyJLKQQLGy0MIT6ku1sfTx/TgkxpVdn2CcdQGVsbgc3BWbEF1Q+d4euPx0V1mNh6nXMI2uhwPbZBiAKpu70bEawUXREO6K8noECGuKhn4HP3SWAgKIeIK7xgqCK8EATr7twlIN2eV2LbHfDk/TJMR9ovX6LYdFE7QW9HdiwWk8BMNk499S+SWoQfOLdqKH3w1tk2Od6mq1Db55u1wzMoJCSlcAku0x0R8nesIsy8FrzaFfvzkzvQkSNoyHqomt6kK7eldq2qmetbHV1KXPGTqsYvaOx+g1ae/6zWafdE5FFjVltGeJbOmKPJQ5yfX13Ob6QuoSpKuaG7nKovHN/XXWi/OHlck29tFvIn/wRGKbMPWPNuFXaGbG2fiMjd3G6SUMxvMpAbrVp62b9M1BB1w9q+WuiYZM1qF4lV9guTFFOtwXfHrZqjSWRBZcROfrhMjCtr+UWkcUkOhAqu9QE8d7rXcoph8WA0SDGJDeAt0s9YAb5QA0i268mamTbHDV6AvsVQuDX
5YnHW2P4bR1UmfIx7TDD8jYvsn0tkDZXHXlc0uwaQ4Pe73R0hRLTtOXABugrj05bgoIzivZGjtaSHzAQ/MWqpXUYJWD49ZmFil8Zh0D9yaHgCvjYpXf1GIkeyLNJNjpQAbVgGGlIOyqvHimkKZ/RyduoM9Orrpjb8Hdwu220iOSpGte8xdJArUBHGfvK1dNO+rM1uB9CeG3pT8je5N/4NJezNeWXDxJEFtbR4PrHF7y+IkfW67XZSnu+xymUaXORHbU7dqG6LPjbnOAxobxwqd0OQOKM33SNNcwFK+nGm0DgHsj01o71dOLRmpnTFpNsXzZ1WpP5OK5ECJuvGe/+8ERqzjOf3JzQ9gfhogtHcOEqLzI9kRq9Nz5sexTZ1yHq5XpbpiihtwdDh76hhY+nUV0+Lyp9cAUtZ/JS6ASnEcPcKFAaRYO90vXSYeIa6xfhZ6r/RbYlI7DflGj6dAeCEWVF3qhBtzEku34NWHjxoqgdg+cDLjhYgi1g4A2GlYi4SpA25PcBDY9mTbph9ZytvjryOR6vZ8aO+LQElDSa/Py2pdm7LP6sQU5mazgK7HsijLeQ8bw8cAg9cmfcmIsLUcl6DQ9izdR+u0Ys2mvOQB+rVgd7kNJKCbn4jB9Wj58cBH3NH+KYp2WWZ2O1w8KODQgz/UEaiYO3MhMpCBcahjQnBn9fXZgyG54HE2SiPVMQNCPJ+o9sbk6KG/VFp51R9R39u64LQo2l04eaIS2NneSTCAV8bFzkEGvXi9nH8JsOPjoIm/EdjJ135Xf6VlHOhYU7+MYnrXXOwLDS8CN31k55LiU1vj9J2RKHm8kNwqYNXALYaAGYxVpvD6f7Zjc/+QgFHIu1m5i7IiIG3CANjTAzphA9Ot7hVR9isSIqxhyylEDoGQOg4ghpKBsfPQudjw5GZHQxDfwI852Znbl8R6r+UYysGx4h+YSJXIUcbXVKVuCnQhMATz4Hwr/EMjYY79Z6ptP7K4kTZtj3E7570kyaVJa04wnysPA/Zd5Jv1SYSiCbhgxlh2K1hQBUWiyITUbbmtXQ0n7TjOBJXPTsStH2MzMkP6iFqVz3oHnqHmwPfcnvF9nzyKEkTW6Bl6xGsOvCU1jWK3Xgko8ZMztrk2sbesn4Mk52QanHx2pYQf0Q0GVDtTNEEfL+Ab57zL2seudZtJLPttnJO4jbbappFTlHX+6b4WEx5tKRZ3kiB31PoRKhodKPCMr+RH4iGB4xLwbSFu2Dcqg448B3R8qeVQ5G3ndZxhRNv6mYyj30eAFv4YfvmjIvAXrjt3MgMS4GlvUHryljrOH6IwaU/JQuxE/at68yFv2w5DJT+iLLQ6DPiwh78vjdj3hLAppRCVM5ShpHzLbyI9VYMR2Yw7mmI7HjTuBKSzvpmBq9XJuKHvYNfLaYcohOih6aeNBItoj2Mkk49UcdMFEQB2mRCD3ROe0YZ5IdPQ+3ZkF95UAtWnWnfbm0j+rjB5ieIi/SAzkw/7wFY4R/eMRt0KzNSMn+dbbblMLy85y1SEBvsxpiPSeSH4hnK76SaIOiETbF350A7X/CDYBcczTYoA6qaIknHESNn4PQxHyoLES4b5roUuvZsUKi1gCOpogpj7xUDaQwWUfyNTmVwRTy7cyGdqJzVCLQBWrOAHYBbi9Oc65QyCCeFY/lKKpCgQHdNtQGYyPf5C3gmDlLYgOS5jBdETnBPHE+up3UY1aE0hKHRY2AX+TkLAXr0rJedA0UzrqF+jCCBuSShAOqCgpCuq3440XZU3qRGe0CGWDkMH0bjEhWzDaW7JGvxEbGBAHoezIHywiEF018yHH5HHKra++xefpj59nc7ChFwTT9RkKyA8xC4LLzFftGnXvqhU7QCLNQFbZ7bYHqbHEM4w3esk5nkZ8LvJ6379I9iNSOFj5lArxvjPLyy2QXpUfE5L2OFHloaT6A6tZUxhxqGhBZVZM3D/EzPc+TGHkexyF4fMsM+wcRLIO8oGBoV7356oEyqDes
IIGu/vAeTtmqKFhnkY2rTKqtYdWOiisgQEtrRU70nXH9Ao98hAzVIy8OIHmWug3nrKMkUUdagb8SUrYq5dAiQe1uRfFitYKQ6j4R/6erubJ+MipxSjYTaBlJGVnmPQpwSi+2mPi3mLtlbV4rKBYyjfbK3M1h01ltoUEnMLOKQ6nrx2qHc1mheUBRpxmefyl8zUW8AOhsoXcY9rCLTq4NnNScTbRqa2mxpTx6moEJYis8+fd2Fnr5Mz5Z3u+waG/UbzrRg97CapMaPwBx7U+CZjOe4A+u6Vk6owJqHQkHi6BIvUbB6tZzZGIhNyWKZmTRnPeCgcP/Q5lbqzffNkHN9sEHdT4EueL2MNHhU4A3o6C5iKsbpYR2yjoyHXKy6TtpcuTJDiCEPh9v0UzG6bxC8jbgo+A+p8FnRDIa3c2svB0AgGyny67MnaS/wQTg7NwQ18t4RQRNchd9jgcL6Lbe2w7clSxFe3evL/fXuigIXP0HEK9ld6yGVcImOSqeRCxW2FzRfYsmEOmPQOSwqL+tbkglWcv/8NiascNfl88oaso+9QvAu2vc271XwSPeM+gTulpPGeQFRSfTyq94Qj5uxQlmsdYG8igkCJJ1IrPzBOJUU1PycSa64EcpqFNR11mljkoWAqu3INUwaIWCnzdBjh6M5G61N0dT9HwiKca1nzVt7MGaGOr/HmmnZcN8KJ9n7KoWy+/pxKKVXscbfIWbVPMyjNzUapJ3jwLeLYTRcPvQwNA6zKdUFSpN87f1wHc4Vqp/5vgwjiMPZibWzGfZO/CstTUupTsHWEpnQ/dhlCQk4DCMMus6oYlIjtQKuujB7kixyZpI0t0/kZpcEKCv1ujSQ9pvtHZvDFUAI4KGyLZB//dTLdCf2NcRw9rs5TjXP+itp3rN/Ed4JIDtr2NMYbDgZQkoNHWYRNogsGmWHAtdLIbWlwJqi7ei7UqstQeFjGsgzHFykNCSfbgiH4Goe4WzbMbE4M1vmJOq8zGbsIOAdlf4dXPUCZpRyuganf06R6l1dZXyVd1RVQzIx6UOfg40narwTf7FnTbZA66fvpA+UgsB/Y3ZXswRBeTUUuMqmGlxZ37QJlCb0VIK0X57UUSpkcXmU4tQPfEFJ++Epn9NZpnXYD7egTCyMugPvd9gX/VUh56rqIwP0RR9igmvx8P/roajGg60QZhEkHYSVOpQiY7P0EbBbXPEnRMwjJZldKqlVhkzBDS7fPjnJQBeAeyxfhX3nFrfxj6ojo9lBuWqkdxBiDPMRT/NWrq2ptZidirj6Krgkv+pFjmitZOK6Gg0VI1CjzNnu3CknNXsyohUuhGD87QqFziEqcdwJ8WpRwlHUYHW2BOAdvonAHr7OIMU6kiLX3XYAQxxjrSPU0+LbO4Xrshb7wrTC+8grJqUiPU8YP3ce9AjCUEx+biyJUmOuskq07Q+qmeI/1WPA3GfV/r00YngkYw/urC5Lc0fWw5YgdJE5dxHtQ22WHGe98dCWA3DYFt8aXG0UKeE8cbDQhgib/9DywUVo74T0NTN46h43zvUvrfR2NpLQpibegpyLQIxABwpk7eFOWmCCCHy9VWKkWBis7lLpV8ZcgHF8dszJm3snYSPfBrT8FGPeHQt6Jqmu49YTAQaP5ywOOiDg+oKsCRDDIJ3FJ9XlFRGoNDKhXyFsBcu3qR2Ch/EYm9OY4nbE4z5PeBrvh1UG1t6oWoryKMgzH2ZT6sD+xeDg0bI/IGoZIuB7cJhbgChTt0G0k0KqbjppkX+gADMeeLvKtVSnoqfJ1HcU1FuAuwOcEcr/anGP6yFwTcs7yuRC++Moy5sEXfNg+l57Pi9y4FGUmONY/xjEQ6gYz02DX05O5ddhRlKAldAY30Or24LjPUfJkj37zboRJS4AS22b5EIu357ZsmjJJZxYZLAYg6C2tTzLVv6CmIHO32xJecnmBsRuWRmkB0fzzX7QPAsBmqF4EbfY7aNbUFrJpg3UhEbn/lQ+5X6BiFOMKCr
LzLubenjYICktjSmqW8sw3VFXmtcL8ciSlfc+e2Eq/+VciyLN+hkhFOpSc1HyK8n0Oov8IxhBtNeZRO7/wDq5gPNhjLWuNpRWhiwCiGpgHHInm7pQ6jRS5dMg8RohSAoMNINglf7hjT9OAeVAowD5dxYq3EIfri2fSX8N1u0OxMzYPwObkpn6LdptgJhs4kF141qOIXIC4cVwize16hPKOzrKxVhbI1/nDPL3AilXLmpsEnjv8NsnN7Scsq7S2XqM946nOhtXd36wRcFZkpXVJbVwsSUfT92V9Cr192pFfi7zxaKm+3PbLCcCj1b7IBRTN0THJqWP+4uEJGeFhDJhJOu4UeaZm+Lhw62P33w3gFocfa9kvphc2OWDIDLFJY5rmSeGcrBL+Ygn1Ntin8d22NetBlQAt5PF2NuZgXo86qj31pP2RbSv4nNZYvxZiK5LErzDWftiO/liOEypvsiMXGkj7F2+GreWsSA1QGAxXXEO9hNPfLasVkYHc7RKFWRjkxKroQHnbo5cBwymz/Ywl4duDiT5PReKGwBani+GOvqwLpXDcKKBA/fzP4XNCaOE+wDJxeoEANJE2+hkzfbMXjM6SNcwGj401TY3L9aUS0UyHfQNEDK1E6jE/IQy4/LSCZmHCqykjsX1JUBVtqFWFPOkIncDTI8EPsP9ILrHL8GsBJfnoYRoq6Co8SI7EQ/1Vbq6KLrZuwZphi/rB5/zY6UZQpbvaX3gLEIJZGQ8BZsfmX/SWXp3JCOrBdmiPE7+G3pwPCHgPxWVafFolAL9TR9K8MI1iJlQjkvJOOGnjKUw36ezWLPaHpTL272vk0FUmY3NO8VYBrE+C4enq8UY/Ry65hv9y2NfEGU5Iscw+xJQDPFvTUo6y/6KfZq/pVq09QeTm2unHZjPp7vAw1ZLiGzrZZvzPURmwTyI3WWQvp3SkA7bkC1sHoy3OsBGfSHiogKa5lM7+t4h4CHOanVRPF1lCATasiwvBLsB47G52CAzXeZXV3k14vzYNiMSRZ1Rbx5ErrEVVcM+BM81XP6qCh+qGpbltiOaL6NpMUYK8ItplLBS8d4ilZpWtAiyY2hil+EzM5eTAjsalrM1bz61I/IUZl2ZreCtacaBG2aSnVFzpv4hlpJnO8o/OoutOAGlX0wjxecpvh4eZNLKhUckhY+MuwN/B6i6hg/3yrK2k8+HODTwb39fgl3z3JK7f4JGC9MEf6mcwwGt1iTFlHiuFPEkxdCdCsavWfvR6Jb8QkAq9L9KVJRpHtOJMeDTTt94hIkvZPbLdFyBbFI/uAtxvUsS4hEx+efbKaeaDxpSWLYjlw21KTeJTU2APg9Lv+jChTh55uYaGuJJ/5pdv+aVcryft+wtdXuN4nF2M1MWwaaw4RU35H/wK+7wmoOnyGYZpv+G70aieeD9eIcVUQbdVpmIpKvj9Tv4zvzaRLHeN9Uo7PkXQz74c1bM8FRAHsC4Wo+2h4WBRBY//9ArLkD884CCoY1FJf9ysIypqkhlh1B1l45xHa8LKUuvzzP0KujQwT0gyR6SQYF4WOOdzJ5iaPu3gCNIbo8FMaxJLKtZdjllmg55jp0Tu8EqllVyelj1ujdlA0GwXtj2CNuwjdUzvP1UoUEZuwx4ddSSz5KWWzXh3is95ykNidKfu9QZg8b03s/vD9CLgAwcmeWlKabeL+X7oSxXvy5DRamlbUYKGeyCvbFQavG3u7CUsgNLcdN0s3mQXxYwCIC4jPrU0265Xw0N3bDnlOQxuDkanwW0E4v5CDJJOadYyqRMX8uERqkIgF24xMXX/FVliZ7/ioHvfIUc2jmvgWIEGSkfyOPKwbcpHf1YgqaR98IvR5tH0hfknVJ38pK9DXrQI71ivQPJqACyJYugQrTBLfTlqM1upqQrCKlK9PbhNvPSvrOT3J1ANX7GU9lZi1GalXxEZFpH/3T+AMmRLQB2wQkCD8x/J4LDTwzpse7vG68sO+XrZIaIzjwvoySs54E6M8H
QEiQCBorSfRPTmE+X/ZGMpePl171ljauhp4EKKeciLQX9w/rSPorYGIDSIkKdKTrh1ykKVlm31hOOhgdgZYWXju/jZSwBM/EKRSwG/oiDOoncbLP2LZJNoqod0FRA/0I+GslmMC8emwATOuffzdqj/LYJAMFnMtZMl1d4+Ta8Xc6QI/Cci1a2Q4xU45hw9slsaTSi51TcrsZYHkkxYbtYmwljW5rmLbOJpb9ojWZ+5HpOvRyQxSDIbq8CvgrE2s4A2REs6U6lax+jFB9IGEwPs0yHsOlkbMwvWFfJVp17QLjdWsIGH7we4hdxCAhOO/G/+CbB3LaFjeMRcmKfyCz6GCyLjqt3T5FLl57WLOc3RZ2Lcf/zpGd3wt5tXcZ+xPRSo2mrvoJ9Tcu0AAEkpGwXqTvPsXWM2z+345m/TcYweuQbh6AcLBGqn+kiw/VjysAXPGUrWoZuiBOWtRpP/loqsfKbgkdIpO3SJvB1KADSyjp2W60fAM9pJrxAlCsrSgOwdGFC/dqAg40S2LG5ZVPKDyIdGmL79i6CPiRaz5IgBcq69HBE6rSf7xH99BHRNDKUh7vgg2hqmpOwZ2Woo7pG3WNyH8Gsjw+cbEpAxhaaqA2mE7IG70KPjOxgfKojgug1HnCYzkLXIn2X0x+MVamYNMF15mYnA4ZZTIZiaoVwD0jLy3PXZVAPQLOHYIfaygC8Vi3h5/tzn3BnyZ2PK4ZUT21FUOd2aCJfb+aq/OhZqf39H/lm7AG4VZ+N40CdqTnhOuj62pSmCfaHUqmucXlzXV7rDyQHweR2+kcNMrMPoQ/MTLQCXVmKNArJw7GimYtNSFJ9SsHIMHmA02h+JTjhGHwIQHvmMoQ0nY6tyuAhTruTCSz8vZlyQdl7yA+eKnp4zgHRiluNPk5dd8NWHNN9FgyJp+hAyJzY9L8TXUUC8v8HFE+EvU0hID72TKcG7p5B3dx2ikwiHsb8b3XEV2KNLaiPq1gjKkrIvf5KSO+KgZMaHnSBr2j/OjR3UwgIO1wJgH0eseWrxy2e0l3NbsUEEMPzJNExLCMXBO6g6MTgGr/tQ3hoGh7mlmWbmHc19XSmLMhCv54ZmoOBfEu7zYIdOmEApHsvyjvV3om4cLrdp5Q93z6J2Ooqih62bbl9AVIqYlwqqpmau6csYnhi95neOg8yGAj5XRrqsSO5ectwM/gqC+zUZtFTpeSNnSNIdLwirLe+2LHJt8K/hJm/T8Wtf+9uRiwBy4Tp3kxuQK2seRx96U+NvCpHbmkhsCq2vYgXkgU/uDZ6bdtb5re4lV/whO2Ac0X7itWCw67J/EL0I5264lRB4P+lwHwkSMyoiizu5o/VWDp9645JpTKFUo3Qh5mBixAVYCXYR04V8eRREpg2zCrnNfi30qaHsJ7ppyH2STtoxz62uL++Jj2GRU6MrexmYrHbawbDZ34AlIa/GSc5fiEQ0mBlRjTN42H86ljkZNLU7Cfu+RjBV8kq0/JPBLERp7BQu8REv/RW6KO0P1Ee8FeEDluhfPJghaaAo/ReS/urUUurVsYt55QQ2tQMYXflZcLZSMexAN3tjVSc4R4bOCY7WZy04r46Z3GlFDh8KYY6J9SpL/hdBwNxYbYL1W8MVvfLV2a3WNruQLvaiCKQ+zGG50J1vTDN2Ka2NZ8xTZC3qeDLo/Fz+zLbn2huCptWdAoRrDYG+NvIxreAyjpZxLhcHm+0RW3N86RuBETSMkcPenH8QW7MMrJR3tZt5+ympMoRNpl81YKEh7IG5Est/JOHxsSjWwW1zngYFZb2+vx25AQ+Pg1A4NDCOiQb++P5zWrzFsC/TWOmXrxTckmywIcw3nVPR+R5ohzOLWukhSjeGtO1k2REOKB5QxwNE2/Hv3AMmB3pbm0su0n6iY8DP/jDWZbhcQOuK+1ls2/XUiSOp7B+qoVySMhomWtFoq70aF9l/x5uXkCXKHFrZQvGlsW9CjWm9logKSa9dt9SHgn030V1v+J/MGG
sKzMy+oz0DIeVjreX/JldY3Q9hJTShjA/7lBCFNtzHC03aJL88rIKtf2Z9JFPAQKFpi2aKmJqbxBHRTDIvZd6zwikJRupVVgACowvBAPcotHN3awJru+LvFPLriavW6bgsycT3lrd1rVYqKrWptRk0HxIyVmsxlu+mlvIBMMI5JPUImRNlQzZdYLEuQ7Mx5NUMPpDmuqU6BkWm9PL4YnF9JbMpshNwDg9Oqp87LGh6SD7cuvfO3WHFj3EHjkcZ2Rk2KFgbQ42OudLUJqcrBlsHXFa8EYJuDq8bNywNenIK494nTY0aKg96o1/OZIePF1ik2Hzn/7cMvJitBQJaaqt8JbGMhX/RBNqoTr4NAhij5iCa6TSBsDqfjyMaOxVlCQCD9alkmGlW59Cxhj/ldpcld/RJhUwlw+ALVBbxfSIMj07KLREwinm2SMg9b1IJKV96znRriL/ajFxn9AsRg3MHllGCRVBmNy+lh3ey9IezY155HxWlccD4ZKCeAmOkl3P0WZLpJlm76FR1lCydKj4x2gheXg9CJfICgK/87IAOD4G6wgldehQrwNBc/+FoamCay84DrtygoTwKQbxVkVY6IsQ1/LFcwvVWhXz+/tCtJNMgrl4mBUnHjzGzkb7bWzDd6WQ0CCNA88dsoMXAfOfC0pZ69rmBdOiK2x/QIra4ptGWt84QGUhP/WbfiM7iqA0RxGVzzxvGp/7FXtRVlL1JP8MZWnsZ6KCudQF6WUP46ACuG1a7WZQiBF0EsHHlaj5pcah9U4TcmQ3BLqIr0wb7mnKqueYn/rPsdf4dvfAKzpmQmWouFlHVAG3SxzbiOCAQyG9FhqchszQ9A9W963bjnJrajjsZHWQ6jdUap/VxQpg3ohdoj0y9lcBuVoZW11wI6yBIOd7un37NHOhAk6DVKe5xmaRyP6QloeQ1EfXZE0K1g00kl+SrU96hP9y4/zr10SzB9G7athe3cljqG8OlJy4FMntGIZEVUO4nGbWhHUDeuZlpkYWVC0h9NrTuhzAUFCZf4OsLiJYkNtZvmUe4ixJUhJwLY9meXC7UZI+9dNsdGV/O5jbVIokfsq+bt3VhH1OcSAzL0tYmrADCAtnduUolR46E9fD7W6vmv50yntE3KreXcOvaHTS9xHVkS+EKiqn98NxHmRpjEAjlm/qVPgrmHzCcKXS1cHcPHJPu8UCLeTpTrLHvdLm7Ngc5GRI0OQ18t4xfaYWW1NbhRfgd713ptccZYZsK5JNZ3dzBm+U0TzSy6RdTNvyHxivasOlCjemE8Ipcx1BQWIAITIAk7BFSkuwZ9MifrGZ3Sq7okStz/or/eK7lN8DpbrH8PylQRBKXT7Pz0OxtnjmW7RPZip5ndhUrNiCCtbnrXt8jtTgFckMvUhmj8kcYuWei0vKCEqO+I66MkB+3G3HUUyr+lZ/yNHIpIHzkvN53N/J92jx6iEK5Hd4HPhe3js4jgMaB3hZaejlsiOfmvHV885RNZtrr/v/60+DzFqTmhhHysFyEZ6TkvnyyeDSRDQp2NPCtB+M34Bb6u8kfcAd0MF20CIGUD1MhSXaZSlkbKEWFwcr9y7EvNtxJjhaqjCaTFaRuQYA1sDROGbMGmSMdbwV/o2exJNhVmRpgPldrV1czM+vHd0i/8aY+wDP2TlkL1cgQJg5TQJpt0/jb4LHFvcqkDJcY6JEs5ZEwUbYE1e7IQN52H6GzLK7Z4Dk0wOuXX+tc3BNdhol9fY856wEMe248RU9e1mUuDMoIiEjbrfKv7xTdUjzeKmBUNXtZ9kq2XPgYDFg7CtKsgyraFNpcCUh1vvEaFeOasCFKOc9iu7kCztf0p8anvhyd4CYO/r54pBPtL/ucC3BpVRiniQmuvWMroljCkN9oIxpeEgjf6poudhlIS59jHxu/fJ4RtVcUrdtmYyfRc3Q7SQJsEHRyqK+gilRR60kXKljF8twWUHJUXRP4soptlGH1/PHosrWIkScrjUF5qnlvI/F1sj
ofZtpNHko0lq4gAhURzyPCKJFn8p+KXsn9hdQbPiJWpu1sOGG5CvGGf40bS1hdvkbqPpCJwekkSkaTdx2NOQ0e4kZnDZ9B21yW/qDtBwhcrnzb/6lWg2mFUUyTWPxfCoze876MOGM3HkcvMHNdMM1+TOKNSUbhtsEsimA/eVcAGYRVDCey8H5ktJfWnCn4/WYzDOikm8B485rHTiP6v/z8B9tmvG1hf5gkSF5DqEwAG4R1/cenbFqJUmo4hYP+kFXFQkqpMXmr5+IiB/MqA3unR0wK+FsdH2D/ZDsZ10dos2YctNvtRTn6Gtn5bXMXmHaoi9Pagot2KNhofy+VhuYOMO3k50Q9REUQn+83oE1rU0t/+9YAEqG5XGZml99ySzomw/PeXyjx7EiLUOaeYUNtyoJIUfpPyyhEZaYvu6+ayk1BxzCfRB518YXzjKJ/oc54x43uYQcp1N2fzrxPUNKEiGfXl7h81PyvmcxdO1tewjeQPnbCbGtWpk4P+WgeSYuD8qXRtwLtUx+EDPMnra2tEJlqNBrzMjlORW7XUzaBxEHn7rQMae+L8FZQaU+DzjGp4u+0FsJi13i7ILjcVUxJIhi+Z7X5FLEYXb6mhh31wat1m8y2TpYDVuPJBLOxHD8BcNC1tO8beh1+sMSM5cfAelbN6vLpaMGqKH150vOxSZQ0AXFnAsgDmjjb2CvY6IlEGrscSvTl5PWQql4ed24DmMVUllU2DedqqKW5cZuwqJC0xsVTMFxYJYaO1iGG/YMbGP2/QP1C8Km3cbX2OHMnk/a0UBO/1rQ/Q6b04D7LsFjvZvFy5hW+f2J399A/Wd2kXWHs9GGTq9WGTVktleHSm8xMWyuly1aVtxJp7dVpcwmVKo6npcmXRwu9aIAAE2sTBf8dkBeNXGrWtca+D2PCysWtEO5Jnpk3iARS6IXUGAtLvcYAq2M2Qj+grYxwrHrTENYGtFHY3so5sFkOW1JD/OhUUJl/FuRbOjkfvv7cYxCfaIn6sK3kpoIoDNU4WmqLxHIn7HhXm442v/cXWaCYOgo00FndtT8u1zGIEnwDCfk/0o13r9hDC55/RLd/3BGmrbsY+MC7ZZqkeGsdarso5wDUSWH7V6xpQgD4SwwZkj/z/BPvilT34FHTYYqCztAH8s/zwz6KxlzW/wt+ydiKE6KxQ24o1T7GpNcKj3+d+3tH3ifoEyo2AwsfZdhV/JpCK+D83qPOSNnBFCmJanugFT4SvEk/NTgkKkJtHqSy/C+OeqiheeBQDjjqluI0mn/Pelfc/cEYd3vCJYNAFdN2dSuC8rO5GHjA8ryFTsynHmXZOd5YUl8sE9twlxL4xqWqoS/Lh7kd5rA3idd9ucj8EFry2WQjMug/Zij9k1AeeNa6x2bPTnxuxuoLNgfHbHTxvglpNlGiOgjJsF4vt4X7vCyAfOviZgvwLe9J14SELXtqV9TIZ77x9Nn+gGoBpY9ej6Bng+SSVQXSid0NCfJz2jRzrnwvBa14u43Xj0B2az863FkZLR9pIQy2hjr/PSD3jI5y7D6J/kQq9Gms0xaPXxCXdwhDE7RvtWT5kbxl3SERhbOqWxVEx9Qu1hmSJ5r4qLO1C0T5pdYWlm2/rJtjrkD5F1OEvWCRDJzqWWLuTyr5UEsJxlitGHyphOP9xa8gy2UwcMkJOOIiuqpEHKNNY2FUukJOdzuhJFRhz63gj7KNPadleXteoubCezEIC062qTJQhvVjYpJBeHxp37nTZ8lXF5uZOIDXI0l4OCaPyuG3sGBHpKSFDADhkxcZp5pbJGIyIZrPov3m/PE3Aypcii6XkpslpoPJNPR7R2zTK/xAMobtxr9A4Wb2yOthhaIAZ9RSWLVX0TCQk0O4jA7dvhqD0w1VMNUi7Yt0ewRJLYqhXhzQimMfrtB26/ya+lX7L/KIJ9acZlKkA2KhW4Ci1RmPpHzGOZohFEB9ZxrrNTD2gLDZ6OwNe/Ee6PRgGYgAAAIxt287Htm3bt
m3btm3btm3bdofoIHfl5AKsrWpi92ZUHihF1eh8d0DNyMWZZDgPuKhfx9P7tNTDQXkIxk/X0MuYeqGFeXi4+EwVFjMnFSSQG8A/TGt1iDYUxHsLPrHJSQPIRDbNcHEBBcsvoVVUBAqSzTA5vQ/22Vngjv/VWQIYCvo1fpSQL+mcfQ3LF+3zD66lxjBr0s8IhCzyyZlxSMP/qpiwg4u1nfhbECOWTsS6cPjd32pWug6JUdgKnBl0u8qLe7ufxGfeo2aslOx9zqGs2FheWYYQv9WYP08JSDqmZ8C2tuJyth6ghDyFkQpCow9hDjQhQh9CVz66uqYyCjZ7OvP3Tt8HYVsE0JfSKS1p6iBa7s3dfaeHr7bJKdYYnHKmTkQr6tA43ydY92TccdEhIWsYbO0Hpla1n4vkYr76zBWZ4auSjd1OqVF+WaiskwwBnt0JBuZvb5/GnGIrct1aPuRtK1aVhoh0HFIcNfq7SficyeHcN7lPa5rbU1jrBr0Q5AcutvZYgLYvj6nJYzZ9B+8q8zdhSfdYDuK9gLL9+LOHG1E6G+3N/TYQOEOK60OwXvhsvHaoif+x4aufuaywncYGFSqF043X2tMxfR6EqBKikfZMQ7yMI8wuryNDmKGMyk9rzqSqgQ/tXUX1wRNh7JgCA/k7GuIzwnl2jcPSaRwMO+vpOR66bqI4QzxWYvdA+ZlfuL3EerZ2RXMn+rDG4DpVPIO9NC8M4V2FVx+zWBbuqF98By+YA0SrXq5i2kKQYokY9v1mTxNd3PLM+WEqXqnQGLyo+YkCRa3XjlPtu5ClKvTTXiNi7gfbC4xYCSpFktqwpPX75Bcv88uWZHuhuFek7W/dssl7Qbwj2QJyqBhrygu3rNwXGEetvxbCd5r9Vnh0zTtA5O4HWQeuGlm6EMQqLjumh16uho8bRLdvS3b0hvsXsvaYd/GPV8IItteDJUtQpA+B67EBPaMa8zW17RHZ9cARjMCHEDJ8FxI83JnZxsXdWAcox5xRrXyRMWOsl9U15xYXO3NUjQaqKAW0E5+5ldN9jXV+iCdXXp11kEPeXWCtST2n0vUpbDSdXptkwIrx4cu22aDGMjydL/0b7aJMVFt4XMKsLHPGP7rQjKTmR2Y6I8KD1W15GC0Qa9QRhmWb98w8z9APkmuzRqZQPFOMDL+iSsRBBUH2425rJynuCa3GkoWEPgQc5wXwabX0hrsyxW9E3heqy0E0qPOLvE0c0PBPd/qKNNFz1+01KSvMUoQju9BeuIZInlNWYLPaUbiWqoom59KhG35Mn73m+HthKxeZ1gZMNpk+OtoaDC+N5I+AoOUlrXHc69wkmKzd5Wgv6aiygwD9ZgDlTfJqqu31eoAaZxqMbt/vOntwSnVj3mQiJ4ZnNnW0e02xV4wbdpiaKJpdclUnuCfAQAFJkpt1yIwPr4QkDoVe/UN2r2YbaSOowuRoSi/XoHATPhB1k4q8tO/2yeJKwe1VrOtS1TaSkGob5WTgCBHWCm3jGY29YrleiKy5Md1gVmjxNIxC2bdn9f7W41d2uVHatMt8jA59kL6veKjwkUdlD0gmopjzCIbFw1W1ii5tvLw7mW4U/vEABT4zjccXOsrNIlBZDdIf9APDNi1XCDRpxef9frPtJ50LWD16Nlq6CpiK6VihNQS8TJy6xv7Lrpvt4YRDmM1ybKjvZ/FblaHvcQtXbC+LONmfIp4be0bKRku/EUHRZt4VB9TMkPHwHDfU5opu9Pd5kgS2TqBBbIYF2xFCSXYPiCisKTKWrFPaDbwmq6RaP3plK4AiGJT2S8Oo0qxmk/7nSzUDJuxrvKhOaCFxeA9avOhO30D7mVmcLdV3CW3Sarxiri/K3lbR1deg7bmhpQluysmgboUkcUoOMhMB8CrWvfJgAQW30/tgq9PvfHQOlDZo36bF3jtwyJSbuKvJkoiN2HRnPT4ItXIOkjoMlpZtGA7wuev9vczlsAzUsFzcW/WyZzUYn3zM5COvAQHT7nZVR
Tou69qGEo/D4xS9IriHP4PgMkEnGy4EO3mlX6V/xwwSYCW55+0PYKlYuyTyjY5lV1i99df3U96WcwiSBi3qRc9CMOyW2Pa81qkNWzgTGpplE7sc052lFu8Paf+YC1d7RkRTcWQ16U8LKNGFT00Q+SKie5LAVRs1+K7DxfqLTPBy4vGneXqNB5d8VB2adeiFtKulwHqDORgW4CONaHHLnGLbNckw8220UhrPEs4y45g0DuymFQivD1m7vUiUVYfi+PAZOLQsFrVw1Khfq5RYFqBINCIhpD6urFHGjTOlZiBqqzIgfJMxxpKJ4SJGzxGHfSnzZ45cDRl9J0ThUoH6Y5q4vsuyd6K9X7ag0GaW1YJzlAodNCenGBWerAbECJQbHQWcwj5tEahPsyRMu7jbKN+P/PKVV4lmWO2DaB1A5hCiX+t86O7fAoh2aBpkhp4Lv9CC0mXmn0o6zLfjtZglQMfdDEQ/tj2wIyz9b0beNQzZ3w+uZmlkIXDVxCi46smQPivS10+FJCl7KrNjWdx5eJdLGRPpfi0GLVfyFRQWRj/zMK4bGlMxj9U4p2bYy/hg2Sg5qJXRZ3bQ7omAKbhmuJ2M+0PKe+E817TYyRgXwf6/XEVAbFXZu8CiQ4+Pf0rbmGjdOV0VHL6D9HBUZ/6jIdBqfNzZLpr8vncX7TD1EldiIm21mJ3rRgPROaAxJ42l4V5GQqDPzXfz7tEA/5pA8NLW3zPp6lX8ilfRuwc1Q+4Cq8KHZ2fkSS7Dk+aGCu4GDZJIerHCujeCzlNSdHz53chiOaPSEL1RCaqw/8hcihMfmsby2iBqhs4iTNq7j5eQlcx7hMNghRwQJ7r8n/FQj4niClc3Hs0d7SGfhb8NTMj36n3zUtcfTF1BeLjRz80j7Gs/ltDgOMfZgn4vTemrU9m6wynsXoxnWn0RNQravoqVZ8nEDHEy6QlKNNixnrGT5Q47OzYMr72V7hq1j/EfilWVK/JEN7wPYVA4IOMjFaN3mq5POXrSWM3//np7blkypco4IYwFnrUM9yV6xsUvEpRNd2uLQBkvb+EYOs7nZ2VlejKntMx6/7Huh0gQy7DHrKVom2DmYx6XviyP9ZW8Lm5UTN7Qu55EBhjeQwVJrH1c1Zrd9DtSWqFwLpOQ+GEs3wv3YiidwjeY+qgA8uODGMNGWSZqH+v9HSOFx4nWmVeiTaf4i+JEqmMo3X2Cc40y66Zpxvf0Yk362Xmu2rtCZQfl1p9ilGaYL2WeCINJazjDsZ7DbpOAi9SDU2+3imgPENWpqMIYVllhsSXiMYaiEH0dJEbeL2i90rK0QpF0pX0dQI3z9YcAH1ZhsX8qg/jIxltQiPr0yRffhT3i8Cjpec33A8W3L6WN0Sqzd+LcmXoGzSW/Hs4nRJ7ruTYTzZQlcQNQHg6cICh6L5HkCW8BY73WCCgHcYjCp9hkxYPTYhnqMiY5ZwFbRwvFkkXvOLbtzcl+gR/XEy8/glNEB4fXYK+hWLsOrOXsJaXDw101hYUbZotMMXYEnM4jk0ZojxS15YqPLcqnaN9yRyx1tV+5Z+Ly8/BWUxkGVfS6rxCMP6lZZy/n13SDBWrM8IMzkZ8tjN6Yt4rfryoMZAPdvlFHR4Zrit5A29EbRK66h8Q74V9iBiUi5nhuNeO/JEe6g4xroYuZgR3v9RVvU1yWj+fIGw0nw72MLuIgVxqCPQzvTOR7T3rNWPVwKYO5SaD7QOtqwZZXUcKy3R/EJ1kZp2Z+JjPxByEtTSdfkMDfSltL2w/JLJPBavS1g3OjREXpmtsZPFuwK+E+8reXJheePg9JW7FQ+EwHLOhVTPUULpcQT17xDQD3EgXQGa6NC+PGoqiv86YucHbv+G2pr8TQFKKxL4SWm9uYJGMfVs0jQnDrIDywjqEBX5fyDQQOcnch/GPogo9NPuKoxn+su9GRqiwBEJ74nNAcvR09BOm+q7+vNKDCmTCqwHlGN+t8BagmBx2SMH/OkSlZA5ws5ISPX
GPk7HtG5qGZvLLsJIT+6FoIBNIL9wYyn0xYd0HRoxPgxFIEfbXqCzFuniyN3OZ9TRAp+niIcME/DMZQkkM8us5Tkj5bG3nk8xa60IQEFgy7GfH63W9wy3dg/ZmnQ6LCxrWRcKaliaJjodbXwXn1fzHUyGRSQIqOvVAwKAq7h0KPyUIPPBedC9AFhm5q0lkLZeVQxA6Zs7Z/HYM2lkUMWUijFxiS/rioqIFiQuH+3KdXYR/gnKOJ0CjKdbX8ThnROv0qXUZ33j2cFwMoZO6/1Yvi6fLA26bXSQ/jWZC29U/rIdoZVWEUdQh/uTIt4+1IjZyox58Ppx3VjMvT1Kz/uqY53O4Ln860bwv3Lsk8acSba+kAZk8/5akipUJ3MEx5GzUxWVPyPz/RsNiCA5rj+J42RCmTCquWdx+YPkrdgo73/bP2U+u9OGnTiRSQos9DtihY9z1yLoHzdAr9M4E4cL9rCXQTUCrmDECJJ5kuvitFsT1kSHQEUgxiU3J1NI0DrWkZnpuVMKBD9pHpKApW/b3m9YmBNB0AZS6dLI5MsU49ZY1zcobO7UROU9F+ExKcPBPxTGEHr1EuLpKRzzh05aCMIu+rm7MrXdutM3E9vV+aqeYtbkJesL8zEwzjvDtt6pvXk2tox1uwfkuSzwq6+aLGgwKnesmGQe4FpcUV/Z2Su0pPlgczaEhOu+OSwifbascEj2v/zA5YwcHi5qhbOCikPG5zDvIgruRzhOvP48xHXzmd8KUAPkwyGK6lDZ2bbuZa3cFzaN7vP+td8CE8dRbGuydzL9/wG/w6w+mEpPPLi7MUu74RK7pFoMJvgr5O2HPjry4+JjBj95udb+5P//xHgcIExneq61pDhV/rx9Rw7f/8eo1cpPmpHrxnf7w1VEo2KoxECVfM/esRyubAJbQanSfUn2YBdxC9Uu0W5oMHT9yj4XlbZeUwM+cu/sVT0eMee0sydwfwFKOYZ3C0ZhVH5hFeoVawifbzwdy69E0WuNduKZJ9Rb97CX984Ta7R7vfqIDSnTAuzpG6Szd8v5dRrAeF8Rolprz8FrApq9GcvufZhxYtQSSwrGgXfR/dC884hnRsQhoAgAh9l/YNEte2XjMERv43KzGeZyTBlqxYgEqO1sxGpYL6OI9ysGaXd4YE4JlIHewIVtWSEqM5G4CHUm77XTattJOYX3sszT7pls1s1Q5A9B1ELsr7zdJG3Xmh465Wi60Tupg+4pEpqqGDtxCOGxAZ5hdKn8va/1I8PTooe7PugfLQqhY5uz4jCT6d5khe0mOFxAFLR8ENurAwgLOPkbgsRUbcBHsBjcFsqLZmBCucVUBRZ4ZpLgNI2RiBZDr9Q01pgBZFQjdHCTSvQRnkYfigcvRz2X4X6/Fg42Vxmb5+5D+rAspQ4pEUKEFPFcedA0QNeqipmJPr7YsuUymz9pP26KQuAgXjvjL6NSoy4CNx1q0VUDTpb421tx00supBxNZ08HpF9aGGH3Wu4jpPjuO8C4yW7GNHIfAYmOu71xlhEnDdWiQgzu8qECpkCtl/FeL+zlgzf3lR4JpyIlOg+wjWzM1335LWlEqIiGC7aCiUPZqnKaHrsv85ofciHRvuKggXvRMSTykgrBc+89iJDpTuVFg8W7vvfs8QFq0y/Ox+AdDID6y9H6+9EXiqsYQC256jlUSoZ81HyeWSh0ol3UHLkHMyTx0IrzPXTXtWwvPzdmd9qKHNKBGsLPZ4eJoNFLW0U0IljL2OFZggihcsDHa4tK9Vyan39acEw9Dpl98iqSs8B2vT0//su3jtIBpMadrRPXrWPOUhdtU8qA6wPyjt9pn0CZkjY7zvDdpiZoaSwMP9ZFJimq9d1TzFQgeVVYAUkDaZhVAzk367kfTYB7NecwbCvo4H0aNotmbwFe+zmc1WH3sP0UW+zEdy+u9TVebvyTlQcwhpQ6QZsHpcdJCtLBUIl4MpeFoBkslYTUL3bnLJkik4wTPOToCkzH92tvrisg5AA
TG9KJlOceRoQGIl6fzWiUS9uXtEsUX3a58QpEcUoHYhXQHixpjCIDFWn4jLAb+DdB8ZWoMn7qVfB12k3sVrZEK5E7nWD9RW2nKE+Aedb+ig410HoLxqO1Hm9z1d1ubmrULgZvBvWwhoN3SWTVOlYwLlghjsJCX6queZIRuaqdMyo/BOsbLj0k2vlgp/PzxtWf0a6ptdyE2xJ8MEdk4OiAmLEHMMs6vdTME+1oZPzB41zJGq58fT8HN1QMGYQQZn39mpAOHBmlIkfAnb4pBkhHLDeQVAi7Hl3t6avaTF2m+rtl5i9aO8Ib8Rs81NtYRSkcM9kMFaqBMyIt2UHPE7SW9TbNaZryqxBt/ae3BRBv9PmpJqG4epLsgD5vOPKdkjJa3iKzEIehB0x2ZI08QQha8+39EUe3a/VezpITUSZvj6dZ3XlMZJN0cmMqeV+8hyHoe3opOYQ+3p7YfzRitsk3scdeZaOKDPepIrAYowG11xsipKFa4vJIDYF3nOQn6fSt20EWkjs5Itze2+VPYVi986wEnoWE9PozF/DIpikakPOyvsCd+muOG5le7fAk40Nwd9KGdyCBtQp6o+w7tW6RNa47NLvud5Xy2HlD+DnL0QX6kKtVmmkYyOEcGgQl0CDXlnq0hF9fEaCGdeRX9p/FzxAlAIjfFE1ZdDpitz4TwEotMcmebWv+f1C2v89Ga3R3R3CB2bGDM2yX8xzBBRbMxECGKAT4VLR8GxLH8zlXykP4rxu0fQbejTtCru4dsdW3KqG/iMrZeaUoY6M3rAH6J3cR2MlUawl/8C7HHuM4zqAqLbJxxlvyFEGfJog3dY39LItkz8UQ0hD6tWYTFNv6Q/DCkDSJ5DoLl90+KQhgBKwV1qtXqzFYC1cOEiypcfSJ3umjV7AJIzo4BjA4bTZmQUZbZpJMceBWfAiVqGxj+hYGWpFI7aGRAfOLYxZzr+aECjVNVTo6PtAhtjcOWnO4Ma7RxUPLWhn+Yw+61OAxk6XkW/IG4oSzZnuVSqMgWlQI3bws6PZ66nYK5zqZBb8kTJsb3ibotuJB/mssB/m1nuXHEhsHEfAFzT3uJONiLALa+lVB5dEIRiyf6NoZmdxlXB73HqEIRYfsmyBEl8ZfQCLzKIqIG/uk6XnUBlnhlndmWD3PdN7V5p93Ru4nmTP8w6OqX3Iw56w/pQmA2siZG91N+9N78QTkMVuRKqkjBISq4re59EYYR88VdrM5cxS+fwvOoEOciNPTstNjeKRVibE1nU9MAhQnyDNQlq+kOpiKfveRocHqS4Kso4XjK4CeIm+nMYZnGno/MDlBxDPZs8mr75UVYXfk2JzU0Ynx5gYUzuvAsihQGm//3b8fWUfybPPFAJAIuzKQYTefPJw2EosavZFYqID0vksC2FLSY1Q8TpBGf8B5/8e9MutoS1O3swxBEayC+5GGN8rkPq0IYY8yWZr6On3QHm1FNHDVWus2tutynuknJG7b9yG58Lr5qtJzFt6bdz4U2wobqTwt4FOGB6HwZByNhGpLT2NrUSTuPIiL8sgUusHvzGahZ+vUTzeoeDkaUyllL7l9UXmZGZ+FGltbv/Irhv3xI60Sb2YYk+cLdrS2msJHacngg5pmCDKeYGO7POnfH4Z0Z08TX1CNd6aQEMtKRgXZ2j8UX+GHD1Q376Sd/pJcG+QikxYRMHZFfVMU8Lo4Zmd0j1MMLZK3YgY2N+DJRGrJlaEsFNHWQ84Ckl9NvYT+wziX+dyVe1kH3q/pWuF3SubS2+AHyadsnBok2jUqWhPHng/oapj1+belDQ4pcHVAV31O6zu20d9uipCBJIgrjCWSYAFLQ/Xl0JxrvS+BRffdD72tArufW8tYm9SXi7HRrbHOxY5FrmKn2XNjBur3C+Fnf7TEamBhPBneQ31QNYVAmwIig8NNfLuXjnf1eNxlCea5bJFsuEa+waxp7GYEdNp8/0JuDV8YkoSvNKnU4IscIk5pJFxztPUot9eWsCTVE/a
Wjls8kdjk/swxVtF9BxRcqpzMnSccT49Ghu0Cf/419D+9Te94FpfFU28xVcJNSuFk6KIU0hNxv/AiL3XpbQ6M6+k1643g49M+Yy0Fv08kdK4t4u6wBYrj2tA14KhAwE7yzLxQqpOW2m/QYS6yHE6Mv4eBxfn90ht4bBz1jN8uBDE8kq7OuupucTLNPQJRJd1IZWf/xmwCQKr2+kUJbas8t/DXOTPrd0any5V0ZOKJWGIhNrt8onSZG7DZ1JkhAPgLgu4WDGUcBh2ot+37hzmcTZBpDXTXGApf1YZpnKB6TsKk/3Yp3evQDM8g6g3lHlIuzM3CpITpRvvZ+egWUB/eoWfVyZ9s998eMfXUMbISFBgCOweuun1Vhd9NTJkxqVNTjTB4WEKBIAAImTmH0jYjtHpVHDaxZVmWyPb5QVTQrtX92Jsm7HRVaqe6GCanqaolj1ON99wAHvkkBauAowTDVk70kBgT62EJ/+99UOAV2E9YL9I4z99CTo+V3Rx3OlPg1QHdkfoac543XuZaaZQ382JHzgRLgLjqn1MYKac438XvR2kAjnjO85TuhTo0fYBMeNJEX611FVS/bBMAK+FmQxPtDYu7Guiu5oANCTxNkHnJVmBtIh+9RA5WbD4/MmGN25LNYPIjHQZR+vqjN28VGy2SMxJ/rGrNJGi8SB/yVHTp6xoihPuimzK63WDd37dT97CRVPAvaRegdB2iVqNAOzFUjS1GS8du6oZIvY+RAveSL3jkqRboLyv0ZQjdTGxSVxsgYwxyjz9hj9qXmSKupj0uKeMDoHNBfd0i/NTGavugFPn1l+D6D1NFL1m9PvXKUlU8U5wfRwl7l9k8MJJHx1d7VzKjbhOqlUq9VVIDqszfHgYUyNlSyjKSuvLJv+Q6dgElRBuGB77G2MadfyCE4iQujiX3DMimX8iBFdxCwzz8WpcZK9ODmzrVrJsoqnP6odnxWLIyASBAgyISP+9ol9F9OMujqlCCcMqGF7HR5lCBtcDpl+st8/hgSgGUH53n8AbST1RIYboFLUjriWkMCJTt9032mH0BKu4ICj7SMQccUv0iNzLeOIS2s3IkdX1QDZgLlwYhb031Mb+fxpkn2E8EhtuFJjDaUEKhn1WI4uX/zHcR9NfkC1yR1igJeb9N9bYXo27pDwWcjH1SNYcztVerEXlrLyMaGoX6c8wUtpQG8ITcpLyrnxnumuGpHf2ru0xtUMI9ZGAmP0sGL+Xc3/zgIRF87q2PssXg8ucKVueATsr5T6FKff886Vezq6iLFfr0LF2gbCFYoud+fzeepGeGpSJwGRSo7x6oiHaq2dacL2nPfy3y2cJn4DzROyskmFazr3oNpUXxyKr3uhwIoJ66sao7ggJIZKIxPDrdrM4SfO38OAn4J66btaSR6ZUwjiqbhW0PiWwtIXFLfexq5sDXzp1hjOnz+XDGTOaXcSZtv6/CYZ7Laf/MLy6UeMM1cUJd1ylhbslp0rE+RAlUzYDDtWTnByT5npxvtheGnMZUCXUo0W4K/Bk7TC3UgobLYBR6n9yk/F3hNK/0B6Z6Vqg3qSu3VKogpLiKpx7yePULdTd4PBHpdlZxbbulzZTaxO2aeoMs63EB6vCZ8aMM9Re9RsijhPWP6sqaahz2zt4HBbiRbHSIn3gJZG8S2im5tdtjuqIWfaFhadQJwfB4TV4uauMC+tf4T7Kji3oQ10cnDLZv1jwudLTgATZaxdB73VetQ3kcpUvKMQxUV48t+1EMWOr0aA7A3gGwt9lNfdXqC/4W6brnI1z2depShcmTOrht60s3NSilgYUeZnYIWQuIFM6mM8PW4RIUNmRINd7OwyRtHM9AqN+Z2Yg44NCsRvei6E2k38AnkFeMosj+DW9fGZJVQAg0xZunyrwQx/f8lRBm1sCsq0iNXRgrte0tTfZwv10qDqeBvTmC3O9tcwql1dhBh/TfeXKTauT67BHvQLxN6D/8QWzD2q5GLUwsj0VRmamu+lapt3U
aWEhoeu1TPvMLFQR7jNsKwes300gWpEqkOZGmSZ3vN2gKo4iEZL7VkFmQluunDTIq0N8v41HE1FB5oEagnp5J6iaU9l4F38KJqoJU5N8hsN5+mEUqNSTZS62HfPBZESum3ToPuN/r3S8W1jDAgmmdL0ReRdD9elNeBW/wtthDy0AathavM0mJdegviA7mpODawr0OxXuC0cb5ZAZI8pLljAZnydQGWGkEu01RKhGmKL5FdeKgXaTSoquo1Pn6j/nX5uHyoEtEgYdzrG23HQ64CfgEtbZ+qjahgimtor27/8yNppXOyLBojGFhHKW9mbzkGz+bRHbfEadYUIwT2fsgmBBAsXzQ2ACLt9SjxBc1iQ88WYcvRMGKnzu1xFmpmU8ERW6cPKc9ZGhQKRBehFlJp0xF87ZwtTlvOXIdCpEkulA30g9VP2gVefX7b6Oxe6jRDPxh9DFwRH1fgXZxDlXWgW32w+wZzjN4C/mwm3ewqfVIQYuDgExGC0ie1OzEd6H3Vl2/K16bxWrSloLD9qySjRejb2hIQI3LNmq1ClG2lQj8OeydLFZ0Fxxh9uZ4ZMiJ8Hrzx6BSipk39xa+AHZzw31jbE6ATWnHblySa+YP2VywWlvO4C8xlQWbCI3DHgpy7TyFrewx1NiDs/0Xwmg6JZzudcP6uqRYfL78tUF4abOw3AAa0txacTM2TeUcwDDQewNIg0/OxcB7ToE2QHJfeE0o3s8yeh9dFRkz/GmY0hD/cMlkOeqDxeBfjKN7naR0O2aSH+4LAgEh5Ft43bKBa4BAMxnyn68G4O9VX5+2KyXg7GH4Op+8QSemIkk6wOjkCsx49MMo2z4GRlmmRDR2EAQl6UJ80xbVfzT+Gnp1GQ6wxOck/1/MPoJs6IQN7mSt3EmozvYvYtV7aWQqetN94c+99bvNWBz+ao61ClEJB0yz/vffKas+keF1u3bBCGMHxh+IVq2yTW4DpQUqiEB2zEXOCbyvgboJfPBg6MF4cEzwa8CoUN+zbsNOEAb4cDjMGSsrgFKxusOOU/FHVFTqiu5Xc7zugA+fT5zfZ9KEnBbxLn1GsywPVS+B/B5Wf17ofJzfIGDwcATuNVFXiEyQNgjtiqh6CQdhSZfme6yO85L1zBtRVv1T9hbXHfixS9jEOMs829m+TNNVBZXui0ziRi9i2LG1Aru8g9lZouPLOalruCH0nCW18KfrQXqhS9G5dBdychTt6f4k6Tp38rbfhPu1qeomHmVu20T+eAM2HyWUS6h3xcos4xjcMJ/WNmeBuR0MvlVaAnyMcc9IeRVVhAJtu3UEM8VDlsZr8qZysSyADxZBmT0QxJnfxDNhJRPNXYtt1McBrwbo5IwiJU2L9Vp/xkM82mbiUg/I6XqUE4IhTYTMz1wW67sNT0wksVsIpjdrwWWZjFXxhXlnp2I6J3603csxs0AxDhr8Udv2OdCQJZ0tQZeVG9S9QYcInH1SU9jNZGNEqvBsna2aYkyozg62u6EmkpdAK52Ud+bS6caWInxxrfa08CcxyLHR68v8U7GVa43qG2EsKuBtprhehe3EdKFJId7Kn2W9Lzy+Gg3KvPq7eiNFfqeucuaEFbSbDtQ03ADcHuUpK73dIOE1sa+ufH6N3SstXw+AjqDIv5C87AyzbDg7nrfkgnASr+1z24Z/VOGdvg79yzLkF2H/BspaUUXN5jl2xOAF0X7LwE/DSFXKrRBK6OqJ8Y8TXm+NhgoEdv45VgkBTmVF2fjA87uTXmjZPxN6mlS5w7Jn4aDOjb0GWuQbwpDSlaMJH5KQXKodeiKFD46LZdGG9ZyUCFnAASbBoJmrxuUjIpVNlFjHgWtVVwBo1Ls46dpPfflbur9cFfkC0yMNcIDe4YkqY75LkigrVjYS1Tsp2ShXvjcsqbsUdWyF3zFox2U1miynoo4I8kllEXn1P4eUDT8KznvGdeTXgn65EjqK4y1hL4Pbx8leveV6PlzAQgtyQPe8n3+lbImK/XVYwKr
t47ybD6dO8wDfhebx440SHhJM4vnlYZYIpfDGYvhnpqVtbvM2IhcUzHaK9oTWTtCn+Y4g8iXFQJeGDjr5MsroE9AaoJttSvF9Z9L+9hwHjbI8UR84AJxBrizVWso8lEtEjsKZaIqBtL6GPwoP1mzyfB+dDKVqZ0RGwKFb1Psa+8NYrjSNx5mcJaRXwVr97O9P3mSminEUlO7i8Ta/imGO0VS6AICSxa7LYEI/0J/Ac2YWRBshJoGmcWSietf+M+VSjXnJZEtVv6FqOAVSbbNHJtqyfwsXl37oaTXs5kqL6SvJCXMlUOtIaDO1TxrLMg6I8O9Y5PEFj2ZFRMh0phnNOnzw78C3LNB1F/k74a4bia/CbYLq0CYIhrbi1Kw8EJXo1vEmihew0QS36P7r3tyrrK6cXkqynfy+0K56FJzWOSEMKVAGweldbrJ3qmXhELAHdJoqolECtuTpMz2XXoQiL96eq9RS1hYZvetfx2dlmtzS34U00X+rYMa8YayjJJR9EBNTF765OygE/+7pe/U/CxW6WxTCcm9JmcG/dV+1ev8Ep48PkvnrptHl1uTOgnS1AF0qr7vC+YYvNteSRo6IDp5Zba8CTwHcT8k5TmSBnydCjjRRS9SQLPQ0/J8L0pXwBXKvsh+MAlPmUZteXXL0DRHzYmR/W+uLtxmp1bXs0mAqPC84eMzexQ3BY+vWY6pPb5IuqAefvV+wravBM7rq36FnIwp6kQIRXqR4JFb0hmojEklJj+tjStpf1RuV+sBpmkK6vO0R1CKDb1XcsgiOd9UsfF2ugnx5dnwJZy2L1qZwlFYwBkpr3uWT8AorKesJQX0FHfOY5TvhXBGw5lHB2rKPPfH4uAOVUcxxutiKRNGzfY+C1E/7Kijo7vUxhxkFjwz+Rqx2m0S6p38ekGdxZfc1HiZhIg1i2Kjw4CMM/sLhH+OGZHK5gis3ilIkaXUi+R3028oDXRM1uGND2RJGRpvEFIksRSK5wmStHxP9GJMtVR5o4udk6us8TBHW1fLLPxyu1gvG44V9fEtBEY8Bhzk6a8p6TDMbG6I701S+hWKJiZwUQ0IGYjdfY5remFqikNel/GZ66RJURtnDNkt8Icpf/RvQEw3rw0IvRhX810TV1DxqOxs+rHPQvxHQjq2UnL8BI4fETgOeUyL7r7wNIm/KOwfbx+UHSOjdKnmjFIS+9/0PLEs2230YqpDnpon2u0SjzvGtm9DYn/hRuvje/kP6x0KaZd2qGdkJDLduo9PE7VmmcIP/TMHLw8oRBeIis+hol2GrHkQ2MSyD3kaOD75PPt6dm7foQ1Uiif0Mura9zqtPxgdJQzpdw3FvRqMqPufItzOJmfujtKhqLmy+DiAC3Y4+3kJW4AigA4f5wmKNs+Hwm5DumKhr+8XkBxrjQexSACNjNQP+oIKk4QIsOibN1HxN+s8p68S7h57qBZQw1vFOTNJKEz/XIesvdnizD/NM1nJpNWPze3CuPhVYk7BBjX2GGJfuV4UN+UC5ysj/R/MvS/Ir3jwo3i9c42z3wsjMudZ6IK/wy3Y6TjPwmUujri1qfiY/ctOcKARA+CEZA/MZnQG5bNplAFdcl6yjKJfeSiWKZXFNGCaM4u6/gKfBncTDNo8XRVHbCrla87S3ZMB8zajlO0oLXFS1Jywoi3kOlOSdYuEUilV9ASscCWLjPaS4x3DcI1Zf0lIfD9iJR8RxU9D5/BNyFnY+xZKuMfWYVxJ7De/eZ+rSgyvB2mvfFBJtYVk0uTVt2azgZqXMyohOk64z++AhoxBx91cpt9ffemGZDz6PnhkRqE0EgeVDJ4hDy8w5/0Z22un7dsM1swjEG+cZf2fNW0zt9TXjQFX3xXDpqDNRHlZurk1EsIaxG1X+wobUMcj2HhutV3LHjZmHWhiJG1TkD03xEg88OYXKW5Rd2iFqKHFSUcmOWYC8AdItsIkPkXAx8GTml3sFyHADeY1YeqRgdWuG5AxdzgAysGiEqARJc+3
uOlJQd7ivqWJXFFuWtV3Zr6dsynV2ZJjE/683ECutimS/gL0+bu0vTbU8ADpOPNsExBF9bUmoDCdZfU1Vpo2qfXLUcGtETBt/+zqkCP6ucje7U6A6s8A8sZg08W0rsgjGgOnhSIfFXob113iblVnd2d4q6kH6ZZgCLlxTIQY+kIwzAZqGaduCf7AvektN8zzhqntJ17hrKsfxIY+ctO748K7sfR1gN9A9WJd6CKlAsXB8Fu46ISFL7HRNSS404NUQ94/HrGtMYKgqMtHqRM0rqrGxLAhgLVzojlsLPc8sfnEwFrtsGe2ge+FK0RoMthrQGIqRw5qa8c+20RCaGJmEqDj2DojaattgKEV/SSzpEEww55ee+1TVWO083OCDW1O2MrHLA9dK5WzED+5U7NPbGjTNKxLLxM5VXlKloM515BR9BoqO0kgV8a4UT57UpvIAfsYyBuUmR7ZCg02V9rBrO2ThIx73qkNwaUeWo2CdPVgOm9+ogNnTGdlonY7Cj0sr6Lk2zVxTsZ3To5E+SUR3nRuUDy6kXJ1XSN4GJtnQpk2RusVswC/lGjbLb0u+0vqTHq0rT6h8vxBSkpEnGNZ03fAQftHUczIc23hoVCAz7dDH3ev01FwKqkkaXJBwCdwYm/TRCpnSX5eRZqeyMagiOeemYc2GekxM+W4m/yySMPG84dvOfCLijotsrS8/ekTUQPGUqgTt6UY4PvyY/NC2VfL126vmhK9wn4dESn8UA4hoylOI6hsxUby0dEDL2+rU8KwyI1Q80PMAwB5wZr8Qc1s0oToMrUqwsaWRDjEVY3nMrRhGUCpPdT0CvJYk7UNWuXMW4NG1YikewbAJUJHXk2TCA6wGVtrgvre/QaburPwnCv7gXSNroebBJzXr+jZHcn4XDKT+KWXMaxZZDNSfTnFNRr2GPOyOETxImV7Es7ZKBlO4uOrKFeR+lj//ZSqXfIggm7MoCuH79xloocYferzLJkxlv/RDw/2AT2mjyPk84IkLLwnNZD4+naVZjmzD9oEU3Djp1ifmRf+wQaqGNDbHUR2iuYkwkh5BMUwzH5vgcWEYCAFGrdh2u3zZgKiwd20kv8C2YYU4ffOZ3qTs2QAiH6mJ3jLhgaMvLKfL1azTZYn2I66QA4+7zH2iLmWwu1H53n2yVmeb8eYoX+mw+7FZOJJ+lIeKMi+czCYRU/ZahIP/yKuyJaqCG7Ux3yQKzZlV0aIbm7cc45Vh3oXprkd4KSvvKuWcGdE3Yky9Wk4ZPkf192MlLPO4jJTzqeK4hkrESCveKdETmCnAsBf4hFQHggJepKbyk4IaSRSHwPgKiZUR/Usn9X4OOV9uXae1iSyLoGGZSSrsoiS9ZCHDqq+HE+CqG4R6Cz02lz5XGl0aEwZOHm7bv18QfahwpMZv/9BSMzMP686a3pEDvY351/z0BmCmApKhivxR5lr/aYBJK7ekScKyh4eZnMwj3c9OL+ULtSpTsI0NJC5/jM2AndJFM+KyRcckYi4vh6s0niYGe0sXHaElc1+o4wEdxKXG7lfNjMr102j0c9fsM/oPTPdtfHsZLsTF/HX+aL2eM2M3GjHR/IvexvcCLxX7WzHj0NkDt0n+MMfHpfAka9O4oB7fiATVBLVorLBuXHoz1O0usxw55G6FNKC03r+wPpDCAuTK+0bDe9AEsqHZFwAWCvb+JEQDKFGZ7IInOjrPPL4D8zVUxiAPT3zcz61xCQg8GqEPo3rO+Px2DYhikrWVTLbGiAwOravwmfoFxobgP4Ds403Iw8Qzxn/pjdjxNaGZnm+d8tf3Z6ms+JCOggLVgzkCXRHSFv21iLwQvlQZCbiIIqC8DIakBFILbbF15t5SXUBd2ASZMNXwXEdeCW8mcQ7qovJ9Hkvy0r0Q917xYXmWFTQqK80MFRpeTuJwZzAcygvuyfT9ujL00aM7E57Pj2xGAXz9JEeYWw7QDQgrQ4xFUrP5nsJzwiifStpP6BiaJNjOF10Y8d5LWYi
83ydzD3E/Sonr8z4g8dFSXdN1YD89mAIUoTyaM/71VGChVDg60tCm8N2IHwsNrGqajh2X5nmHOQTuy6MkpUqTLGwxdiTha4kKyB8RrrAsizRdxeMLTzOLejZBeuXCRAUyK394qwDaeUpqNZ8dkVYDr38pFftU+mdW8kXH1H8I/9Tu0QBCqqcFmY4Z9MAMQpwIeNV1xUD74Uz0cnBV2izHVMeBU7cwJuAi1i0M5JRzOHLdiK05W8/WxdOQiP/gH5klGaa7TWPOjcL2ND3l49pP3suIemE2cyCMTmc7P+YmcyYU3XHLUizx//uwQ7AG0R2mxl5k6MZ1vEzX+YyBVfwCSoSy3gBVyrd9h2oqCcTAPBYXoy86udjEdlGIGS1HIJY9Do/It6U2WNybNfZQJ6Pa3nW0Dpcpbmsb/wEl23LTm2dQga7VmZse3ZC6KhGTKFCpW/jJfMzPxZA4zgvOqR82fe6X7ql1SSgImCMTsQwo01BrM88eZy3kAWRWK7k8J2cC6EYZU04F7UfIJouKj/AGfBxuCz4ss+K5vZyqS325TdT50+Xf0/MVCNm/eMDgGm0BJVjlq9+5BBnDROwBEWiUcie/VhcwpsXTXI3N4tlD22G1HraLomwAjPnqbKTUCNfrGpwkyshRG8xnux8aSNH1bKxmczPFsOZ5E+Vja5cPAAhXzQehBVwImum+yknEs6SC09WsgCH+Nart3BgidLuRLrmjFsMHLZMgbdAFQV4H40sYrMQJwHohCi6BT9UG+v8yGjEphaAmFGs2PLMlYjMEiSKG7nFemF/9jG1pc8H47YJU33qnhxf4Xi9cNJRC07B0XPMwcJ4CySXVRQnmS4ynDJeNVqZbdUTG1OhurlYeQA6SCQ//IaWwuK9B7yPGs1Fe4QbrKzRZJ9yKi/zWNo6/xM2tDLA+5Ie3cKvgFifcH6CH6FdgfK+79/610WxBy1hm+HhDeCzNZ/a1usLcxXNWxeeQgqYb0e+rM/UfxT6DnlQUjg26ioN6SkwLjLjpR6iMZJ+hd7D/4w3xwcg8VNTNAh2gr21tkJdCsaAO0hYNdldS1o2Gd7YowflZwIhg3c555He9rI0DC2D0V2yz2CtIbJ3qfM/Y5SATr1WKnnzU/zNNylaEoPS9vxVNtXZZVBzJTwcPbwe5AgGp2RhD23rHHU38BeGPnkPduBIC0LMAQNLGesyL8NFUMOLAmTq+o1RDhcilaJ3NgB8BiE153kcSWSGazRk8jPqe7lDa1bO6tmQ3pZdaK3ZoyY41t1yNHDJcoPGUFiB7RDHOtlZ6S8vjrvDnAKyiROHXC3jLc8qSU421lbQjX612rMTZou4YyL1Gd3RGhskmQAU8l0zHFHn79of+2ezYhn0hRx2q8Qvz9A9DSJq/UohjySBXhewQkdmAKzyj9cZvZcTRZkTKBv0ksz+FfI3l/rNT3zni4aF7zOGvEmijKZpcO47NRhPuLTD0cg1jUikCyiUMzZQGribFNG+Gzr/G4QXXGYgCdJO1MdmzKKVBtf2+jQ7n/pyWzUpO2kH414XPo2p/Kd/Mu0lzHs42r/kDN4iS046PCwhxRy01uYH6xMbLbyXTk/ew4VP5MGfcS5oSIoJPm0grzeGvniDYqgHRzj3J250wyzFK7lvQ0Nk9jLCdvVKcrMnDO1LM46Z1DlvsXagJmCZwzt13IXHwzDosK8LEXDp7AaFhM5KYBbL0X/OcOHdSXuq5CdYDG62SpeO/cLVF46YZTdVuALkUamJRaWJksD9rW5m6FdaGs7aVJq9WNCH72QaqyzrbaJyH4FYctpEMHNzI8JFVYxb2R3GI4FTKqwvHmBRZV/qMBVW/3A9nZemdtL5OPA8wzrc4KobeU7zrKP5f5eZFPYVkBSWt7AVEqfSHlpOQvFavLuqfDdDC4aEDXTBwZTtCz9FcaFGheVO+/8jAYdpAP6JAzuIS8ce06kWZDHUYASaXRpr6dTu20CulI2zAHqqsNNb3vugiSifpeDn
fnrwrGk4D7eaLf7sMtTZZh1QSqA1DgyaK12OzzRPjXvbsNvdb1Yvk2aa+xgwAanK6gwVHfEBMhyS9oShaEVNeyT3e+F/ANfF5ffBpeVOWnVzKIJ5HS64akL6rWTdVhvj1B39J+X0R+X6/WWLUngsmAuJvrgO2G5fbUyiluf0NXScEQIUgrHbvv3re7iYVvrOM2KRXUCZCWswqjcci2E6tVJTRQraYokv89qDesZyDMouax7Ipi2z5vOUMe2UCoeVaFhx0JU/EnxzPX4JJA+gDK3BsVZzmQhPmfpbLk10W1+A7rUlzu65n+cBOGxVQBGslFW3Z68m+GmHj0tMV055tRWs+zQtj0KCt8L/2W2CmQ7PGH+2e8or2zE+inuKkrsHZfNWtuGOKZi5lTdc/lhh8zxjWAiR7GK5i3DROs2/EnhUqiJxtrYFzMSnBIbOP5xrDo5SZlJYLWH3kga12JPG0H/FCy/TsgxueLv4ZX094jsxtzJQU6cNbCvsuoRoGqQ8zD4B1JCOtoTCQ0lt9bvbrMbZgmE9k4btz2QfOwCEHBV4BZzTyFxKkItRPWJI6ZKukRecwO23uLsTEMHf9D1478AVSqC4TDNJDGaUrv0ZIKDMgZTMM+JCMHF+RPiD2/nNEbJEHDm4DKI20BWr/mjPsu+S5QrVLNbUWwwRwRnMUvtLoDjQ98OrhacaNJ9/7dfcsJmRKi1wk7VURxy/BVGhv1L8TCD+8TrmX2VYZjYNu/ZMkoD3pGd+pwVPlISI8MIkUsPX52iWjHCWHlcvQexSMtxdAfH1OH5zZZ/ahEQWRuTFHuGdrPUiRKz2ilBtMMeyXS0iRYHtsb6QZPObkfFB3hv9no4VVqX4/HYybU3SgRyB9v4UWU4OHKp/VETzeizBgSLbRlnXIJDluSKOmDsrlchoMIGXflUxd4Z0CwWe6ltxZlFaW+CNmh/CGsIXc268Vh8x6uUspCPwllczufHL8XGgp/GQ1qWGR5m7YV3NrmSpwqjmWT/LoMxqK5CnW7aUabodjHDRstsMXOfgH9oavVAnYl+vJFXnWZuC54tTRdLLYaX7Q6OZtAy7iJEAIF/ZOBOIlW9/KYWme5EkGMnYpxcvvdpDk07pEpqDgP2kj7n3xX5OERlvZskovmsNLlohYUI309RYEmv27GvOWvtvYcHSra1qo7YpLrwhaVN3+E6W4Sgm5EPQhFZfZNABideWJ5BvcYxKgNAHvF9xf/+hsQh/PrgDZ6bdMoZGiD+1Sj+wem/iobD4Fm66QPEF/Xy9MrP7sNpWE7n5vtxXE4UOjAiWN5SEBZ/Rk2Vehev6xPuL1NLi4jhjmmUtafH7N8R1w4n/iy7ZfETBRh7nUIC8qUhMl6k3ceKdj4mWjtD3XenvpezeJUCNWtwmY8NUHostgr1lhS8z4uUPMMkELkKnd9pn7AngqC20KIkdUHdT5irc9WQ52hLd3lDbt9CS0tyGGG+Jof/cwmqCvzqxOF8NhxubJVtGBZGtEsD8G+kj3IqByryCe7ZvmFAypfYB4nk/Gan+/xjtneMlt/alNmlv54hbcUntPwcGdC1UIhZ0Bpl/pL0S+L8+dxqhxt8+7phZpLT7PX2NNYZ54UgH7xk5ox/SRgYGvsjv5y6Ti8zZXw2O8d3N6FRejmWYUaYMavlBqa1g5nDL0ccLbIqq1mlLGcN/vC8klp3I48eWbz99m+6S2xPnpQiNUglMOGK7MrZOIXswff37YjWeHQ7zv3HsHPLWO7AIn9faD8HWH5yQlBVLoDmZiU2SpCCwP79/V7p+yJgWR8w3apoN+WfQTOZyz7mxd9GlGRiJHuNrgOAT8yS6GByAOyz6C2BuFRsvJdR5gD4iPI7vq4LrlKvnJtaMw538JwZGvHNN0D9l3BCJWjlN6/0+DDgxUJhGU/g3xiasNFXbJ678xwk0QblscukDryOh8PUNkufssiJyI2AQ5EZJezyTLmS2sn13ASBtmjSmNdZb6z8ccgoLtBcc
uCPkWGfxi7xlT19QGF/KAjM//X/kOJ9/lmY4cVWYBMPCcnIDfCJZKPwpuGMfxsadfrjVFBKJYYhNlOmottWziMLQwLRNjJ/EBPei73LU37h6+MnEWQp//zYaDVYYbWPRw0T7lvv0AaY8AJ1FThs60V8owCgXpAvpdUBSsMNuyN4aMyDiXZ7OeJMeMwYonn48UHv7DUzRBq/jgKpu1uGOGwIVN8ljJEEb00OKxUdA3/12ser/XjvbgKl4Clo9H2a5b4o7l4o6DyJbzdnPlM3hAosaovqR2nVT7+JR3p6H3ujkPEfhV4IMU0cV+svHoDUI1yR5VKF0gJ9RBoKzKJ5zMyUUPBHur9CWIyccHgDyX2XF6Q/fKkYpK0I1O1PifUCiRzAV34R8oUhFLsOXm9wPY5Sc1k4UYuroZ80yauxRHnFrCqx85EjlSu3yNT7BjXIDk6c9c1gfBC14ZqNfOrN3f65Tej0cS2675IGMMRyVUqkJ37PGUuWO3thktYLexaTtItFPoiIu+sEpWSexii9LjVRuP/ua2iUALQBf9NIRNnVcVpZJ2DLCYDt+PF9RBw+5160S5gC258642uXp30IIQXCjlPZJwBs90f/kpDxlxoRholxWiI46yvCXWs7owUxZjqYEtW34wATQIDvprGGvvtDY0R3VZ0s92cSakSzMFplZb9IIpLQwjrHzPKUAq61suKTE5LkXjFwsBHRnR+gwYjSa+nl+Kwbcvve8F1xwWQlUo82OB0QPuwjwq3EpFBV/XR1Yh3BlWH1zp/BSKVwGMKmEZfhKsJmZj1tVkxhLJxDF3px03kVZ8l0J3omi/hmTVAtzpGekD1nIZDj1Yrf1zo0d6aBUVdBmyD22Y8ZQFsHFYZg4HNMyx2PvWG8oF01svvbpoX7HMO7QG6kJKg5CvP5joM+XTeI/dMUfX9CxTNNmhPc9enqUGPjm0A6XkjIH+DkFl/SGx3AMzBPYSlKCS3HfmLHPtgCv/NU6KGN0ryqVo5RJUI4h1POLNjYysbspKln9ZyCA62BOB3ro4eFDQYErEYL+pD2dza93rYwT/zbNd5aXhxDpkhdXH0PS1qla5klUofTzfbbNgsh0kxI7nut0gx4TQ+7FmFkw5UNOCF+dSDfCtQsZjpZd1y9YGYgl2S6gawhW/MxF3G6y0DibAXvL1ov2Zkt+DanOsvELEquHaMoMUAT036hybF9/tLzfbPhpi6Lnm63tlxZMPzuJzDfwsxYlCep0JAB3vS0OoauQRh+xFd7T1U3dMERssi6U84OUy+RStyuDjTupXh7ILt0zPnNN9Sf9RPFABySA/Pev6dwKn+oP5b5tnChmr5ex3yKWz/vpimntitpcSi7j5dws8R7u15MJPZXGEgfKJdNaWyuRT96HE7581JDxJ/JVsxz0oV7XvXAaCUVFJs2gpIoiRjVnoQ9bk61H8csaUOY+n6wNiqZgu7wsbKJGxe4ScUSaF++Q/0e0g1ctZnYa3mbmBt8aWRmiA09K8j9KgjC72mKa8G4gsdD8N0F/jH/k/yW5cNpPyI+RIei9F9W8h1fr1ipUxfeDR/2x4Ixgq7tbd45BahkeXKi1oVx3z91gh4GID3KFog0J5HO/TfErPQENmqKP8LsSNJAGgjp6q31snEAfhrp0SG9/Ch9Ajy2gX4sO9T6Sp7T4GWf9+ie244aJTAlDmqPmS06yqIFrpH2mkTcF10c0FiNJbTzI75yW2U11XLsiMkPK/NEAUKA8uROnJLzISl9bMxqEFpTcSzJE7uvJjSvrpg+QRqeEJuFkbtK6+bTJ9Kj7NP9g6RB81wEms/n6Tc6+2P0npLGGQIakPdl2k0iTYW/lYOw+sdUmt4Ftzr9V8b5tZnkRPwf6faAUAmDAAA427ZtTrZt27bNyfbLrpdtu/5s28Zk7SH2IB9OCmZzxxj5zb0bY6vjYuO7Zc4JPussRUKmcPDFY8ZxNJuwhwQW5GGDXf7T5DycgHWbEMJNyix+RVtRE
ZK9N7S9UGAhAAJaZI1AtcP3dzEO7sO5U/aTcvKMfCd2BUgmSBIuzKwbmHgfHIAfimSMuTnjmSHR3DxhOAfJmNMDprbo5AJ6w3yGstz+lccaLH+d9fIKO1BdIQFR+6o2W+byhztkjn748k/Dxd/Jp7CB+UHQ7zaLlG+xkli9pR6pH05UISV+iwsH2IVFcxdqMx73mlP2kYlJYxkEeVgh6xk/wncVXgmiH8ElxlavuG8CRiYVgfyPZSnghx/oMMwd53FqvIVF2pNlGINZPR82IADRxu1tYavub8sT/UOSJdlrn1Nrxpog5OEF+XkNWPp/zJnFVNNYFX9X/50Ve7g0JtI0bQl9pa6te8sg5DMHJY/n4SY1XHDVk0V2fuH4RcrOGCeYoxlV8MCH4+s7pZznNc7DnAt4wb14rt0H15XmgaJbUyI2FaZtaYm+q+tr/MZspowPdC1s+gCU9TgZ/9OPVeLmCyTRGRaQvm//hx+btivIml4QlMlCFK9mLP2dKKqStkR6Dtu2xuALHLJhNIQqjUN8MxE8L4pjk/yabcW0ZeXw1T5U4E+Pin2tpru8xd2y8RPDK/JTJ12i9XpOXuT1yRhe/jfSK90T112tKDGdPUUp2wx8XhSU04AD5PKK8DpHs4jq6VTddrRdC509ymXSgOlZGBymjrcQKZDFbnAE6RX4ViKVvozzPMQCLxlF8nJQtw8Ey1pZLyOXxx4SWjf/rm9KSdfiue1OCOecK+GhY53qq2RULQZCW7o7CUj5M2qnxapZ6lsmadupWbDejfekNRSSsI+slJP4hDiNL+w4U8hXyu3Fh/rsaM3nABzErWISkvb5iAap3a+BPDFWRmusuMUMVOsB/4VW3BB+XGlTjKaP5y6CdJSbDeRjam9j47d/4XIKvv0u1IanyRl6wIP2e4npWTV3mmQxfQz7HM3V1EfpZNVemSFU/R3F0LdK8yOPuk9fGD2EQnD0ICu3fn7tQtHoiVpSm/eDNKWvINMIvb+v6W1t7A7kburv2yQRClkxi/q055XTK8xPJGHUFQZBdv2pApcgaZEr1UgK9anclJju90qU1pHsVYyaPXg62sTKumtMqPgm8W3/8RY4I8w9qrz7MazFWMuxrjMPJCBUDEo/p/jFzpv30EOUzIS1TZllqAkCilQ9CbaY5yepaHhwgwOaZtUbl30L/9K0bK2jotnK4ZD5QFQI3Jrx/YZBrgkOYrDV13dny+yAh/0k5WT03tIKK5kZyEsjHruByrfCmyxAU2FTU+6IsPbbXnQrnjCNRz1e298Jke8qEcot6+m2q5hL7zzrynOliIWlnWgWj86HBqSiPQ4DLOkYEnriDDEc9NvsjL6zNTWEvbYBOhdLEjAvGy7CKNJ2FULPz5ZiE4x2+SyXpNlQfu1vk461sKgvkliU2zgvcu1j9JYa1HhO10uz4yGzjopEoeftAvUVOGdJzybuJ4aqnSzXWqjtXuJO/7Xm5MsLrl3m2SRYPsjQ5In+sak2uB6qKYFOGE1TNNQWsPCg4Z7lJwWQzOxG0e/GGMCMvPGW7uxl7cXGHnbSssKB5eXsirxNaUy71ZVaKemM2BNv8jdgimYdLy4G1z/DDvl7aEECErYg71xo101tgZJLwnZNFMtzgIYGGi/P8CkMjq039JD8VZ54r5xSEN8Yq/wBK112BZt/2gTit5evFMuWSRpTp7+CWrjChLg7Ndkns4tj+ZITlIpMf71yEl9UzK//GRL7DInc9jJv4KrHmzc175GIwIHaTW6MTkJO2L3es7OdNfu4a6eEHKWwppxuK/yj++d8uUJYOFt0Z00cydafRYoTaGr42U0L5FEoruWQGR+V0H6NaCwJ1hvBYoHnpOfW3bygTiw6mtICUWoNluRG13jiAB09fCwK7sS3uWRhOVRUvgZEvs10tYGOXC3ILNgA/f/jLMQj25eSToUIEfOuajO1E6+YvNfqAJ04DtC3Yu5lykZLdxZAEPBaD2DDHzrzAOlyFsYAPaYZe
W7iOJ77Kvp0ouvNgEThNt+n7v5z9mi8SkiP6hrgASXJ9kSnUXRekwrkbLgKGrH5qffrG5ykXBFoQv+QNXj8rmEljVygpoEI3RsWgIzmemTnE+AecZJGzOfV64YvtRBBtwRzmaWIhNMF302xpbb/tXJXi6wMd3da6vdXsuYXWa+3rXEKr3Vf8zIax9lqBiyqCHHEbZpMnto5F+ijESXfMcmU63jvvVjVRsxCZ5Mrv47KlRennE8sVyzw3A3u94wjcQb6zhvaLPlCBkKgbxh8504J9kkur6bA+qKnNRGfiamTHrHYVmRSEbW07m8/W7R8ZgbLLXoHSciM13i9l5PShntVyXLvZ6I87vjgsgHnS/erqw0zvXz6gu0FF3j12sghPoYo3OJsmcZ8NBtUTRmPMugOxtMpv7HsOlgoVMQNSyhM3sd0VzPFpPNVyAfXEjsELnXdyn5zfPNRYZfblJCjIniIWYWxFQzTP7KsiY2HrvhDZN6IHFIDETnkeZEpUqu8pNbR+6VaycYwefLuLkD0SO/8MzpJZpe0lNDTKXBpeDEVvFNNlzqsY7P3+7uGrZYUQV6TPxZEsWvAMZN4NvDqdn5jYt4VKSMMI2hWlh+CHVSZvNyZp5g4y4WjbSfyh3kRQiR3sxN6sgbpNnMKEnnatW3H8mPkSMw7IGxX+OD2fLXw0tQJUfaRZzhAaOT2HPr1WjXhxL/nUSzzoDrI7nn5JLBXHqtfyVzdNZ16LsPRPtF6H1Rae+kDU0xGKp8aFa788mZ5Ga0NrKIYjlb27b+46k9VYqg2Rt+lEmZL4nD87f4Q6JQc0BbHWhgQjnZr5fb/lT8M/ZVGmvB1W09L27q0cBzo31KWQcWivK33kv4YA1e5UsOjCqBORwtqvYaYk/bLwcXGelzvZ1FmqKBQBUQkgdscsYrKlsewxdRslUTeb7bzIE/lqpwzwQ5eUPMcq5yHAK8WvtuHXOXY6TOL43ddMGRhrHITyGvn9/qadA13Wtv9LMKKof1EvTDULhAOYlQxkdBUNoWpIoAM+FdqcirSGDl3TYuBYvunWmdKFk494XFJ26+3Xn2zTbdtOX/YxanJeARfTGCEprmimxbMy6OwTFyssu1vDJ0T50RDxn65PqCn/TYTM+Lbni7lafq+tCcn3CAHJ1IiRiQMmTlBFhd5Avk512cK9++FHy4hM2vi5FIh4diWLw83vkkMnBcw2mMi9piLJ3HmRw8sAvKm+ImpborkBrlsGGSZAo0kw2c/sTqN6Knj9s6YZS2CyA2jUt6Q5mkHCgOEp1ktcagKmF/JXI+TFu9MWJOBBNSbcSEOVP8JbPubNb4FXQuGeofPgaaufO/0iMg0U8+TrYy6nJ4y6K25j/bXrMEXrIeeE4VdGqx1Kz20d/wn8GHDZ2Rq2zAg8byvVKAvSuH0iB58M6RbVtcgLfHlwSYoMNHPuzb68Fg665BA52J5di2XEQeTcjEteHSrQBSIgrwtdS+tzMpoDx+8cbkFD6xj6NNhuFnMcbHNk1bNbItag+LIlhkjplttKlCuEDbz8yl5fulFAXEaQHGdyYMndjeatbVxwMxlynvX1Bh5axLk8b/ywga/VHsvL85f8H2N2TGPSDK6kAHPtTsEcJVT97XMyZoP5L0KwdPdgt1PsOJIQ4o5zUTnBYiH3vyB77EQovZD+DsmkE78Aa/pt1xAuMZ/930OMZDK3qG3pKtJq/UMOyy6z3odqsdVmYnzvOBOf6v77sbMAA/8ovmcaULrNm2uLn4zGmAgVZYHwQm4VclAMYLInU5az8A3emicTjXVPlYfh3Ia1voJCnIHkm/Wnn/hdlDne1KXVPyOeYZqvlsMKizXbJlO2xSNy86U3gQA6tnBi+z8GNpd+jZAE1CjeXfHPSN2aHYIKHPQnHKdVFVOJJhQEvALlwmNGlkWFn3yb3FUr8BTZ7CQ7G6hasFrGG5UDT156IjY9jjbryvHXIADX0Yt1o22SalR4awf+gcZprzrBSxGX
sj8Nbk1WKhq0DDXQaoBNeurnUrmU/TAtUFQ5VryRo89PnrKnEsIbS+wGdM2R7bMQc7B0r1aV476DkqJjhQc7pUqIY4MUhs4XGO+Cixr/mUd9GUeSYKsZliFaVxMWV9X2rC1BzSk9T+viUgpAfpAotm9lf/qtSQFJjbj6d67Di6FUst2dHnFyPgUywpxuNFW3Ax1rwxDyGlKtTmteOe5R9n4wdE8GkstDVgqT6wVNWmoJf6lmkwHvZerh0b55TrsTs+/2TSfOGZ31LB32pDUGAjazTlgNimFRq7nYF+jCIyQ9CB0KrxsNvKug6kR+1zAOiqTdPytZoF2EKZ5spX2ss26d132UgVyHi/indMCmJZz7RcOmgNm+vnlGbKmpuzoA8EYwGSsr0zAIkfTfR31aJWAHcZElCzqqGEu4a5tLRFQQHgwbt98t6ro8kTAQ45I30kC/gCkikrOCcwQp/K08XMnt8ofVQldLQ1SbfsLC7TEaGLMHW8UZ3RLSZsOAG5XhzeOSa4TXEk5JebWeOXLROa71Zwuvurr1TNxsq2V6r1rfS+YMrfmFdxUojN3x9yV++Uu6E/mHdbip1cfTrFPBgX40QjSZs53Z0hz5wEq667PDnLgpGQO3tfoOuf541ndkTPWhGTyApT32WN7DK7YxJweuAHkiR1hq/TdxPjMf0n9lE/Vnm6NhkC6H1H2QYy+M54qSxXDC/oQ1eO6o3jN2KASM+/LlNwqf1NlSKr+MpQxg70NaWWsf07waYN4pqfC0hQWRZyfmgESKja+LH6RFq31g+g4y/wGl9mpPzU+SB9P12+GcgHfBgFChyYX7nZw02abrbJFQvriiGqaKI96JAZu0qNALMR1wfnbVj3oxRb+cp/xVuaTNW6M3jmnqkY5rgbfrdhNgc1vkpQ7AkQFGJfFczQxMm5r9hobC4KdnXE5zT6YCUUeUo76k+7Ww/fkprPkwrF87cDSq+6pRSFYUdLc29TfVNN0Houd9Zoh2Rk7hK2A+RjR9yqFBwHTDAIObhxR750NNQIP00IuY7GyRUG13yL5wOVWtbtNbNRIEaSSghVwvODWOd3xDUcwV+pjDlZ7ovd/bXZ0KtWw87aOt3eDxjgOybiXCj4koZoQojoMXU4CpKSj45P5nkskdDwIOCiH48LTKrJDGIWeNjz2PhrJdKFZBp6RNZd8bofihwfBjFbb8sLk+lkXaxcc//D5RXhzr+P2OHBkKIY9g24YlxO+JLbzKx0lP5KXPpZzCQhzniJi+8Qb/aeCXyHr/9FaI9NCMcaCbKJkjQMHgtNXskbQN9P40+CbsdyGVijm5XpsfNSZj3lduIEL+lFCXz8r/Tp/1ZuMWMkToC1Iil1sBzlCmvu8AkSKS6M3/3RzgUJJ1mZ8IHFOjWVPJg9bgMkRe/mT3jKuuJFc9CQf9Amv1LKwlol4u7aHdRzm9xFkQdlqXKVppImI1cqLhRqpd0VXtGQ3jRgh+Bu/v2QZ084Acc721SQ7sd1ry7RKSum5PhPEDZMH1fn6vdzMrk6uTYExTt7l1S+3d68twSRv7HjZEjHC/imgnQ8wetHqknTtlXMyBM7Ejg725XLj+tfoilyazGD45M4BUohsKvrRr4+Vy5IOCjSw580NlTnmF4yeAzW2KlZj7sjsVW4YtsBZQPo+/xVJM96J3T/tpc6C74EWn2iGtUpwaPoFKaIapKm0MBkQ3EuJsDiFvIOEbXyWc76fsnICQ9WCh4CiPtlBv7u6q1KA3Ot5jY2o7povV5tyFUxPVjXITvKY6/6jhaAW28WrpPQMn/VoYp8rgCENlCyqk9RKZ5viwfJrwCssU3gFGcI5s8DJ1sCTttEjUZzYTv3onhDzj0pkp0AZVKPOelNgjuqoWgtYptNgs4X0Gfm6t1lstfE78dD9ldRNai/QAwwpkFGZNT9aUPQi/Puw+iqb4s01MQdJs4lFhmdE4HXQhd1NvQKL30vHwhE+2H3hTLfccmhm+d7OYpDIbTscD
rKqpHcpu1L7jUauRgrLJUSp0soUc0Vq/j+93fOO/X1ZwtvqMo0S0dg3H8xMV/ART2ZziSksK++afy+RTCd/wQL2OmMmz7xKGXHmKN0Uk4Te1IvQEnGbxNIggxTcn4lA5+TFBwk73rKe9gK9BxePSNgAfA1wzlhdJq1P48XGlpeSCPdEuCDHu9m+NHvehIoDmsE6yJLaLvwxyT+PXGaRO207VrKcEw1n4joPMvnltujI2P8hNnonPVKnlfjolYHWOcHF7/sP3BlaOA3iY7+Sr06IGaCuTPhzPq0a95iUeg1cWGx9pjm30/pjMTU4lN8iamwaifBfZqbJ8Xo1Y9U17f/xsRzTLgv2r3Ixngvg7UCD9F6zKOfp9DrArviFA7HUJtIZ+9JfvGeMQ26qsGXvdpIw5ZAIfSilypYcikW6XMoQsBcn3yYPkltS6sKqY7APFBFUv9Nki9MJTa4nLYMoGH+jpxKkG3/nmY5I09dB1W8rzJLnwh7DWa+kjvkGaf0T0UpLRfhT0Oz3GINDfMsqMrqJA3Zy6KyfrRUkp1CuDPaeFTdrTe4BH6i3THKk2YIvtvvdKwnD3zrlfVWOXcJ9uHyjtnpY0mTTuQF4A3YkhHt7ctO6IDhLQefupnyvt9DitWgsu1ElErTmGPjrnQNPRHiv2IsAFyaSSg1aBTfD7XnJ3V9RlISTZF4eWxym2M/PrK6iXgiVLK0BiNwBBZXK4kfuloTJ/6YdtU7/kpMfqzWKlmLnEUI0nZ9CJ9Kqvp8E7uMssCmHXv9yAhhaBnU58QAIbodzKIaWqMbgBVUdeQbI+NA3DURUeHkT/lADDJzn3f5t1VoA+bj2AijPgQQHt7AR63nRdbpegd3ViaJE9DvmEReai0vmOf4Gxd1Fw20F71MF97zJV9x/6WtBRcgedSvoXCCXELWayW4ww33QsxFThNGFS0FIVO2XT1yuVA34gbHeIVWAZJP5OmNdthxVMMSIUGysLXQt40Fu0Xh9mFaMP3mkpNvnpDBeavWEMKHRZW0PqDZChYZzZ80tK71PrXKDivpqdJyPo1KsQcHwa/MNEavH8a9hf3ZgRN9/HYs8kbIynZ7oe+XeisSZD12nDmZOY3h7hXUxUOQ6inJGc2hBu/HxSkDF2SJWTB2y3O4OKl51Wgt7WVEKmSK+A8ObKqjlZUGmoaVGkSn3riwQ75YwSzLQo24jpZjz3DYOgGjFwRNWFwx7mpd5vxoK4PLQnmppUJ1tjfM9kWmSbFpFjL/3wjFhMxm6XDguuDACYABsACpJRXZsdHeMsM3nULP03jPKRUWcXvMEDocSP+zZcThjOEE3zrgKuBCX3aoqpxpa8UKOq+vOKntBtV0F6fIc3OIFOxsWKDrjlALwTA4nDLvOD0GJDUxnJAEx+lq0kzgE9i1nS/nKoFBhLZG32ohTQg0NTRypkohRgmUrPsx1eY54vcOZCGMPKvCFiuYE/uRUe2/lCuCTWRotajSEFXkLmzX5mCKXulQEcwGuWKksaateXA6LQS2HlRwqiOg8eTd5f9GSd00NI6GBtIu4jJJFQIVGX6o2zMghFsLtqZfV2quyI7GRlMH5qpq7BueYh1KMYZRDChvK6MvpjB1NMRaV4yVsNoQAdjwT9dNzrF/KAT+0qQ7TdP0O6RKQduqLOQitKzLyhwZxLZAY8Fo9HUzpH2jK6WxDzjXTnItHpSDKZ6wPNooQsCVi47Aq/hilzlg+BWU2gMTIYYdvSiQ0j8N1HLSHLU7Yc29fYXv0X5rs1lToazTyroAbb9wH67vP2bVQ8i7sRiWeMIx3kw//1UuNq5qW/xn0x9VMad2FAvRsfqM5gD77bBYNHQMCk4+p1+RQSmqZgTGAz+fBRGyLNNAz1JBZzoDGQ2xdEsxKb2yGLtZVdvANFDajyI7n9R1C5dtxg3sl643gL5nReIH/1JwydxYBROfef7HDURgNvbAaFosscvF4VRM05wIlL9WU7LTIXw/G398q3RsYN4HpV
e9ACOdZWtK5f2AwwG1ssnaJ9VQ8f0nFKBDfISoiHTWbxrODJR4FolpYqpH0xeZqGuVc1hhsb9piDV5iUD7VaWV9O+VBmEnKZqxo/qnJ2q2JDf5Hbw60yC/OQurqagvujoxCxlYLF2SZfS6Hy3ULxgTKHKVKA4vtJIhQcLUpNLTGYKpWmZcwrSrKmCGmzgVFw4raj+eI02OhFzMUaR5fgE/LJAs0KF0WhGbC1rT7qGSM87zS/U6Ky7JO75tS+8q8vX4iSZZKlfhv2A+wmRSC/dDM2yHGP4GaE7HMVtIj+JzU/oBU/KfAgZ/c7R0lM5XkKdvHv+iiV+EXoLhcKSQOfyjmUS2eslkMbXqrf+zPL1QZjZBbcFM+1JrQbwIxB9e/6DfZBYLOCZtk/Gcf0YnFPY9Jt/8POrV2Ef4jCs8Eu/ayWc3mFLE5t3OKudB3sgZ5pFRCDAd1L6KSJLVgnb4nRhTipTmla2C8YU9/VM6erOjgbKW8pRJ+1Z/vt8RIw5btbnNs0MPa1lHYbvyEpaWq4XNpK/03ys/qrA85UC1ZiTx0L/S22vRgvQePNQsP3WGVXzW9DUrwtdWS/hqJTPaZd+Q1AnYZ9+fRjQPr2rGhRu2TW2eYNmzvmiTwu2fZqjO+U9m578W/hKjegU0qB/kJoQsVUevsofqeFZ5+4mniDMsV+sBZI4MkcSPmSkeSIVmme5JBM5XCf/q4UpIEv6X8kkV1+9+KhS1HbWGj+iBg6AO0sprvIAhsJX1GInpkOKPgWQHOrNBVcoY/irYYo1xJVBdDuxusmXbsGTboW15UYl8Rg9AHr6H/Zd85VsIQ5x3hWopmVzUY4jwDdXemTyyrpN9Y3+4hHQ7Kjd4A7jYqE0z1Tfo2pzJwUI+grIKMPBWKRn9DmVl2zm9R2gH81uWeOePaov3PCdthltdridHKt7JN5+g6G8gA0g8BBgmTPrk37D/MZ2FdE9vHs69CBlIiRV6eft5utSsp8L8NG9ZGCdGOqr8SLbFIejVN8geqy9/AKmTnq9whAK7LZTb6BqG9To3bfN3M6SPi5rbqa+JeQ2tkfOvmm9Xc5yLj2/mJ+aJua3l66fe0aoPcLlNAD1YPy1XoWWIMC0m7PflL+0y6uP4PYbogdwF8b5m1ocjbHfFFPjp3DwtBaZ13P/8q76tSeArTUJbnF0nCbiC3PvOVTBs2s2HV1+a5eSSkTad5kIEL0L4W2+ZPXmaDzwBRwXqJeCTDXVwbOdFjVSLqI5rEX7ztg9zR9y7yyaSLBhk1pB/nqmP8rgRMyENh65njna+x2KStr7Nhzh3I76GZWGPqEU2sMxoT2Xj8IVPFI8l8fuZFCGhL1cEAi70boybaksS/wDjIO/5hoHH/hrdPaq1T5N5OiDf/UeQ8WmzsQkJCpai+TNCO7IKKY8ufk6T+qf/K6nnnObz6JTHf/mNsMheSqva/DxGPZpYoh+cek58WDwWAXazG99Y4Sjoh41eFZILyTir0Y7NyLtSIAPniTvtnekYqx67t/qLF6xvvro34feHm+kd0RKxsYsmI+f3gl8BcnrCNJwd72wekKrj6/Q7Xf1BV5Qns3QT/du4d0IKIzY2/o/l/WXxjsB4OwdPrW+aALjVZwJsiFWHZKWGT7CpcfBoTfUG2Jbqc/tH+3OesuwuUkmj4lTL57wK9lkir55OFT3Z9+YaSbkddIEUAtjFoHtsBSik2PpfbUmttjyRE+L6iqjPjHBYSQ0FOrxpA2OHmPV8k4E0KHliRFHqk+UFRSAdG1QHxxMi5khm5yIBxhOkfBZJo8AJQfDOH+VMX9W76OXpaTPzcL1cCJ70nyCss3GCnSRhSp1MgMaEdxYyYAeougcK5V7SpZSQOc9r/tA0fk2rswd7FXzh5pj3IMNyz9UxoOzFqRBq8J/bcQe5DXZL2OC4A6wzxs/iie2OUkDh0EM/bt4Aot7a1HeQgQqD8RZSzwShSKnbUWe1znRbtPNLuUCgOp5yCOGzTJiHQF
qgEmhTuyAf1kuEchZyOvdwrOBiXOnFl/WJ0+2skdvgNp66RCR6n1Go06J+wDpeql64idsVOqrW+mGErV03wqguENIyoIoNIsW4hYlSBeDsSYLkbBP4j6bJsbfbFhJxBeFYv34SlLWq9+SsdgfxNEUMgmxluGN7et+ZjeA8/XZX1PGU4bb45O/+GYGKXqn0GaDiAWP1VTuHCizTiUHTh6Q6xiVFyh4I5+wVNDXQ9aio+PoC3YY0GHN5BSYN40Vs7GDBEvrf8808CbO9xBQktQAx5NNmTxGzK/1O0Ge374hlKKcZEBUHu/4upxR/6L5771QGVmOAtFFZmif9+5DwgacacHRKExSIv1FRumwCk8aid0w4yXi4JCTXT/pzqxCtjkSAFvx4hfdMcvbioXTaBUMzMCZfGTcGRJvQX+rtzQhIrbJ9W9eRBGFawKws4WGBLJpWFv4JSjC71XMXrQe353dtoG3/o5cwtwdC9raOE19MmYiKWRONJkZ6hMKe56frNVSAEHJnvaSsf9gsf74t/EDVP4KnYZyXBCPSEPKpqFD4d/K9GBByKA2GEGBupnnFhw40zCuGBw0y5i8jADIilZwXfZ4YXZ78X8I6nvha3GHhAoVGDjbPZsPjxwL0bkQ+Cds1g9jeJRO3zNc3mSQcwSoXvvsksO41cJ6u17OWhAZlsSptl0QQWe0wTLwtlcdwssu+I3pHQexRz/mUmvhLA32yn5/c7nUVD8TfYwYhmc5OtJcBXE17BlWZMVqVxX6UOJlrZuPL+CI/YPrLvE4Y8nm1d3KxAA029NJksk3TcM8ilRiAkZk7o3S3bU7dN88JzUJD8SCMoZu2ytPKfE64FkGZwbpv4dL5r+fyvHZ7FdYkfDPkHqHKPaZK+R+Gu2d8S5uJvxP8GLFrpXF1JEF6NHhFi2xb+gS7xkN41D2JcoK6/1VEJGsBYFD2Vjt9Qqr163h8Cg532sMfLSBaRWIs0JBysDr6FKtwzqDIPM9nmef9DY8NELJCclT13OQ7RPCiAYonELZ11Guwofn79l3VbonrP/h8zpj7RZ+gLMuwPBRXUhkrzJYecx7/xM2YnphzEfNkokHwDYfPM7/4McS1p+ebVuZy4snhypcqHMjmmh8JgwT0PELr39NkHVCacX1FBFxVltJw2whw0E5ZcsEFo5lSfZO8lPPhInf8KE4Pm2zy+AtFbCZmuxQcd4iR0mokn01DRr+4Ui5mImICUHV6brHnrtK/PXKxhY/9Bmrgf42a5JkT+iA1qw/70P551VDW96/bwCToKxImJ4zb4nL1MbduFh+SERdQhUElRhXj+fCJecYDRMLjRDSo5JFnw7WkeEPU9iICUR9prG1GavVLhh+LKdbTHpPuTboV5gTJsdfYoMUDykEsLShbXW7WEFRbOaJl3DTRg3w2D768w1WnkcuhQBH0pa8ptLFC9/VRnTuMom7ii1fe0jtOcQmfkMWirSHIwdILLgMuK67yhtoG6FJdYHu1rait30nJfBZT52TeVbmseVdCbC22jdrVq+UuEBVrbtv2pXnQUWXcQeZCe/NpcN6yrkgG2MnJl9wfdkPOw0GsXfQOMMhkQ3Y//sKYAsoPgM65O4TBf1ZoNP0TNaSSCQGzanXP9T0SfSzxk2RulrdlMLI++7tqb5F/rkeMPE9SG9ugX9poGaHJQWMnzCtMS0PJJ+mOV2p0HXnRi9Z7k94L/M1rbyF8o41Nx7R42ZjrrvnSoeVOHfCi5mhw1gCl3sY6lAeJN63T+WHb0UWqnLEPQWlk2h6a6/S6D5+V/Il5bghXT736DvGy9jjVIYUFmrLSAmgeqxtudASh/kAoizifSRewum+yMvhhMP5AdSrhV1xsRQLg8G7Rm2AvrwL0yUKem/fqLws7EMcBMDxicGKzy0/4Saz6l05lXUxk5uqtZcasnqeTFTTPknoxriTxSZHOkxy+yHNoa1yCK5Sq9DZ/kD/uunXxVdYEDBgJCRk6TFB/MXRZ932snI
J2hW+gcWNHYVUstK/jQYmQTV6v/695dMID9xKr2+wPyIr5e/0b0GO6jGk8wgKIeVZLivJA148JgW8FXCHJ9Xz3tuuXrvESPHT3rBy5qqYzxxMjRcWU/M3mDJPGhyBVOQRClUEgvsQuniQwP912/KXdrUVEI8mc7bqL27TjWOSgEP8eewhueW1m5G8sNXty/HwvvLrLmYV59qvMNSgS9gqXoyDewtQ8iPoQ2W//Kzfo6UN4GnWKjBIl87kmJj69S+xTXfs9VKTvGQ5rFgx3F3yuDTqd8SOjWNP926hNdUq6WBdRCdhVHo6vR4Ct7lUw1vau+zi7LBCJFRsdJLLKIkwlxPEt4X6LsP7sqxSTts54Ehi+ki8WG4jim2oBuJNP3U0zxsyGTY1CKjpKkX9lPhSS+LZXR1NZN2Io7qxRe5sHMsZH3nR5GTHMLtHk3YXNjkbjW71xLJQXqTPKwq6SQ58OEMG2xUhF7shUE0FPC3oFtuwtEH/NlwuE6kdLcOO37aH7JyNDTIxGr23dSjG/xL6KSwM2y1vsUVvlTu7/CeTSPBIKaht+B2M9B6r7QTZjd3u9AvunfsoKIsdhfelnLdL/mp1H+pEDEEeJcN1TtWtU1370ai4YHx0ZWnULTYlSoFSWstP/claBuqSJUUe/z87QjZA/1ir6W7SyhB/apGif/ueFRjEWtpNy3XOcRxjndTndNC6dHVyEWlaR7YgHLOSIVboJAEMGYKtN4glSSTa/hhDWv9Xto0F9Y7wON7JetNIZWjVjlN1NnDwu/S63iykZVz2CGzxw8e+Jh/TRLaVXvS7BIOdn+uowWvMWot+IlgfhpwpVKz76/BKE4msCXjh2mDXIwxRzXz0UEJsZP9Cr141qaj51AKmxOvZfyyA+snPdWky9wtcKMjQbfIQ2Louhal4EazJr55RtApmcz1yY43EKIePY/7PwA5tyHcbJPMc0YZhWG7wLx8XzOq1tQOFnW/fx5zbEeaUBLUkjSkoHLgJan/8IjfxW/rjA6voKeySuOemlB42gVawBCzwW50ezvinxQlvlkW+ZVdrzCqx0LX+tlFapGsBS1Gkg+vIp98v1ajE36ZMZF9rQESJAkULGUweV4BqWVwtseff/G462Tpxgncf9oiJQZSySw/Pd8/53ap5ZzB6JiGEvzrUuBia4JrGE7UftMU41sOjzea+D47KBnHyuW+fWRg76qU/PRPb4i6+h1fPaxEA+Tp3Muu331XSulAF3YGeryatv0mDPsEVaD39sPLzTvMpTGq/AjTEA0sNnx7/J8GTyJj61Wta4PkgPdck5IrSe8AUV94/k3o/3fYC0FezuCSUrrv2Hr/A0cBKfqJ2A0BDTNeiEFr2+h7r3o8ZicdyGAy+9GTLCsOvMLbrHZap4FLUj/y5sAohEw9n1IQbLwRjDxkyrA4osahxO0eppMJCLwtKrimKnfTv70lpJ9HmjePgReo87GNLMy1XZAUkBc656h+zOsnfsw9GBuGl5KtkyezrquYUX+SM9vusxcYB6P8TiWX86KS3uhhX+oVDdr2FA+kXZ3qOl5df/wo/tEZtZiSlKVjR4a85THon/K9E+BsFROGsjhKOv4FP1yAXjfRLKXcsrStgjIMQxFpKiLMLdF43p7wVzVFPCiosPDZtpZ7dSm1+YZsiHkqh7jxVAI+HZlpeyC4ZznTjntGVlrPNHwP9lP927+8RwSi+p5ys1n+f3S5FcTxvV+7z/gxdb4WQldij7/nHAsT0hcbmPIqVCxjPyhDC5DgaUgiQotLifUtxDP3n9NHoXk+2m7O2feTpGvK3JJnMIvI0LeM6IIqRijQuNrebhO2i1zvwrfw79qAePL/rPACgCLTOf49Eq0iYC4yOAm1pb+Hgt+brdO6KJ9x486zhJqbopFlC5qIAeyQ5BKyTBL2KM1WwjmbfRWTGRKEdQtJRzTrMMJu2LiSeMt4vk9tuw6ZF8WWY1n1m3usPYe/Serq/1Cv0ac70gxTiOHh
4QOXnWsdO5F1KEWNIISQjwAMfMupKKkq9O1YyNveptDDBSlLQ5jsNnFhH8MlbJkLh32z1wN3TQkCHGaYuZbBFEV+rlsK7RI3fy0jiHZrp0jlupEI7f5twzlN3mARY76eCgHmvQxe0z0jpp7SfDt7VFLhGLYzvmi7kQhfWHDeU3n2l3IOwrQo4shNsmPg0K1crJ61Utqa5xOVu6bzZPQYVzUeGXIDLVxer6IR4EIb6KB0TZbASEyvBcz7Zyxt3NP8Q+4hd4z4rKpUqVJJ9KAuvg03TalsHrCzvRJs4SwwOckA25I4uTcE8uzoBJDjCgsvCRy2PAablpdFbBd4jX6u/DJnMhLxJidVn2wC16koJgd68Vg2sq23gpHGMEXIbUYZZB1X6LwV0y/g5B/cDIF1taI32tdcjjkjdKOo4qpjjBra3P3Imv9AX1OIqLRWtWxubSlx/83UTlTDO/c+KcBWbIV9Jec4suDyAf6oT5NoYw/vRypYKI6dPh9RYmSps3Quq1ROkfA50XjgYO6ZXXRggGm9KTvYAT0W+d28hcEnsu98ytWn2XvhFAY5yhFUMyWpqJ+DxJmL86KKIqGy4LtIhyC5SMwjQSNSJg1pYgMNl+zScphB0+YyoxFgEeQqxKGHtKDa6ivnOrsnhLCSNqt5K+khabKvxScF34kyhcOIHuhS11KbFAIYPFuQZSeloFGRZSJt/IkCdLm2jCGZzoHqDefHvgnkSQWwt7uZyGRKl45AHoXZ7CxRoUqhmUIoQeCzG+/qAt3EFW0LHNKsPRZ73epO/0VIh/t+mcrYyoLqoS1/yhJSTs5MQrZXeQO7rVLF3v9jp/sBaZJEO0S7u4Rq/yJIh2pM1EHFOXKwWCZgzrj4kgN8QnV/ENYDLFiuPEBnylH92cS5dD2GzavuOrQvbaBOYpuN0jxS8kMDisl7XHKKussmIS5JtqcYhvTL2CTxjmsc0lQKjMD7Wms2oyItp4o4+nZ92A3w9mVzOMgX8+1Vhz/LbdCepDZngDSo905iEuHf1ySoip04eOJ3Pl1hJFF2K1IRTYS1rF+3H41PKG+64fIQ10ltUkMf9dIiZ2qnZ0/6ZX3yO9Kbt9zq+MG6fUx8yhOxEM58EL8NLrY4INNYPEsYJR4jkd3O2ugOAMUX4WxkvO/kahIWGkWoF5YNzfpY2DabSb8lGAMWbkDrLq9aNJ+ipCxH4sidIw7Vac5//f1ls64cYEtdFw4j5QAVDj/eFkw3rtr0AspJmYS6KlZ76mv5qC4ct3rPJLhlRsZkakiVZXwOOz8vwCILrNraULlMCHTIzG0QwJFCYlYsxVmY3dkUO6S2p82g0RSI0KvFIswFnUFdDWh0wJcWKZHyYlWw9noKvcRYIQT08ss+z6zA95AVmjwlPwPRana6Gdl56kFu1P/obvH5+jz37qeytXPvxllnB9YfYOScEu1mgPL2pnGqmFw3ze01BgxlnhsU8J9Tg/HANXBO7QMIgKovdJUIv+cyQ2OKD7oQSMRnA2aR1h6wtCPnbHFPJ8saAfpLWlWH5VlseHJMk8rseFSQ/o8Se9wW5LJaI6IJKHy81GLtVoQ0jr2LnV7izY8Ppl+zXdY81by31Fuzrnnxp8kiFo1mF5ZexGILt8XavVBdne+NX5seWHOYNXWalH9/c8jhZCH+B/qRyjTzlaTMl7hOLMCmcbt3RK6Cuv7SN+H1KVqnMhrrZnVkkFNmyEKH1Hy0rVGnigrDbLpG7Rg5VHFpGxT/hn9Go1BDy855aAqNVc8FwuZUWUnGBGfkhhqjTrNzci4Wc662UfJHWUPna22GbW0RdpghHpfmRfy6ZJdR50aFINp4X1NqixoAxR48vWciH00AgX4U1imkkNp9H66e10iESU+4Dvz/QjFYcOGjAr+230lKtSSNces7m/lBxSuHVcgkf2Vck6FMv8nGyxdUktqtcjXFtd8t6+m6YXHDJuS1wAjiywb+qWPPUZrWBTPTPxuaQDyt58WuXx6/
l6xIlFO3QOaP2SVr00p4JJvNeod5EmV6++l3rVIj/pmCUJgZgpv9e9uvYHhDn/yVwc0ot3HCFhc5TR47S+i3Xxd3Y1i1iOmfz68og8DYFpl4l/d7jQQUEdZzh6U+UXla584DWVH6vy4yc2EoQumsdsGAnJ11VdKvPMH7Q/+0ABpjsFoO0hNEbI/GFuBlY2gn7bgo+92cLwnO5Qh+8Dj463oDGZe7G4heCNETSwbYjBWo0iPbbsN8GajvZbigllLrMpGHfVZQbEriIQO+ibAMhU1VNQLW3R3DocJGYqEsjWOjsuroEpJrYLx7iU0OGptk4+bU4m9/9KZF6qwWVEhHt1XSh4tgH68Wr7lnVBGz5q2pi8XJrtXgBEq+Mg+VYZEc6f4+3p+lEaxe7s1H1iUMkwS2IBpcrIEKpCfymU2D2WZnAooIO4qjSHBiYOh3+VsZJcvZTJrAW1WYcs3VJHK2CedNIlMV3jrt/HvaY68ACiahQER109woRCYdzSDpfYluIDpxs0YmRdz2rPfN5oLcG1FLczYUTU/WPcFX2emRGTcq5GIX5GbCsBYk9RpagIOYw8a7o6dV1/uH0978l9170RkMj0Hqk1UM3czc7cp13IwBuhJ+xckwiKm1vk4m7ypnZo47pi/DK+uEnfTgnQX9rXmQ2d+hQM9UOfPMk7FbCtio89Wgjh685qGIkE3x5U3Wwv/2fnRE3g2Fs5FBKgwKuVOwZKWRjYRhoT3nHBzXHB+kTj0fD1heA9aYVfgISGsfqN/ed/sBzOz9dxEUwRxs0FRL1tmuUCYXP22P2BlDTnAGkWk9QhOMBvXa+KU2tvMVUf0006iXkj7b6PuAEU+EyY092Vx0/J+bMjVbYE8ECQPxWZyP9I+RQag/5HyAUOgM9aelUCeUdd0UoUYz8WLT9cU6MAAZH9XxOjTWN2Whv/2DjSuXk2MvDJR2CKuoXEON7RpgZ2XIru0HmrmoKvgt2WAySuCh9Fpc9aHbqj81X473p1W7aPMprZdKcRWSssEUkE/FDGVaYAtvJ+II6UWTOXjevF47VJrBpxKfWw9U1Vskzo3FpOTfCaJ+I+Yuge4eCdPlR+TteWEsChoMaDtZxzyv5PENLG0C1hLQaD2C0gD9h8yPN5oj5hBZjDi+aHXJnbBx+Qx6yMMVK5PpywGrUjyJMMnVjpGcxFkkzc35W9hGG8Esyq0K4b0vHTAnFCYGbDQAiKoP3nNHvzT/foZzGDQ7zjOCUY7tRxb5T2nDryeOGqeOrFF8sDxL/ZaTjnuQKLLLp0rnyiefqJ95DheH/a3yP/Gpwc7phADrFx9PoZNXxbv1fpXD2cCKsxCTSbkj53Xim5d9Jk1O49dKMwbYVIooQpn9rpLnyU5HOq5CFEkNgXSmX4ziizuX346MaGSU/elBMmlIW0roaZptNNMZk+NFEbkiRpvvFfPAsPq7X35qACO6Hf6/Zedd3bzsNKM7czhBTxDN+NWMfIvE6nyFZxLJA+SMgsFmNrhVshvssKcf7I5DJDUAhFjka5BYLlft+CmfaA+SheuAjmPO04v6SepfZWk2qtIUWgbOdGwl9XvW+uaT763ObHd3bIOFS3DP6siJ40gzM0XS7M5cgqvT0Gj/K8ZrN4+cyuSern5wC5DsCK1uX+Q50f3fW9XTpoQieEilo8u2dLanPI0zZrOSsI4b0SPc3HWl5CUGtlp/BkT9LWuHzArVQqyAxfxIdDe6RzGNLvKeIffjCbypBJltCKwzo4kN+v2PBicxRk+u/5ZlPRcqAHNOAI1zzfcpk9KkBt//Oy5i/oBUsekXJg0SJfoDiWBzjDIlocEfE5s+UMNh9ieRw6nFJesFoakV6geF08DTFyCRbriZ828OQ05zQ/Jn2ZX6mfKvYds25NWcNaLWTeuuujwTZzN25jHFUIgubJsUQFhEeIEYiQyoPSjRM0KWYXERDd7/vST4vqvZ5pfp0N94WEB7u4E0mh/Ovb2pk5/lOSerc4lboXXA
6ompIQgYnG7DMO2XmRDnWukXrsqUNTyT+a7I6Q5W4/MizJmQKs04D+FFsGXtQksUZ8AnORj+RaAO5O+3VP+Mip6bd7WDxVkOxgwVfSpHJW0KIIjd3FSIw7jpRYe2ksk0YJDFI2eAAtMhC8igPuNI4krTuE1rro0HfthTYtD8c1rfYxUXdM3PEZDvIgovXySq54RGdfojEVxyKwL4NGshwWjFh44Fa12FG3r9hBEl+xtiUrc6AEtv2MuSHTpXTCKuz5Q9leT1PHAZWPkYiLnuky177OZNSAvyqIy5XZXKAPcvILsyS4D1FoqqZGMxRwoR8HlsPztfUT7kCvEYKfWZNKXgdD+js1MFcr9hXWUVtd3ZIIHzn2ZsyaEAykHK3QtCHsD2XYL+d/XzAy47BhqeNYRuk0EVI/JOLHkmAUsgg+bgckXe6ACZteOEo2QnLsO/WUYrzSOtVxEKwy4yWsTNKRNhOXfYSsJfbJ9efD+hiKJqwThw95TZK+o0fSSSsponSy+cgVeVbh5m/+M2WGQkBzDN8zHRU1ta4q8EjFM5CfebUlDvmMVg5s77SG6mG5Ub5x/OZKdwHgYHzbH167YeKcl+EkLBihXX9Hlap17OupGM9LWfwDi/E867M4akmR9TXMB21RegLoR/y3S+5twG7V5JfBKWpOLSEfkua2Xycg7KRbM7h/rBXgOcmrCtKLE1pVJT6XJzdYk0Tw3/+0Wt1k4O7M2bsanUQWdYQq5GIrrWaEI+lYwmW7rFaYV0bhcM2/L+XmXoxOUg9Th5SwUGZYSn1Yc7gNNBHsHXgjtW+Fm0KVtqu6ildp0JQAnMnvYNFg4WSj3+Ct6otos3vRksZY7VZabu5SL5LH93GvbwmhIHgbymEIF6/6xSIbbEO2AoqVdOKhnif4ZhfKb/QGLwsnlb/6BKpR6pkLn2R9NAx82W87cJmWBye9sems5+F1zo/2lwV+hT3CZWgV6Pmgtr+qHPccpdsO55mmaUgd10shCqEi8ruvrIf6T1ZE6OZdCVA4K5age5CiGEyZun2MfmPXchXO9gNkkdbPzVkkQPBYp+PcQHcJ/22FFsrogLtJHV5qo88IYyMvbkhymmslmKaj8DPCyr6exkJ8b4OxR+NNtwsqPgAnvpvyAZjCYHjFOO4pEsc/wXINeD4X4G4IdKye0aH0zyU4ycyXo2EMvn3SuExY/kgq3+9qBXJUqJFiWaHGV162Te51N26+wSikIKJuNwxuRQ2DOH2AHqkGwHTjEsHcFKn4Lltt9c4Ecr7ky+43l+eY30wW0bfAFtc65upm6xA9MUwJ8rnOiqB4Wlc/p1EBilU2XdLtQ8RCn3XssVsniMr58JEmwdQ8WWf/+KWq1rXJvecvjv+7WjTlo5wd9LiEbZzr9XnsLpfGaKDuOImPei/qN3bOoh0vf9BgE+Ul3hQGDGcTPqMwngXyDbCEnEZUuzcZurh5sMFPuq6liPbop7sYTvVeRlxjXJZm/uy0JKZGFQUabJFuA22MDmt54eb5aq2jpDaHNT8zyE8RUx2FW5ihP7DGQUpod5fub/NVunivD5uf0XCxvK/Xpu7hNv1Gqo96Q7lMzgNPUMFV8nc+W5SYtbzgMYdyVi/m6K8uGaakyaHYQKfW2zwkHWHcQ38QuDGp6c+N0jPwkivMYZdxX0N17FEPaOYsHPTQK+idZjSUTnv0aZ8eNKTvPcUBBWwwvKbEb3CfHhnb17frLAcwOB+sWpsCAM60vKFUnp4pxDlSnNelKkbIRl7sqUyG7p5t106YdkaCvmoysc8UknOe2SQLZHRSRXRBZotDyuTqu+QKEGN8ENSMcfu9deQMB+eY+1v/r/9nJlkEp+BUpQ4sx/FEUHP97dV8w20I2VENv3z8TnT/t1TyDzrywVHzub8DtUdh8b6eBfBEsjuR9oXgqvrCnaSqMpyIBU57trUNDesKHmEBYQZTfmbjcmqP9snFgG6g9Nr264Wtx0fvHtUTsxDh8+6eLUPZT0
dUqlnd9v7ITmUoKwbur/glUH7LLgDSjeSwItAcenTbX0W2YpXpj6REyuwVQK7e9tQU1UF8/Fb5/EwrDlGzGdBuPpgKwJiTH+dnP/EbjHvB0rOsDzgpbFz2nzfMlLKciQjPmxiabmwtiYp2yx+Q1MHZTo+usTvocLgxvY8NKUjJOu4PRQM6004z7eUM6OT+iFskeLQ/Bh2fJ0FbjkTvn/HnjJZkSTz2w/4aqOfUhimFJA/Nzc0pff5LqLFD2YEu37NMdvYWIveFoA1vZxR5wXbHbQQ+tCJXGjyldJGdsT+PWdu6pHYZiZymbUfPZAO39RyNQaH7mjyZVTwiuFK1T40mlV4v4OrWv0ggzQ3mgk9eUZ1uK+QYr85AMPr0cdyOt53JhKrWdcHRrfIsv7MNuSk4Xslx4Vx8tW8U8Plf+h9e9hw2Qd7BKZbiWmFpOTagH7fxfLuZZ06VHjC1q58z2Gg090hAWbgOmqneCr6UBb6k2x9m/5GOteYBMTWTQYaHFkXJMqKJvvKv2Pn8mdTc8RCaEsI2mmIXpkoAEjs49UiL4JBrM4RvhB43MAIKgsNWxtlp9gZmFAXORb2OD6o8VhGKSwSScT3YWI2q6LmcN3X85pIBsZ3VomGpr9lDLuhfl23UCPZZNosj1SiB5u2e46F/UQVcFguwPxAeB/Gbjhvl+tEVfpeJtNNm1GZLybV0RDZF07/wc87w+aF2j/jGzVpVWQcOz/TO9XMgdtlPZp8pyoQKrCofZskPQfh1x5sfbJM/jnLngsy1fD1lAZxQ1AZgpasp7lx3lXW4TpWMNW9k78aIhUCt1AGeKMb5uDXdHTdnFs0qYIJb3pPq5Zhgk0mEcex7wKWqPLEjktYHpiOvME8mX18euXuYCqggoghKlbKU15sxf1quRED/7z24gviCRNSkAyKNFZxYmd56WsRJv2MIz46CWMlBLCceF2cGDaMHoxn2QsJ+cuX/g6f420vXB5Pc3Ua92MjeaAtnb/hEh4Scehrd3bXq8rADQDG5VrY/HzsnEnP92RHUvK2rTM/uftVio3ZRnMc0ZFrr0haldAW6/+tbnUO5oltrUDT0BXugAVjv5hDxwhLUU9ptHuOz9pLJkMz3hmbNjyeqvFUMA4377Jv2CF2K0pba1O75Q9qs5e6YroftiCEd1mL80fo6eWNOVmTtEzofp0MlZEPbXKG2i+MQRrJIPOHp5olYVT59XQC77rK+TrUIp5OTV4AlwiWYk3lMr69Dvz5aitADi3fs35HeSb14qRVQstAAnQRYtHneWGwNcMw39xI0psVdW7RpZw7ahaq7Ttd3eCwx1QyPDX5y1rG1PYhQPLevBCE1OJHCCi7oukldpirQEb901xfWb1DzxooanVm2o/k4VDRVQIcllwEIQ/shQcr0LganP1bmYm0e1rHeT8QWzEqTBgpvFJriDvdHE9WZTglDHylSUVC1TIOeNljDU5fIHMxwm1v4Gncd/vk+pSLaUbvITftMyRrRtjh1pUoUtKu773Ky/pLPU3fD0Tgydz4uVEhtMm1C6xn96fLDYQsKuKaoCwPzfXO2Rn579zHm1/DvrLPPcsHLI4x2ilogUwoK+p089rpYwj3v3M/87be/4bXKeIHFdrN8B+cdoHvBtlI5M0oYyYOeZllet+mUik7mo3geHzlIoI0RUTGgcZU714hcYMDIAxSQkzSyG9Qo8S+tCT9qj3DC/DPDzaLZQ5n8uAsz23iLUFge9G959BVqmzMstuCKCnwwkGl/8aGeyCXJuh0dO/I90e0CoRUEAAJpt27ZtGz/btm3btm3b7mXXzbaNWcQs5IyGJvMYWVgjBJaS+NpTAAg5oGl22Y+HuzHn2v2DGARkqKutQ5Az7cdqRAnCyB7IDr6qlD5/CE5Ay/Wtw8RruSFQO0n3PJFyN2xko+zSbq0Onwh/hDYVMkcXobHk02J8J6W6ur4iZDS+UW5lzstkTg8VZy7eOpvCgGx8/uURJEL7yF0oi3DZ6
ESOK7hkX9w5uVcvjvRUobEQZ1dVv9goYWxr6gRhAGbqkK1kMXbj8toutSlz5+WhcfG9dx1R+ifM2kR7wFZ9Ey7QGsx5H7zE+u2yAyvjG0/bShgH0keHJmkVXAiPk4gYYY9reb5vhVOSzwTtG9OAgdb6w7iYAp3P5bFfcKiTGgNY/tC6yTqy0SvrtzMRNSRQcenqId1i7c7RLOIvfxncBOkgUkLr8IIF06iiprdxT7xsLb1tDLkvc3dCI6tVYQCQziCNqQUpzNVgoaCbi0D6C1GA4gBqxiXs/c3zD8q0wLgUzH0a/zX1ruKXS+1g2ZPAaGTjr7NQ0oUkKJ8JG2L6fS5KoQUNZ8XZDuaX8SuQgKX0EdkCCclsK2F8jZGJS+u9StYPwmak7B+miUS6I9J6WbR+uBRqKkIfGzZro0z+imPcTegm5O4rXwx1/Q3X96FoH1dbdZ6wOhKm4pcZuYEHbi9Cq55K0pvUCHqoveAeYWkK7ZYYCHMwtRv8Sxx4cH0/5blIOdSOCnoWfxY1rkaIsFsXYhW5eRtXxPxBjqFwFPTJckLeqEuJPQa79KyYxF5CzG/yVSfdxtwY+m0JmiMAy6qoGszRkscAgAqwjhdgP39u7z53UlgJAWD33+t8rWYCQ/lZYFhADJ33aiOgmcDXpPTLOsYjUrPHGIUF7xvRYUcUHFN+fsI9qXPDwe6CSaH2/u/DmSgmFnEzFN59r3z/yfbFX4mRSkeEEbYoBzkNyIYegAz+epr04s6c5uFziaqBZgd7ZSpn1nDe5u22JpSTLYvRmYYc15RxUT/8Q67mAdxXEiR+KopMaMLCGPavR7dOBfsqF+ReJ70hokWQmupvdliofGW4BdLYflZKSuP4wkwPat2Y7So1RfFyf+ZjmM6Z36bsGots0mR0KhEXrBPT/AJBo3FAtzg5vuYmC3x+tYQa3YyI1jSFxRlfgCh+QEHjIX1YSHslnKE8EDm72QKEG7Xfi/4aryGlinrlpkRDRWbmfb0Gwa7wYUBayBCohE1Hh1LfmP5hNFXiuz+dsfKUbgobi+Gas8gAgNfQOhhmcG6rxgZcEbvTkiqZyYyL9+z3Zll7dMZU8qvuiJHAgD2tej2V0fRo1dqMqJAcUo/+8/dkNMvnPO+tX7eR7LnHu23KnvkzKqNNpaFjvM8PmVyuvoWZfwzuhAwXfj4/8BO1UGKdPdabrFHxDvhFLq4WsRF36iK/cxUUxZPhQO8ZvUaCHvIHirNm/44VIi7MuMHfCsszOX01CWf4jdGFWyraS4H7XZ6au7U1YSswF1rEYdlhHFEhadSioP6npzEJ43oiMbchC7ldOa+CbcyTR/fWHpuPX/iLsYs5PPwdwUJNp0cl+YWQfibX25Z1UmRGvUHP+EKhbev/ZLFO+rs1Hy6iafTqq1MMstsAZnWEHenNw4PCyK1h3eqP1fvwJyR6aMHGX2LBc/FXjRMDGqgP44VQbHJcGmQ/VBVZklwNRFQsZG/J5+qvWn59lV4Iru9jzzMu7m7wOUAMPvBjzjMIPsSeJrHGGnGBjatyihzcr17lYyW4r7niusRvF3LuNscdDvlOjs1SVxclJRenZFT+F48MoQVkixLcdFBL+0r/wuIqQHim71NJ1KEBI866gnY9SKvLPyycUBDHX0th7bPQABlreV4Uq1k7sm2ekUM1fEXucDt4dauITyeUjq+oN4IvV67Df6gQn3Wd+wM1b5norayW4yW/iy0ZtcAo/B5D6JvYiOS5rpXtWHSVS4qkda4uwtlYQocFDBPk5P+smEtV+gRo7Nfos1xKJMzmV0u9wAmF3wbCbd3slhPGxKK8F359Yrl8astn8HB4N8KRTWl/vrwZisQbFMP7EiptjvsqhOs6QR0d9XigEWEpPELlHzDfjlyJqi2Dcq3lcv34e9gYspRyh9QUQuMfjWDMtSQ/b/+5d+YJ6U4Eqja7IIUqGrTqNAacpyIHpEDps8MHmF98ypdMbDogzcPAxeUsxA4+CnekK
jpeUdb3GOova5JBWreYrZeJBZv33Vi7oujo7f+KDamBC/jmk8DxYDFz1SMoKXGUykzrkgqZlDi3icO+2Gh2jBrH1Vb1op33msiv56HmxXcUr6BK6AooEe8QgdMeZRDZqBK9vuSszFEv+HBvtbS2/Lc2PZkbxU99fS3s+28YZ7nSm0H1aaYhxae4vpEhypw+IrDvTn/DbcUOwn8EPJy7hOeBzlRZOnJeAluXuOVCC/PecNcj6sLTLZtmlf3khNXQMy8H4pQc0+VDyu+WE6gIdv8e/dvOCwmuM2JUER3gMHGKr1PS5xhLpgcQVETdq0TgIsla0VqD0rflTD+yhVx7pNTR/UtNyyt2/sFDcb2jNIAAFavtG3eI4dwPUELVFtF++I6BP0QswkABYSTUI1JuN3lKGSicofovCjZrckdqtL3puF4nWHC6LjWtKxjzU6b1/cIQ0nfrI0VPPDauIMnPZNYajGSxNcYl8mbuwQucuGgpzgEHawTZ72eRYiBX9MzaZUdB/Y9p5Yg5fg3d5Rcox/K8fCdU2HQU8EXy31li47Rxaix9lPn4ia1mu3BcqpzwLIRdxBcaISSWVV2nv2EDpQmImDfC8/irzuYzqJVl/tKmjSGcR47g+ujtceBt8AUrHYIAGz7X3uJVDYXTAAkuUVQKGMHaIJ6EwORMf/e3TvjiGUpulcux7hQydLz1Ax3edGyCG6ZR+8KbeykqjAmvC/I3wBumHWEXOjpzCQt0r4ahHbWcW07wNgv4JwWlGC8iTwGXbLU4q5MIBt3e7EBShkhtspiDjfRY3nbE9Q4SU9UWFfkCMh9II9MfvVzSXirmnnTxPmV7+NWVcavJ7+vjb7ndbGsmy6LPFpNTKi+SOoPqX0nCXrOzaPxbA505pcxvst6y7yrr5/7MEu2NBDSwcmtePN7QQCjqIqrjqOB+kVd85GfA1OX2euf927016h8NKCsFJE/CuGjQLVf2oi2HVKSY7zgGOKXgbERAm4FVfsQwz2X5Ttr+BCwTV8TrliNdXc4tEmWm1+IvXW2mTfho+x418I1URH+j2nOVNnDQEBnuO2Rhao1RYBvQrQzUXsM7YTGb4qSzxB6MMMQa4ch4c5HC/NUl/7w/F0ROQvWYOwxsNK5vvI8+iaVBqs9jDtm5BTOu7Yzi7IPzYFs4eVXxU/48GaKw6kpNF0gZdf+MCd1koDGO038VqlpYnaTZGNzKTck9Wco2WsTLWi7wCcr/WdwSmR6FtXjDIkeViG6TfGGVtQf5lo9RVeET6t5GdDxBEaARYb1X33iBwEtef00uTWBCk573n7e7RrMoHO7FH2g7RU0Ctpab34HUC4xjm8mB+DGqhSHEhkKLZZYi+alSeKW+vfcNsmM8KKQH0cWKsxunsDLwILqlJ6RhATvyFupXUOJqflrNDM6uxF365z7Cba0nPxhWu3QjRVt89jYJdDTxnThYbo9dBLlL4cJSxmsaxjaSpajrAIvhN7tyX9pFEVF8SqLbVH4YJhdPZclZchwsDDaIdNOMnTgXr45/Q2fg6V1uciaHtTMrl0OdctWNQkB0LTZpRzwkKSxcaNk6h+Ru35f1C0Tw7VFp/7VAO81d+ve9vtLXhKo5Nru5YN1BFrkKiebFQHjqECF8MN+BJbNounilpKpVCoMZRj+37v0ofSm/UwOYh4fRPs+ln4YwkRGtr2ZrPLjhEokFsbEM0CimMVL0NJgiJYzUVoCcwA8S+pg1ys01Qm03YEqcud7vIwq95NZR7wlZOVkOpKEy5jvJ4CCbZgNV3kDmy+/nl9yKJxqU2vjhUnvSlUxGWZ8f5xf5swLhS+OGdXQIRAuXkxeclXnLzSy/Qo5tX+SCU5chtb2YH6AIFU7ZUmFNKFYkRhVzPFRAa5sg1tsJmysLkU6ZYehwa7J2yVIMkQnhe/rpqmMAY5UsFO6sDXkkfNQFeEy8I6KHI7n+Xz7WcJG8wwNpnOLKapFpxP5NP2Db1MqBt4lnA6zfwWtF4WSaH
LSF4ldvbpFh16CHn2s5c13ilrxb/KZmyWtOn03bB7agIDld4SbeXAaFRHyD+B0TaRA4RbRuCmTljnlSJUez1oneRQYT3QXSw/uKhIY0pFoiUMMa8CJ5uNCM5Z4mtYxAmGA/Suj3N0w3LyusY5d0B0ohvEW6+J1lx0vPMRngl8iqVbUcEH6y/xm1mC/0vA1qqg+ZbQElYEvGLBzya4Am4IUMWMVBsPB2+E7dBNOsDRCX8xDdeZH0+hIFEGuJ8TqQembSpAH2ov/aR6Aam40N/1mlT92uUHxaIorshSj3RxVNs2G6lQoC0806YAZaAtE53GL3u3/uou93TG0CgT2SRNJHnwaH4lAcjKkK1UnvuIWwI2lrMO1zJHjQ/gmBESAS45yUlvN84Hfj9/A/fhhJNTWHRxQvJFvWvYDL6Lp8hM8un5ZRlVZps7kVLGpRqaBGv/e7jCHaoS5yZ3/AkczNNLzFGr0eyeWlWIw+48zmslNfbKR6RVW64jv2zk/oBioWUnwnRS0uPAFDgooIttlpt0gt4ou/7Q63VsAo8Pt8VfSmpz+/8IXjjp1iONZ97waYDpDbTKMzCATZOgbXX/WgMKpFUkSHq8uhuAfUboHCW7AXsvXxOYxbapqso2vA655VWzqiZi8KuV2UnFFfIwiGeRbDQ+EYOpjfQGyvlNerSIsggLyCt9tbIC/g9rSHlNiNS8DeaJv1z/URrKQpgLMxi0NsWX0dRRXKx6phOEhBgT5eHTrXO3TH5zkWjrD7/YSC8UqpiiH7dM7qpSrHwz6oUAJe53pv4Pe1t/RcUvE57zCcVl8sdC4WVmcIYWrs/gC0yyJjY+20F5GrhdUdMzTIGhDKTs/arK4LMoCCWbzKTJrSNC36L0J8XdcuEajsyPOsc4+dlW2ngbDUb0bxbvG6dXvxSlkkKs8tQPOPLqmhwYxpWaXsL2CAfS7AtFQ7yu/d3gU/GXiYdEgV3H9yG/omXkB3+AFf25/wMUPbknaRP/LQr3beCpGSSovT9AXX1DuOviq2RNgpGziNXwGooQAnXFoXyPSAdaogaZU+ACiscpx0Q/UDQ8UigKsSZm38guGX7vO7fiEjNDaZAng82xbOXPKfPGVXCYpaz0PVAnlEQB0Ycf2AlMkjm9UQMLuT34zi5VZiI3e1F9I8hMzkVGbbdw+oL8lAfvJVmeGGNJBtPIUrzD/IpUCybSDw9FVgRexy8XnygpVZsx8xjOgiMWrKXVWa1JzSvyyjhoSyDKpxGjy4ut6IHRLDPnfYu0gsgM8IPl1bfoQtwfzahjiz+8GnNNMQe3zQ128OnP0locbD0ZVkdUJh/Qd+9ena5MNo2TcLkHQ/3Gt47JiC4L+lF9YGcxHBnL7WQNoOOl3paIoz4TclO2QZpkZ9LH6btLTkOt9I9iSwV3UQIDUvhR36n/meYx7/ISbhynPxKgdUhTbNy77Esixsv9rjH/ra0SLf88DqR17pvxAreab1JIsqfvD6T+bBIdVBRsuk0KE/NjkL2AOBFjD6E8xHQiOym+fodp6uevCCfNi7oQ26C22y2oMhqvG0LryCmX2YIeoeIXifGCc8vN1C2CwCK/rISTjbWdVQgJl80NSj5A2PkHx7OjGfBwMKrPR9nLqRicCjhem7ZQO/k5pHs2WKSdToYZ4TZJeuu+6XN4KOneA4YfiUlxFG2lad8wRiuXtzzmGGeJXlNZtRABgt9F7MgggIlm0FVa1QJfVDPY0HPnQbtYphmp9LZuvfub28rJPi6kjp8kQBLcfkoFwBSYH5PzNwSTx0lHdw+YA0XQO0pASgSUwymZBTNkHBi5YymnlBblIqUigq/DOjVxDzr2Aq8dvJmCPGBIAMRGcX/+fmiNuaTLzt4EA8Lry6sjWsnjnsZoKFjXQ+MUdOL8/9NzQrnLCrSC7DcXidVkgI3FYmOy67PVRtUtL0NCAziT/O3H3uFctTwaRxhgcC/n0s+QWxVRFFPSRoCl6gVY3AcCBPwUA/cS8+C
ld3MaGQRrO/Dk5K6mhkpBNHlx6MOqpezwQWs1uI4WQZAcE6f9wSlmvhGOyxBKLBDI6MWKPiNh7X2Gv0c3N/PyHMOmyNo/9ECDbAQiEBXB6xiKlnqXU0q4eoll0RVfUC1m3YglibPonpIWuz5GdCmgl7/sNUumHabhqLYCLfXiU3a9059tUYdZ9ssACF4mVaD3reonuIXjJIlrbpbA3ZE4mh6aECDKjGXshbDaRD+SPjfpKQYh3xGiwFJbHft2uyUH1rtz8fDOP7SScwzu2GsMUhZeDLROfxERaj3mAX+79PHnxQY+OgXduUKf8NiisL4N8JtzIUUSq6tafjsFeCKcrbNex8HMdZKETN3LMVdou3Uu5t7vsEszo2kKXkAvUhLsCm1zNQTCPfu/5CpCMS8UuFxtPtZAGoY9K1ZGAa+0sQmvNCD1RP11onPdFA9Jrvm6uYWOXjcNDAZw5vKzy0JFOWn2L4Br5bsfYDttwSd12ON8tkmI40OmVAYMFRCeNbMkS7J+o/04JUE1TeWplpTuMM/TQFhJdK9454WW09soJZWmTbJOs0pdtg9gIvtP2cLNUzmu+G4dbf64upui6N2MtIGF2OQ73c/CRdtpAHpPEyYczV+O9U1+jWmhr3gxBfK0vTMK6xh/eFPVdSWi70UlR4lcanZ/gzvMp/GoQBbMgjujDUY8VRpKzoQMs1fIuScbn5T3Swc1ZPseOGdiLH9Lpi1H2nuaLrTsOZgtpzYyI/OVEurYPIrfGZBK5HtQfhSRSqYJbNZi2NBD4HsD8XshUWRccAR/FwG5gnQZ/WkVlOkFEV0mxBFgW+VSvC6q1/PCObO1rxOhQk1dvEtKqNV2CWcQZycQxy/5Sy+YF4mWLVuJsBGDDiM2cEvxw/ieCTlB8Oe3kihBKJnOJhgdH7NvlQMfrUcJju5tyRL6tooxtCYs5qeHTfh4faD0EMWOA96I0uxDNY6kQn7DX/bLJDHNVk+WoZn78BzZiRcY8HYtRl9kHDBQmCGPhS4S6HZ5UiKcNxtZZlRXxSYVpdqLuYKxB82PWF99db48JmWxb4OhD4ys+P1IWNIVMc+3Kf4d4F8X99XQ1bB5QtoXECBoYE+pouYd/dvl5Jj91pRovVURt+gWCULK/RJ/gc8YcfGmIV5FaW5T4p4LW52WkJuBjpE/HXzDkIO3w2NiIKAUTj3mawQUE388e2t4UCU++32o/DkadYfibSRrZpagrkaS5szFePpE+LK+/aQVUbeeRDMuwnbNp/XUOWQFDouNUSFQ7d/stSa6FTdXbWvOggvaCC9hlPVgu26UNAl9vQ93lc9ENcwg72hoPoVjoSBavDh4AfJluqWQb8puA31FJQF6g7t7Z3RyqKmPZ3X5L8fQ77l61blsKlFG4qUTnCZGh6UoT/fOeIjyrCSE3WI8RVAZ/xIBaljeDI71+gvn1fq+iOndVaE+lzL2p4X+Ig3luHrUuvxjzFcvtWwOEhG7c9/Uw8bkJVnsfUJ1TcIxVEYwYJ4M63HcJyXnAk/JrrI4hz9LClIF6TUjtgtCW3ybnUSEcUNzeGynn5TaWZP+sH35HlffAceerw1o/LKkJMa9PidEYbr4JYU5PuGeBfFBWH/3EK/2i8KuOSht+87oWtvEQpqjZbHV5texjzoMZJT0v2w9G7wYgLf2iiHYJopPe/JJdVNwYg978JQutdtRoLsQTKXuGzRt6D+suVbd6GmNaDnbqiFPwdD7EMAJUMAPctxAwx6C1tWi6Urv4n0LPt6XaBWdxNmIVRcdXnC3J+wkMyaychqUbPwMBGjf1cgeqyzT2LRltQzFZBJx5D7JfZqOCa2+oqRXebFFu9Wq2NeNtKUlYXyTiym+Xm/lUtWsp/VqjpmZa/5ThTMwP+mVCf7kzZ3h9h1yVSzzVwWTWyrJXhj/ef1+Ssle3E56ji417KJ3WYCETxNLb5KrWxs9vszvvktQphU7S2/7+9uMTgyrC9SPTrBMHg45a5T4cIz+5sc
snvyIggxRHoO40x6nPeBbgRe9kzvl0mIKkhSeekk5gMlYeea/fAGe9JB2CFBCWFCNrXwVWLIJUi+6+CwkiGUJkNaK66kBiwGFCRHWuvrzjAyqWspSCaxC5oD6b/Vd3ys6vqDyCmW47vJ//t+gS6r4Zy/1lY836Ab/jrETn2iidKG+qL0h6l8rQUO0u7uQn3lmE7/KUn9MXyhfjS/J/zN1JQ6ZZyJtvgom2gt6dF5+9U3hQfb/4SVkTVNGaKqHMeb9VhqmQmTRDwEhXXW0Xu9+XCDwh0aFVHhGZc2OnjfGMORDQd+CX92pgRUw/cvt1kLON0gYKorjUOqVDQ23iQ6977byJL4HDymM+DVwEOR2E/BpPr1GkA9hPhQxExR1oZnTJLntMYqjFgfx/kyrRFUa+V//wEJhwhuKhFNpO+ud+UC6Ix1f+gAJbaF/NwrYs4eWaQcuNcCE12bQXhrx//kDKbVpft5f1mLylFEeITXQAE7cZ3tzvPnNFCsmXzQFRitumDh/VdpMhk9i+JAJgCCsBKPbVGX9jcKLf+3yErRbYKVkGQFtqrPdVQYYnks9EGfgCdWqg++xX/SJa4pqz+Z5xTMTrLKqf3YMZBs0fTvBPKiL63bNBS7iARUBKSNGlGczDuZiH1amgRnVcKFc/HTm7/IiFE6Bl/yFc/hc9bu+0BF0DAfB4+ueZR4mO0U/Fh3uMtbSRmeas9Txx82+QJLcZNvm1IN5Z0Ey6efagYDXC4wsUHyV4v3v9KiFSNg0euFTrvSlulukH1Hwn75I6D+KHBerfkgAe9UiN68Whp7Ab3bCMQpfPbGqaCp9bxvWcgnBJPxrTsNbMAd2WTZmjXFn1veIyd0ATRiFSKEH9OI30Jclm64Umv8u9C8jBwE24qx2Z0EbpzQdwhLjaBdEG0RDxP4rJqJqISFzO6zLu1Sj5mljRw9Dm9UFXVzVm3RAoC7ehRw6KYXaBUsgfNHO24QuyoZOrUIPdgcxBU1L/Oggpn2a0EGAcqsmL7jY+kqqJwuR3wthhkC5lBS1HqWdw1TAM5taU1tqR4oFbHAmdhxh49Y8Nad6vupfPXBIiGcFD2sGT+PO1TgyGb9/3FXyv+5oqAaL4b+QKM/4TheRi3idGX6UOVpk6gdauUHss+0GkQbXUjE0ncpbkt9xC1+HlwcNyPvkpxJ/5zq+FpVbeXgr2dscdvp/BH/ZuMTR5AnUHnrDdYwl3FiohmdHl/D7knfswfSwIJ/iVu91AwaGQXQcUUKqZxVTNXkktZ69kT5et3797pZTqwzf7IrEM5Zh9aF9KLOgq9nTHyRzF+cuRxQ5Sfdh0EJUNnJtDaEU9hyt+tvYKh8fl3p/4ZH65ZjrujSpw/IFcyLYdYW2mRgIOjza/qeT98r7llA4UkSY8tKb9Ilw5UYGyaA7ogxK/TTUQARBzUFFQopkYq2EiP5vUVgBzkdKy9rtVThIgwCkY6sJXwbiKuH8gT0/DcnUtLmJH2lYO7UD+ecOcB18jrgWEngJrgczpwr7rHxCfQoervnF/z3N3NZX+nuFX1uatb70FY0h+AGiI6tAz3KkzFJfESekiUPvSfoHPHUF7Yuujcf4Li/9VHdTDXu8rJmy86wRfKW46lxisrJyH888XcsjbdTGf3hiFNgKVsFuB4dgRejFJ73CGQSEwe3oCp1juvj7UKW2H8YUNbXl9feXN1I42b0uQl60QZU6e4MhTb6DYdPyK+mUiyuygUMezxd7yx5Oe2RCtSdm7krCusU+N2n2QEhLt9NBFVydrD50/u3k9934w07MTJmZNkeuJwN1YTuj9aIANvC+/YkHiQu1w/eGLTFSThl9BTJ+VM7dlilzc4vKmDAT6OtwW8qhOzHuJ3QPJ4kDbKBUI7hXDHTDq/RaXpna8Q8HiRsbXY5gju8mcit05Bb6g7/dc1Vnvu2lpEcrFQ8WyOrXhn8W2Zc3z3Qw85rn+iTd3YmYpq09yceKjComVgnA/OMPndsKQgFDjAW
RgMB4P0kaT6XB/ZjGdYu/7qs9kMtzlAXi6gFstzpEXerU//Aotw16faQ+ppqsW784SEgOQSOZP+tEJuqX60bXFO814V+OhPPcW+4z8PbnaHG8s6ESiXtCaTA5BKaY8i5STbB6xHFGcRliZ2U7tzUC2Oky3OIlmX150RUkt7F6Tycv9ACZxyRWAgKFRAUgQSywg7+d9UBUASJWlx1QAIIIUTGnMEPLWvvEbRXYM75DmQ2eMu8mygwui7SCEzkdnW675yL+ezDnX+YD5v300VqNBxPzUMDGEq82nMpLPJl7w6teVpgiBZU5bpLy1fVEE35yrpAzwMGtAMZZYh4RrL00sT3eDiWHmz9u4trQXxZwIH5xMCIHYWUXtntJO1MfSJjbl4b8xRmJrNMt7ZU3m6CnntDvtj0ck8h/CkQKn/xQ3jR42bC3Ww8m4zKLVsnm6BBfJMMJIJNndtha/uEJ4Po0BthlV1kf0IQ2cmPy1AXmmOQshmiY6NXdTFaB8lZBZYTk4oSqYbCCIdd4gEAbwzHOmoLAKsmafCOhDs4URarPd+D8QLHR+0xlbdGbzhnLv+JHYfiztGUYw7zue8l1P216AEtA0wM3203U8aSIjEK7/Twe3SA7+ViLcozrrfsjS6PczRhGI9UKUnnMlVXUbbth74QUuNIxG1zGtHW7JPjgZbi1tlvyFuX6SIpt0JSeFdM/oZveEX+1gY8djiiOhhftZb9Nk2bSTPj7rx/Vgt8nuQR1H5o6k+iah9fkb0Fl9nE1ygl/r1tWw54mLUd9GKMJV9VPAjmh3G2VyqqMKKbcp+UwdFb06caKcotqAgW2G1PI1Ix0PfxpVaHkQTYFi9Q6YmE/cpLrmf8YVC9Yt+k5yRelje4fmQUN4uN4dTvg/NPrH0fF19zveai32di3Nd9wVjaws/yjXJW1dhoeWx34jwbrU/yBFte0lxZ/9rfBuphFmequ+VmFh7UMsrwBn/YFzSAdkCsmlchGF4+5djFZxUPMM7+RhLLkpngkJkPQSwkaM5H3rPCRhWRHamSEOfgy01Z+e17I9c8JR/CEhxH/KYuiBZEewdtL/h5DGr7N4A4qJHsMzdFB1aLyWPWAcauU4iP9aRLcBIMwqT1sPMSyaRUUHZpoEfV2y7Syt7rqLdt1cpHUCNB4y71ERY4HxBfZ/GwXd6LANRl8fF9/73H7k685haUzxZHI0qUNHwuZpAvMfcsmD/gyyMSeW90HP9XRnr9SycDn+64cf2bq+cP5k1TASa4nwEmYShIGOtlHePIo7EXJ0MBNPH/L6Iu/sRbYWakyoITEOxDzXAzn8EdER6ESHTs/Eg1Wy/hGBRrH9hypLufNgci8R5hBlqhho/BsU/MpAqdxvRWFDc/xAfewCBfsUNQYKCo6eTUq8ygczpOJbQwxgvqFj+NcimnRoNDsvDVn1oOyxfO26sKJSn6+c+MoMFfv4uA8kJ3TwclY1Kr91Loi6mHCbit4a9Kraz44FJU1gQIUpkt0qRPaMaIfheGew89e6L6IdkV4rKMob0ZuaDXOKew+do72oCdl52hvXJ6zGkBa6GadhNfJ1wiakO19mP0t5aW408pvM/f/sT5CSaWoaBLlDt5T3lPBCIJu7BA/Qaux2dB1pr34Gx66leMwJfUKlk2tkBGlJAAmncQzo9xgjcqIjIKDLQ3POeEYg+HGxQjciHWqeJrOm1R9LFkAToYkRh1TWmrh1HktM1KF+iJhBY/VvnPUxc/e6Lxf0VcZ7ADootXpbYe540hMVtm99kD3q3FI+wPR2mVM/8e3kxJrpW/ECZ57ETR/jj0J1B5vmJz8PhvG/zcuYnmn4wAuou0kuN0++cNTk6FwQeiGoxlJ9Qt4zseXzW2O+ha3QCG6EmvC+5Zini+Vf2oqwjiTuZIeHWVl4LtGdUwBQ19p2XrDISpfwLENw0pmLvF8C7ASojWXS7UHzziHCjdKBUqBrAOn9OfQYDJUNvte8yPDi7j/h1ziAgXlfha
incz+vcGXkcoCZwqX4rpMGyllg2TgpjEjaNMeUfjLqX6Mqz2T14FH02hI3VchCHkCW7eoeo3Oz0MYr412syw9qJYzi2UJM9KkLja5LJhd1S3uhJuN8pXrILkTXC1rqYIi+8trQZYWq8uQ+SEb1P6aBH1memNHoZrQVULyyExWChdcP/knYGCwsKJAzP/OaC4y2mMJvhqHW91go9qjI0ZwxqLzrUtJ1r1z7Yj9/BN3o1fL1gyuqQkHiyI0wuEf1fiC7QGwkEu3qRRmykVU0Usw1Qy/GZPmOJCpx/F/2by7s9rVqyLvyorX7H5xtvt3d3fs8vByeH3tPl1K+Fb0nMo+StGpdC5IcKnU99dSkSzK4adr0tp5FWXLw6l8YMpsMNUGfP6cEKWLvaSMjBtOXsXWmCuecg94jeoug6A/wnX3t+pXqIoqnyxxINjjPJ5aX3fmnWL1/d6xSk5qobPTo3iEM6padj6cbr/a3IoIhLeApjUy2xzX/dnJd1XP7DMGeJBLfxbJLnEMi6jLLQWtBChB/hArDMASkrsFoaTyyDFS/J4gD88gQIkqug1SzPV11iWe5/lsodmGjq0yy41O6Nfr2NqbBg4ZBkjvanjXuWdoZc+NlclS1Bgq3FOxJtMoJm+ionYdMp8znhMjn9UVRHvdO0IpIcblwTWMl6os9jMfy7/viBL8PtoiAfgjvsbIsIMTJ5h0e+Eys/sg1yWCIUgP2a2/rp7LCezTU6YaxjTGg5LQlDDyhqoIS/qGNFrTvdkYZHhmSnx9JBhE4OMug1VjbDL/kjPOGE1OFda9EUJL5Dw1Eqw3xnJiPe8LD70N3ibsnuOZ/xcGsMYwSQF/a/L8HiESYnt9XSoO+Y9qykeqqBtU04quNa3YHr0LeuM5K8fy3GBJu9jhYiVQ8Oh3v/DZz0BYfaLLdfSkUW/QsUlSRy4X/xdBo6PkaeKtNPity+W+4TAXyNtI9oifa8jnTJxQVdEmmfwYppKjEntQtrQHZHYCisYvnJZEgv60JfAG0TuX4hQ7jLAcxf+KPUYdb03XY518CPjat/Aigj64YNo8E7hy/h99x2Ai1YaJJ7RTGuZ/nIkMnUFEmrMd+UqKzukymPlmz+2SbNZMx37GGxrtdis135EuHNmo7yzpyYbLX7sR0MS+MQB/izrfgIHhiIo7HvTjpCEUoPyyYdRHkTeZ6xFPKCVzOo36974VBC7wjHXz0ZjzrJEC+zdzBsjfuHHeQ0Wly/ty6UKL3HdqPG953TfsPndsJhouwxUnSaC4h5LCRlcHUVDu85fOswkpkcg6xcRRvr/wmRwVh9M0xyrGHmbgboz6Ldc+GlMLH3Vm5Z5kBsV7PPlgmKsJsOl3xx95DuDZhfl+eAWw+f4G2v5Ilt+D2xvENm1SiGcc8QNVj0dAhsFnjn3CG608idHLpOSm6zLBKL56hvt3pYpEj1nbTPelZiqygnWNBJbUEZ4VUrpotmty0BwWxYdFj7oFL22stU4GuvJqcKdFLRpXN5xgNRA2PalovJOSghyTj6zXfD4WSH71BXQjc4BQyonkkuokwV1KFeuJ8/HzDtCLjyaND1toF3oETk3vm93Qy1Qzqzj7YlRRzJ7WTYOS/Brs/9XFheHykkXt2LQTrzsMJ/U66gR6G6Hmi9XflMCzb0YMGN+WblFL6uHvzygWKtWIQwsixfx+YzCLBHRDlxfJe8m5Pe4t/ghOeDa7k0Hnqf6wFNDG7eBhzDCR20DjNQBULgAu960jOVi5BcjOF7KUINw6RIm3LOde8nOo12i+Im5ub+QR3c6DRux461qUXuJ97gGGBzhdM82ehu4jmcw8QU88AFKSiYqOcUpLZ7d7hJq4qcMCq5mejbdtqMibuSqlBMKrtWtJ4GXf9CxHevM+KnZh8vsrmZNRzgdSLY+gHw4r6urvSN1bARF/6Vnb2aEvjHtdf83YXuSAaCkPMVHf+AmTaOdBiRgd5Z28NtIv2bl+OJPq3niGQAX9iRme81D
HD9FnBVfpgWPyMB/wlQIDGbb5wkZTyvQuPcwypvy+Fvao8UVL1Tg5/0K8luFjGU1DNMn7QsN3JH/AowWkzlXVlNFBpxluzr9nN/NKrEfGvps503eBQCOcQ3tHsYJl39Jz31H/zIO2FJqDeHKtYJU6lk1gJ2JfOKhboQ/Et4QXdyHgc0SPB/c5nOz7zRPsKL+L2GqCzvvXjjWTmSdJWPTDbM2QhwmPlrOT4tpwRrBCo28+cjeRO31IYM66ivvuLsOxBB3SmtZKuVw2jKjQC6rbNQkLmv//TJ+czJtgq9Jo+HEbNpG2ajUgLc+fJWNO0g/dop0R4EOMO1OF0Ib1ltno1QgzAnrILJ+vQ6Kw/lj1rzB3inlUK0/aQrXI2P4OooNvhKyHUS1bWAzhPcLpl3Xcy3OQlftDEb3ni9bUFib7iBZqBCzN1cjGwjY0OjkliCxVr9JHGZxEFc0DHhenC/nKRIdaSQdpKP9EFleGqkt+fGdjk8G8RcmpFY5rZHakeGC2DE+1xgkk4VBwLohQy4xVeHXY4ptAgWKopwp+IZmVT3OGZEfOIqCZLe5Q/jWCdSC4prdDQyTkm4CLhnhpACGkrZv7Ojebl88gEE6/QLdLbJWogLsmhYECznS+VA3xQkj/B782bzrAHwzyOa+TWNsVMJKVvv4lnfnpgvSDBy1ju5sn1cpUxz9IVrpz0fSNltfJDnYuz3JhPcUMEBEOtNXUy6m3uKcfoHTzxVehBfj1iSna/nmECC6k8c/+APoAur+bjIUcWJ7jmlUosU0T3HKllhz+OgPeZ0kwRlo6Wl1hnS5ZGTXqbjh5sKeZfEVCKSLpupwuAq0erJmuJRbvkPxztGuD5UDoPfTbBWqQbaSrSvTqnETuBlv1Frylb+rvfPNjXvRPuwzSTHWWzlfbuqc6Qq6wqTD/iG1uuNr2PU11xdYoLtiOERN0h28R4kOifWywstnws197t69AyiZL+24y6kGIuyWY817oYvy/NvOSoRu+9gVhRfukdCf4Z4xgc7BwqZy1d5+iSp3eQuy/SXGtqDzf6ZYWXzk7Pq/rN7Phaliayk+cqYC/5WgTQa3Kp6sNuN1tbnXI9D4MNDJdFklMdS1LoVfEvDirZ7FUJTnCmNru9ip1qR44kvsIuoMWbqalz0v5OECq9eJnEpwRyEpPh5xtO+KTsFwvRYJKaWQZEXx7bg0KjWQf3Mja+X2k8GoUl1LM5qfEx2/Y2nRAwFaRogywOAZpqxfT0xGrD2ApKmRWztSZynLen3MkHrJH0+y8eZMzPiArZR85tGj9UTvlhln8ndqYTjfXgC6XvMxHPKG8GvaPmVvH29p1TgxsZwZtZZAmF0cKs9SxYR4qXpewq+KNxtjzuk0MNaDXru+HgkQSZ5VYPYGRaW/zSwEmzJfOPtMLai+6KMKErsSinqw7PPNjIm60+APG7Zq1gfR9gzwKdAq6rV82SADEZDJrfT9w/h3SUTevo71g7Pt/A9DhzIySzuvG7ppzblz0ux/3VO3nxOCUCjVepWm7o97xUmOzrcuFYQDLi5VDvD5QbarhUBmXJbfmeWH1iu3Q3N2452VnJQLMD0UpcamcWSwYMoK07Neub2LGzLalGhcGbS49SxTOGgmbn/RT6xAaekRhnUcQK2IXCaOGg6gAO8/IIkQ131E2dvPw0OD0SiFh5aGxOGJ9Du4jXdmNOg23r5tm35wHLgz9oiiT2253qnCVds6gmxf6HcxJsrH+OFcBKm0QOupE2n5GuBP03kF66UXcUFOOC8/wjGBrnyOM43nB9E4p4+LGpQqzgo7CzU1f1P5OILpjdz6Whlr48eL64EjX28ohK7tCM64xkZ2mYBOkCxwLv8PpYlsqnjqCqv6e8p/J9tn9CA2MqVbrarfaQsQPG3aM0+H7jcQifcL4qGWrKFbaJdHhHWI7qp2YhxlZaI16mmEiM1HRdzdP3WULCxPUJty4XhfIAziTkBqsb6JyGcIYLN9UipVI1QRIfSR
W6rIO5fUmx6BCPf3dCXKZqSwNkmIewUr/mPxG8KF8YHbcMnm+iNAQPXq8efSeW/ydylte3Dq/jauRt9bZSWAQk2eeXVRm9Oor9vxUsbfLMW9wDK54Ugn3BBW6UO2RjknhNImEqL4udSI6xCtDiOFeH6hI/z5rVmUM4lwVnFlCiAexJqOqq6sVBHmjtJFPbSJIGvxQ9ogJCZ8g2PmLyenumN3ylaGS1kd0KlLb9I2u6MDjXn78T1kgSHzZePotT9U1EJibduMUg2tuJpn4ikIM82mRYu7pJs8GK8PVoOv7YTJqdScuMFrMl/Txdb4PQdC8aFvWTT6I3zaG9Aw4vDBHZV45cEAq/khx/9KRfNDW8vwJ3EM4lXnb5P1+mZmIbvQYTvhVbPgwF7VPL81/DeSMmzQ3U8TAGMGqCYk9k2aaasaptvXASl9BhCr0KrguBIzrDTdZhL8vH7AxwvyEhB9gj2NMyb8B/YvP6kMZ4iDMNspmsaoVqkjzmLWMzFY51b0KLlxDcizcmORjaJx98b4LS8xjE1j1COticKdfdMYdb7m0G37XA5N7gj+0PYBlfSOvj+sX4wj5P6Yn8q3snMZ3sFuMlGPvZaleVbYCMf03+Iijhvf5ab2SNPVQGS0MU5oSK5v/eYY3mF6XWJja62FltrGi/UcotbKKkaMXLH09raKigVRebfA56lKB5oZjSHTb0Ojil0y4VXDK3SvjfBdX8hukXOR3DHtZmJN1xKpCfRwNtBCa5R+wJq6W2LFfEfG7P3nbXpSMdij/5qVUa1jSYb17ThMwk7xtlD7eZnq64S8CrganYEd0Pb1kZ23P9oxphTIIGq5VZdxLLO9PwaDxd6dOO0Pj3PQQsQbbkysibm8LP+8u6FfmjV8w+pD6gPHlTaukzgcfFqw1aXO8qHMJvguA1fPALbAFYcCqpQGCEInOi5NNIqgQP4drfdhJSDpA0CaLOhA7Ic15oqEzM2NosRrGNxwuLszEi7ABucxIkwYt8jVze4mYd+MiJycvK1PHRuGKW6rfkPBDlI5/In6CLaCqI/tTkTDvIjG04p6vUlgtcIDDXDFdXPiySGs0PpCi2FMzVPCLx3qoTmwSNm6FsLMHOI7MMCXaAdJvmxXy69bBZjbd7tkJvSgqT/9GsJhQ1wwdqH129jiByq3ibBaLe3Pc3QNGLE52BVmZg09v7KSR+P1OaIYZ7twqdETXaxrDQGUBvGXCtfvKhHwjTLdCISHANWt3bg2dunbHxDmWRSrRBpSyEV6mWOn1WeCcKFXmbMy3EJKvBEyitHsjCrguv9SLcqniX4hLYpB1TC5y2gFHg6Dk6uD0iuEvom6QaIVhs2zn+Z5DN7Nvt0tWgnnIwlFFnUZOgx1KthAwoPUm+ByBw0upNyipA222r1Jzx7MrOEtU8aE95Xe4LMOyKupwEUzjbLDSP/yWPJJMq5xIeQQXJKok0lh9Sy42gQpcmnV/peOC/z0BOuBb3+gm7SisO+vGwqFY+MREuJcxKPkQETO2lvEwOXNbGeOeVWyw8kTn+orlsaTAwUtwOXWLFX/NYtC5wUcyxAKaVvPiYZa9dDK9IetGEG0j/VJlj5PvE19JIsBXpMPhSUy/dLv1ts46ntUvz7iLY7gRgHwVxsdRlFiTBrVXCqujR6xeQsa+mk3i8WM50f3+7wTzGFoM6/s2XnGeohrMYvFmSSPd6868Rsw75rkcRjt6PCyfEyHld/EXhM1H3X6nWTxAWKpos93sS3fUvG6UZJNuIbeioQ+iEv5Nyonl+FuP3sGYM+ML9OB1fdQHheTvhgHr1l+GQyS3RejAZCdo41KGWmpeSdmH/jpgZy4oGlQ60pK26Mixg6PlGMVFy/weiOXWxcfhyu9WqjPEcd0hKb4OhCunvK8egiRIz0Aynr2BQ7B57PJ02vuMAfCZFVdM9zEDX4Z1159a8IAUEeOoK+FBfgudIspzP+e4pNSMB+BuygnEkEclPBmk3CWrQjG502c8Tob
Bzb/VpgLzpt6VVraCPPfPNJoUR9nBVasUkgWa2HegzQRtQCTbSWXMPLZxuXFWTtEOE+3mVR7JG4YEYy+y+AAy3GHcFuxamLnaYybfO+CFufPEO7yMlioNUDgu7u+kL5nd1bs1X3oQ3eEy4scxHe40nhsyVbbBWwQxCLsvh7s0FU86xgE5SPt2Q3MX1OQYbsG5aBhOD2C01AuUgW6RCYUAgFyWM5VJ1SHADijs8bADpwhummePexUDI+jnAYj0l3OkOjCVvc4CejGtPU+dLiQGTR6qZjYrHnMVaEYJmeBJG5b01N0ax3n+YlvlpexqBiyMNDDnNNnFQPF/ZQ/yPY2hKbkaz+BD60Ib5OTyg0lFe4CppM5xrMSj+0S+wZzjmX+zBHVVHl7E8mU8keES8e+VUZGNEPfvNbIftHFg29g4rm6QRjnE3XRIKq5IdLoiDYt4XSGKxnlnaFFClCr7alH5YodIk+12uSrYqHK1G4IK/oP/Od9zonF4jYuYXhDp/dISRqsxyIytxiLwvbLF6+siyOrAwREpn/UHbKks9dkgZTqj1Zl5dkDfkrUw08u0o+G//r+AljjsRPr5jg9lyHohcX5DWQYgO72IuCQFIRqppJSY17TBt9IiztqJ85siAk8LJNz5QwSWGNyYWUpt3Ly0wDuuZXSjDCmndM9oa/DPp+80YZjODZEWUG7m6TArczYXwOJp9deUpP1YeD8RnGQa+BUNnCIG9PPYy5FNjGxVpnF/FiNhwrvR0sb1dYu5eLSramtLAVaWbe5ZufNUnacPnvnWBrRWEA3OHm7L88y7bgLBXu7WXtsswhgzNdG/C6OHwc/7SaDIFVbyv3mZdnDLPG55bBOcQ127bul/DndLJ6EXMZYfR6KdlArtWch+6sMO4q64UOaTflhPlVkfCs9S2lkr9rmUV0tR82RfZftOPeksE1/79VQDTpOXqyGkMYDLNPg35H8kQ4NHRnwB2vqIoxJ6Qwsky+rp4PMQ1fks0mAWQAHrOwPdx9OTtk/QHzT2xCxP5IrRAoXrBJtmh+z/gLhd7Hy0ANGCwLa7Yk1fyniU84K97DbfPpe7WYEf1U3AH8LUwoSonLOp7r78uCJWQd4ueDRyoctgJ3yWGd05VPklvf1zqCOPriQt+K1JcTd1+9MH/sMciEh1SKpns1iE1Vx6PrFYZhrS7RIjlna6mhljYFd/vhGp8cFZr7y6F19qkAh0pWLIiS0GjsczIpDuI5gG6oDdbk7EidJQjVZJxaqw/hG0C94gB1e18fJ0k+YfZPBAfJJG9UQjnHHS2iBUoA/2lHp+QYPjHFsXAG//Hy3RtmXyJsKJGLkzwrK5mGSeSUD+TzcXedysDegZk6Jj6kjAFVJMP0tKIN9yimy6D0hnqYddjLlVQrTfK4xg4Oe30ydY5hhdVXmxjGIHPD/fXC8dGy6ZPqNhu3GeUKBLmN9l3Uljc6ThMKYUZUBinA8EJ+ZT9Yjt3yEM0UU5VtO22NtuNEvocwXLOetC1UFnOhb3QUIO2wndEfGMDXrkpSJda1zWWlZt9HIcLKzvgHgLJ9y2YapYiDqRLMsQTSNXWuBDceLASvbrrHmNL103C0T2qR1xvR+dCLoqHCdOnIjVH33JuykFhQ3JapJoez0k8mwTeEjkvho3q2vP1//T+X/g3b0L8/4tOFoPCoTtxdp4rnh5CMUyuYUAL0rFEuYyreq5HzS9IXC+Bd7IurZw0bJoVrovCd2EHxhBNVBlWV11cSKP4G1UAw9X5UeQOLRclJCpH+b7zDnZBiR8/HYF/cfwpBfmc8iA10GugtFnNnpxrTrlIsuK+LOkRtRNVFINglaE8ZE+Bn+iiXRtzzoerC28khuFxr7Gh9TNbth5sttrUbL1sTVABSabP3iBeqljIQfHwysH711f9uyWeqG1i/KE6wEtL4D/scZ/j4CYfV/iWNzPnFOnDkRbFAO2nvtD8dUAUhoZTIa55LVGAvYFphLM0wd3bUqEpvT/Nqd
ls+aQrdyKZVbpWo7WSamL46YxOxR4IVAe3c+oVFXKHcgwVawG3ZkMA+CEOmx10W6W1sdbuRqP5Xz+WMnrx62E8ynx/2twN/jhVwUQlxWFV9gve3UPcNii4Ey/nfGeemSRmJqeAtE5b3YWoc7InlC7iqo61FUPAULWYeWObEUnbwFlFwgQZwGZsV1hf+WCyhwpwKufgODh9FNv/kcejuOemLnMicT0JnIH6+xkCXjo0G9QAg6tZTphn7hDIcAU+a8cqaOCqJRAu5C5We6mW8153vsmMxlhMCIWZx/UZs283QM5ZgK0Y+03eqNoey051dq4njkyM9N/TnLh/ZRFeEJD4B4YHyFdmKVDv2G7gnAhjC/6OwtwWGQmcCi8U/FZUljUQC5wTloSoRxnrpUmrMiFIk/vw4E5doK5xbzLp0+mOCCYc+/Vbl1IWGAVrzv01428PVEp/3V9biewhlu77MtFPAci131/TlcTv148bolBPBAAHCHTxkhq35DsTXow6Hc47DrimkV5jt7YcwBe7jM/tzVRB55nSPSddT4uzhVPOiIkfXllFygtiJ/edd4rrBhraPbewWlKN07m1zlCzBxXFrG4ynWWLQiWQSdwzxZm0GXTVNiY+lObP0HkQu4Utpe2VsrlYxk0dyCTyt1fmnM/k5thHKfFIOaI01uj54irhbxWJOMafSEV6YeYE4o2PMYN5vfcBJ1VfrJPr23Cv/R+pah8Mbaw5a+dmuXS3Lp4C8MDLqmaMv8k+n1KLwJt1s3Gzllv5/bbD2hAg9gQWX7jcNTBgOAtrAqgCMyJXvbdSFISYv7PzdMx52Wbgcq4gwtkKBWhku6kV+kzm8IdDReMcDaoeeh1rIkkfOOP3lQeKn6T/hGutJKqEbT1qU21zT79tjt9aBzt3LFcNTpEi+SY4mgyOyq7wo0RjhtVBjGqCIXyULiI/NTtDrhyQTX5dSYf5PLETAu4yW5NfUOMRN+MjnSIVv4Hi8IZNWt1XnYQmC+OiKfymAUZcKCWTDMtGQJ5TYkE/RIALaw3ajALZf4J1O33fjle3Ay6pLT4uGbHUxgaIudqRPpMgTgb66UeGuLJ1wUcT5IwjsmWACcgunO4ddVXyySCrM/Ql6TRlD6NlAQzT2kXR3/5fZNg6eZNmWqiW6sNyjh7cVsFY/rl15zarBvCM0Bsg7lPuhDMn63b3cVsugA60seJvct0OoXhIAFvRSzast2XJfjdS2kKZU0WfZDCjyq+mQtdmZXEWiaZmyllL7b2T7m6xvb7Sh6NQXSe5lfizHIWyBzZXIg+w9SU2x3ni1w0wuKyIVbyC2hqMmDZwRV5T0Eb781fUcH4JuAjj9O//qWNlGrcd3gZS8cwpuR1XlNuSu9VAvwDioRCAu4Y6qfloiYcL4aL5Sg1yTGkMEZYCnd8wP1bc28F9h9Y4PcUXf2ktuqLJY9qicbI/bdc7qsAY9T76cpvUb8oeQDRRCHZhGhCjuLXfdCmm0B1jD8LqBovFE+u1MAL/PgXDVGhRdFdPOq6xKb9RK53l5NTSaEDhpA1+OZqh5BdaKQTKRjlKq13qi46pOI1hDH8n3hHrt2itrnamSfeVmPiVJN3YPaFvYgC6yPTKdxC9g3btor5h4TgxY7kJr98GGG+U84TlmReGtwOCGnr1vacdzN8EdnIXHAoSH7n+k2wNCLQoCANBs29bNL9vmz7Zt27Zt27Zt27Y9i5iFnM83ndQfU0L8YtvnI5ZUczF9WlRdUeQIFXS9mRvXD9XTwNLWcZqJBgzfYfakaORGh843XD4MNhPV81kXlZBTjOMKK5CelBMmc7K5XjVwb0uRSj1LB7QzNwbiE+vEfyE2ESr9w13V+NXeOOfgC4jYHhzyKnqeO5dN1R3pey6DifjIJFFtjCxuTTwUne8YVmBjX0e0T/wPq/kRILLsmRGBP0/FQZ7B3b8prwy2QaXMjsJGAeMNpVSd/s5S8Xl0pCXqsbxxxOiLahL3nTP10LOQds3n4
WqI9K9vSTdBLq+eDE99S+1C9yyhrsQ6w/AJKpEYQlCnfIN4Kk02ryQh7FVw3nQQUzxkpB7uSXwkuNZzATbtg1XooeJTMqO3KGlsL11tnUDkyqIqgRU/kkOazETDoceFNDgN9aKIJGrNECw+UHafxRudF8qnq0tYU9qzCDV96uMHMFA3p/uycW6V93fpO5llvsAozXaq8WFBjLdlKM7nRV8foIHTiCSORfvKjb//GXs76TvLDnWolqD5B16dDLK9GZBfHqUWE3Nfepf8jX0gEykk5noIRuRsjBTTP3li8FViLHGCmKmXEiGmJppTBs2i3IundNPJB6WuPtFTUmGXOyNJqzRr88DVoteG3l6S0wji4W2zDQZLADg3MUVPyo43MbVzSJyl/dbosC+SJuydQD4Mq2Bm1fPuL2ISkVXQHN+oSFmmPOVuOboX8y4/Mc+c8+/gBgdbcwVaqQsSkc+FNICXWypaJFxeyTn/SC0jcgGhRchCw6yugCoMH+7+CSHUw9sgAEaDZWPwQ70w3Ee3UvXGmtQQot5flfnI1sGe1FzUmLocsOkeZIsg/djPsJxfq/YVPW92J3+f8UFwCdvEEHfr4h9kSmHl4QHFHEtJfqnWP89nLBhLKJVRZvmyHHglRMk1XbLKvicbz/Kfhq2TTprwDjEV4G5xNRnZXZFWXe5BtQjkxN0UWJpH90+itaHLK9DKjpLPSTiRm/TzjgekzoQ/LWkGoVSjPruUg6xQ9hCQwqaV+Wfeb4HvgJ2wuUpqTWNnh2nHHQXdsNU1Njr3itUQEp7w7Sp+Kd9ZyKR8xRVRKf7BTX3o56e+nYT5DE/wyRPOiodzITp13faAHKGwj8PumWwg8HmwEpc0YjTUhldWHgY33ojxG0A+jaPTD77CpoEYjrY5O8HxzBukHgK7qu6Sagt7+ZPR+8DHB946O6wEkDz80sdOV5E35VSCLSxrCmpII3/a4dRhhZSbugNPxPwsBcOOP8pYaNZTOcci8lte40hDh1ftenTJdwsJEB2apb3sq2ckPMF6GdU4IxJGMifX05t/w2wtF8Qk1KX0tosy9bmo8YvQYFRwGgiGkznZDNoQGdWlb0KtiaZX2aYhR/0n1Q4JrP3hdztfjLPPpxGC/TFFdBUxn+y3cOd2aS2TF7qcUUkXcO69IUfXpWG6Vz/kiRxb8blDR/JvqHJaxD7ZGdKuFsp/i/remgqfMQNZXiTgzUBjYM6g0u8gMtcEZt48hs49jm7SNnx1DblvY+MD1zDxPbPOKP4TXDyIuQi+mEdBk0MeZRPBhdG3l1l65p9dgjCfQ8HWjpFgq4utulX2MpxyqFEuEwMDPrqu1fE0MiqN/YsxQT9uefvrZjQp9U8KeKDjo3DCoU/Uo1FdkSlnSJj/fn5V6SX/BTb87P31PcL7s5/lGdCfa+RRMOS7wLHSwJmxZ8MtBLe6Yy/UVNTcmg4fzuz/6nLBsRkgKgwt0P++UVLp1vC5ET6N9fCZkA+efXhY+n2T3+hkkQRiMXWgoLCymxy60m3uiZb0/yAFKdePa2sHWTWMHRXFjXQtaN696NBen5bvF+q50AS8QYkS8xv+TN1fh2/TbzPpXhPE4sDE9sQnRnX1XBfiz6BKwt+nQlGjJTiMe2f+9WsVHS5Ijr0NpNIssFw9gTkJ0C5RWsPzTp1mUtsbEx/4mgTPHCXmYRcn07W3oBbGSeZKpMDs59ayQW3vGM4JTwouGDaNyrezy4Zkl74xNJaRxwqZGa2Q3bicL+Idgm1fagYe2ZN+4pkgJmJtFRFmto418RUXm5+Qg8klScVZfbUuVo20bwMY3DsFU9yZSqoiq1yODiT434REwM19VnFb+V8w8SrJgTJFTDK+rrOSJEh4e5hexKdQlHMlWkCE4Ycuc5Jh7in6eq5f/516vdLkBV3Dknga+UZxfGhniwpn7tGSt5Zg58Y7riudZeAbsEaDca27NmvHW7ENCP9HFYRrzghy9SsUPqc71hgGcwr+n9bwD
wraeKsTpC8qqNtmTbitIEF8AQZtQ15pfJB/39alNq/8L7Dj+hEW2EV+GKXs+SCVXBiu7UD3/QfBeV7EjkBZo64oIAH/a7x2KjogBSQrJQzYfGsbqWtpVYhE6L0cBh/Yr+1E5rWAkpr2YhwghybJUSG+a3y5pLwjhOehjjyZDr5IaO1Kg8z+0BlOtYat+cslB91L4HKcH4q0Kpa+6nMHdM3a5hkR6+DODCljz6UPPfefAOG2G1V3vEYj8t2FBKcELWFgd/BAhFK/SgbN03BCs2ll1EusZw8XyM8MEYgRVlJ8ooszY8ui7ciWjbADIKpaAoa5Snh7K5DogNpi5K1EaXO5DdZ37tRX9avpMbMXjYMsC2/opViN+pGqKR75Z0s9KtdPIy22jLuJxH/CbNFK3jC2t3UVG9/ANoyJuZZ9UmbosTqcQCObNpYXoD3Tf9sn5pYHqxScaSB/E08kYcEvqlGz88NMKFAlo+mRf4IWucB2aUOox+kfOEbCimlvPtJEP0XOr1DjDt1mB2OZ3+gP6kmMGIUPL3pmPAPnk/hcJvI+W7Kv1Mn7UEh1G1UiZ8pngOTveJ/NAA/PMwj4+5gLUlYkL85ldmQQCXcSKV+deqrEoWMPAT47M1nGAcPYDh9rJMhUvQ0eFYcGJT0MbPzlO45DpIiFuFB6j/dIAzzBKTdPpTQFdh8GYmkrhA1CBoOquZO6md/Uh4ROGkG8yEBbFmcG9/qWr6WVpcVhVUoyqSRECt2kbDVN8qD5GtwvitHDF1Fp3WobxUnMeQ/1MWJOAA0du8x9jMySGJ9OFkyqR5jjSGj7HtXwNye42bdFiM5BCXOBR15VCdnhn0IvVEK6lJIAcXwx2j5XXOdIQVjjPonLOUZsX+5l4QIsMqWNaBO9KqMxhlu6bowZsTCjEkdslwb/3f3+F5D6HtNt8/qibz2V26HHviLX+uxS1CZBuo0IqqoZd7ho8M8E27USDslWHWVWMB/4n8BTnrh3XrioLJnuUpbjiMRnTlm2s8jLn/qlOvPeBPO/SQC9G/rWn042M+DBCSu9+s3O6oCV4ABBnCMecq9ptQfGX7b5cUVFiERi4gO270/lFJ8hWnWbViIMq9dlju3x1hPNmcIbIzH+EzzZgo74dq0EokulmbUT68DIBYR1k+aQLScSJZyvUG26FDfTU0C4tQmuedu3lBzeSIajrzctCOPppaQImks+h+BxM0k3+OVblIoeIq7hyU83+TJG8K1sbE6NQy6yt/QhS4ZGgXKEe+q/k3H8LDrVsClrYg3Vt0mFWOUT+Zvs/PcWEzyls2gXr6397VeLQS1lrZUDUltGBgWBu4bxdb5lHtptt2TPn8S9n2q69E+1cw5+s3EJZfeL+3CPcmWQgaErPrkoLgzRnhpgDmISAQ/As6vi5hpetx+Q6IriVlXFwsm51uzKZ9XT/hCp8DAISn43s/EdhBYUqN/fff/y3Hl2Zm1azlG+s242HfNGU+qCbjI6kWnhQ8zmouRPSTjXC4AwwSM19QvlukFiY6S6wWQe8Rv/Z5sqUfIksmbwTsQpSR5dY4n5MXMM9GL4OMk5PVtk6zXQdzeMLjXQVW4uq7rd8csEXtTRmWdSGlvlI+gbuMPPdULuFtq1e4l+8sqGrYlUSLH30hGmcj8bEkjOi6WHZxLiyKAizQMrh6Ap76a6syLsVKh4kt6ASNrbiRXsCmy/RsN/0PCZYjfnDA77548AG3BQ53iXH7mRU3nnAfZnzI1j47WAOH1j/GSSPEhfYvRbn72dtQDyntc9ibACFa6Q5a0N0mA83ZcPOv8r1NCAegz76pGfIj1+CLNuh/bmCB341jwchRBAxrcq/sNMjWuwquOC/ayWHek6FJCiLKlt4ARokooxATIyE3aloHYtyO56j8Mywe/qhWDvbCsIQppHv57Sxrc+WYVP8EwcqXmtuFjql4PLfm+P+eebmNMgOI6uENXdJbC8K1BS6RcSReysJXtkKMFmRChuc4fAPwscL
p+Mp2jE1JpX36MlXzNYssfoYySFAqSbakB5rt7cIKxlytUlG9Ol/vlzeI9jHhgi9F2PzWTqhj39bDC+G+twov/D7bnhBN+NWfRSZdFi3mNV/wcNy1IipjYmsnbjdHvO7LLJl5/eYchYStGNKnKwubHNdh51uni2Bhf4E4nhXd6BH63ZWt+xg4FYWA+VeM4YeOxm+gxy1ELyhamuzhraJ5NnfGIN7OB27nmwfqit8U9VtMDhtpjXyBkWUljlHYeyEVxOed95eN5XrTrMc8D5BG2GbUhjOfZI8rYLywy7X4CADhLsuCHRW1IYLWNKfN5D73F8jk33LS5hn772XxRqkg8DyHbJmEzXK9v4KOovH32Ci/mNYjtQTinalgUqPMpKuIZCXo39wTFMG+nkbxfeDt7n49gavQ9/RfjRF5bWacBgcvil2ob/9f1wBt9kb9E12n02H8QdSQTNS6vpG6m75n82iXTyT++O/hrG3xe0V6d3m2Vt0Cdnhom8kfeNyzKHdivL3OPmXmw/b3toiExltd8+sJkBa4bO/IsQxulh7HVeYQY79gmBI6Vmn2RFsRSVVfLJIEq7fY/63lTk0WnM0lh2Dnw6lSTHnjrJ9oHoakblzvghM4/3z6KdWxkvxGGtgJABAtdacISb1D4p1UKCm6O3uUTtt5cVTsCTTFDJX0LOs7INFyTEWtOiUWFCioLvW3KUkzRmLg5C7AOd8lWpxt5fFFmkU5h070CmcovjABelFs9ibUSAEhwbj6Ibr/EROxitk2RQe47yok+z0q0UKT59o4cnIOgc1ATM8GBt05VrNZ55F240I7+X5lbJElXc7R2WQ+zTaLmJFTri6S3wJh5e8mNIrFiuCBIc8j367lEyzufDtsxK38DVI+FjZyYdCmOXedGY5C/NBj5zNgPgaTyo4tCYbvHT/FKhHbWzbAzZ7gk1GLXJ1MvAPsVJ82e1q5scPjdcJ2H5QBrT+iUPwJnKa4SMXPoKOOEdMFmkzzCusm4VbCwX2khonAOamUTdDHMTULwCMLzfWAvuu0KB5prJmQGjiv6twyCvzv8am9lhXfwYe5vb8CGHWLXCe7H8VyImXlSdvZqb2Mcg8CpcFUlZx0kFXd04eKs5X2ZUlgrCn40iNyNnfWD2Cj/HQZ5mx2w8zfgHQeizm2oXTSqDWrwRQjVMRvgfHi+7oe0kTadoU2heBtqvnXWrQQJfM1R9yhF4gtKQG6q6tbt4CWIxOIrlXDVr21oW5gxgcA75xubee9Ad9jQUw+bnHlntr402v8rh3wHAN8ag5Os6I9ea39LUxAer9TKPJq0X8FsUMXQ+/10JQOoE56vznzeAqKqu8aFSBTMrb8AzjhmxEX6/2nSBNUdoIIIJsqtCdNzDa/72P51TqIZpbtOz6zFEwVuHGC2AXP7A1e098yUfgRbAAsrCUrMbh5T/qqbmOJjeJEdUO/Bgvw9wqtg0lP0piANt6uLW/i8nRbqvHMbK9Cm/kTmfXs8t61uVsJJcAKSfPMMJSAjOtk2W+z5V/Owzae0MoHgUyDZ5SsxhWp/KxpjMsadC3E+ROk8vnRywNRV51f2KRA5sqv0lnEcYUQi1Y+csTHxqxTwVXGlT2mRFFbH3vBo/D49fFPyItdIJgFIERdPHHDDeuigeDDFfycjBuRf3KYSor+ehPAO5crzaWrdChp37G3CrsSlk6vXocsogrqO2GsdFa2ItwRuzuARlBYDX/dd90nCEIkpLEmcyM/IW8DwGuCzbPQZdQiCT8JGya66tO2y38lAcgJTSKo+SCA78u1m1F+0cLaHK5gycM10AMmZkrHrPyn78xy7pLpgR/0GCNrN6UXtvYMCvxSVMhJYWEiQNqoAu7uecPvPW/Dh/UOVvhIJQyN0SXC1i4M+KP6bmtlbJYLt5hSgf+374mDHWgNyhF6QwWLr2REUDDCOcCasJ/x2GzcfPihno/hIx1o7f+8IZr8/FLVXVcYNNbyQbc8JLcbof6HQ5646h8
TZZfVTTA53HluLSBb40Ul85ZU7sayIQZNejRIM6P+/u/uWfngdn3pC7AbANDyEpAZLzzhg263sIw3hDs3xrUxdotB7GGTDWEjsdgUq1NHeXnIvbNqkbb6fnxmZ8j5OqbrRt9DB9/YUVfOkR6D0YX0fQpUy/ysX8XU3OEXHGuwgLDGGKrG542TzSULO6mT6lKhIQcPEd13EXxU8yn0XVg4AcVEds0fZwQ61ubAzEmlCqH1vxSID2jAUI2Y/gLcx0Or2i0KMyoW6JRPw+UaM7kTA/qCAXBbJLOtsjFVL30YxAJNfroy40RsglZEawVrhIA5vbNqyoJj9Gu9ZPSqbebEHGyXmJ90ol/N7ve/7+xpiL1Y1hUo00O75Gu28Leuj0xAblDn3oHGpI1waNc+VC8QUc4HONlwBccBtqgxq0aOp9ONgX/QE3/VYobJPAkEfGOqkjmrlafrw5CqEO4jJXGZ1FIvRjfkuOT7HLuVS17j13gsaMMhn0yEHLZ3W7GNaiB+xjeOHLoGnQYBTERrBSR7FKgUkPvJZTYJW4IW+oyLNH9s9a/KOvCUSyVCjZPydrtzOYRQzCJB2EKM+4BETBYz9Z2BUWWCzPIZgk60JMhxtlI4ag1wUqU41n8VyNJGdcs3Rjjys0wJUDuUyzovceWNUILENKohNCZ4qeoZWwzTvJx28SpEtfhrafhznZ9UQ64Y2nJw9KQeEsU8yOMfG5NHl4qSo306cUzQCgEJC3wJtW6KtjztQH4v3yzWeMG5RZMT5FBYIOHVZCgIXH8/lxuap9KOTYiGb3J1G3jddnWi5CpyAoLtnhv05phOxPhd9wZA3h69GLvnxSivjmoBb0mXyqscUlCr9aL/eJ7+Nxix1yiWeUiiCw7ItPoQ8wn3nZjeSHQ1xEpDkEK3svVX2qt2pX6LgnDVtKMJYQrH7z+jA449/B3Gp5jFfWxpXo9Smulcfd6Ucc/b7Gz5L3n8f2A1xSmLpkvCPKKzclhAQdzyVFr8yAc7lI64b3VhUM0BkP0DLyn7ArH6JxXlHlYBOOlB5ZTYshwd6seAi1imTyz3tcqqyCsbt+VV9EbE3rpuSoepgiRDqDMlY8GXtJtajgCtw6vhdpi/Iavlpcr7FMmifkh5m0kgBiv8IzhKzz8AhIGWZFoWkQpcCiSiz6qaJVkTf9Xw56HbvIItRDRFDuNkqNdUY3rUtad6J525NDDxTPltHxqMOa4YaYGNXl8zGcOuYqqG6imyq72NK+xMEIehV+eM81HUnjJCyrhfsKWaZQB2RTWoA/rXRZCjcfeWQEIkFfZfxzV2DeOis6pEnxdlpGUu8Egq2/9ma0vewVtKhkBQU3iMSGuYWOpQdY+1L7LeoHmEPMpeZx/f2jC28ugnw/65L4iDgOSbakDfE89RDgUjVTnBu8r4FQhr3vHyPkylKbIx8KBVHtBEmHPFqSAhH/l+Li4sRB6iWL82IyXrlyt4/NhqJZCb/lgdZNWGtjlWOxluOGyjR6krYyrd1qPVad+I2dL/UAmf2OxtszIu+SwvirKBrah6hAyQpVw2N98thFldWo0SRy/ARY/8BTvIRoT5gAu7Phy+3s+JW9Rx8y/ad0c79Ox/GtEqbGGrfcNzfvETEaG/gQHPEDs/UAommRc2q3Tq68QUD4uYobFt5mUV6e547tjBxQY3Pm5NJ+wCYonyrKlSyvMiIzXIUqS0JV3FyiX+mYmZrHZ/WpfVJAGeiHvT8bm+jDk+8BTC4zy4HkUSmutk+LQ8plz8afWlytJOwT1k35fDYPh6DFZeZFEW/hjgkI1XGa4R1P77hyNI8ZofbRp9NdjfWj5jrgNhe25DvYwh6KyOmKnGw3guTuH5J7Y/j8jywS1pADGp1eZKiBNzkCUOvQCjFcsYntuFzYfK1QU8MwbPfq15fqCJxDjGa9XtLwYorvxT47cWUDiRHcqGm5YsVBBisb7SDD6foFmdN/z12HIaoP4v/Kuo1CXbFjFiFGyqd+VtvdE
yZaEP85xWACo09OI6hXCEaoiY66X3aVHC2GPA2VHSeOulIijsbREKCGHZQW9xlmiCd0NO/0OhnM1HfMkIajf9z4gDYJAJgXB0gkUaZb1eTbi//7ZzWvpVQ+akNQyjgcdDwZ3LhksOlk7VXFu2TZhqWDH+tmioloIgt4WaHERxqaUkeo/L40UIBLg8kA0ONSD6CThUXEarHDIhSVgUCMV8UNVX+oTIAyRPNbu5xm2V64X7T3Qhlqb9mfD49f5dP2tWsnEkhctgoFnb3fBYCPeyuxRh1UsGSxyZMSoEuYn4ndMbFXl6Z+DpbA1fI3uKtxGPwz+V1kH/18IEUeaux1mUSM49hTYHW6NYGqSfh83o06HGLWGAIB4uML/k9C+G2sadOg5hxZ2iZOPcw3yBba6+TdzCO2kDSS54vB4b9/c/UWx5HMwqzV1fnGLwc7VqdyVtC42Jb8GYeT+YT2HQ/SmMSOTgzO2YNKqzixFWtSPZHaQCWeXotCMDD84o0Bc1VXKS9WUV9mHcThuMeSo6dma1YVe/8BLdgt2hYd4Nhi7GuLQEoN4EpsCITMV+RBue3g2bn+wFdLt0AbH4WtQmiFFS4zHtTokqfe15Fh1owecPyLtnP164uCCOwbH3L7KEly1QTiTvzaiyV0JtfKGCEcXSrrMFcF1mdSOdCYlarXNrpIFDaKJ6jB5smtkmOODcKS+2p0NDwaHyCMKJ6S/qyTpEzv1GWx7lRxO45wprEzR16BJUqjJn2deikgjidl1MWPjkXWWHCczrroiHQ/K6YM6LSQRNQsPtHO5gJB9b9tUDGPEyhTHaUXTYBi14qf1RFfxvwmdkjXeV2o65IJT+Oy9+UKUi1NaKC5rIiXKR1GKCCj8zdEmQyjGlEv2fK2mbJJx8z7jVJuSQ8WVChsoj6FExdJ3mzjugSR5zvji5+2wwx5T3t52Nc+IB4eNR7W7r7T9g321TevVaiAluQDFL4MWpWSkLcrtJ/rwVObBV8LrMCdDxlDzI1ZhmxFNxE6fTWRTH9el0gOhxTy+Yr9CuE6qhIOC+LJHFXz0AuG2tJygovZG0izPLKhLquIfB7iubPgd45jOK1VHFiaC7wGNwzXmfJRVWp/LAzEv7HMJkKPeFJVFL0q1qIpIhHrxeNqlhKYp/3Xt06e3mKShjzBFh5yYEVBWk0n1vEw2LmV57WMQ7TCMz71XxYdWe1nEvc8SP2lz2kNaU6yIwcxLnfKJywTGCxZJ6A4eO39nuSq6QlUEnq9aiDTojWeL1zLnAhb3KGCSmybFFKUc6cXkD3Cq1DJo7fPom+RmG0/8k/RSCe/CyKpbxl+/89bETQ/aeyuzAgLcQI7HtpgEaHBMRk9zIMlpMPjENd/pBuo7oFJOnq++71AdyR+RDkH+UljyMx3izf3r5ufOCggH3oCaVv4fhPerwC4RWQIVSSxFQD6IBndWmYeImrdnZvS2kJSt3Phx6/o4Cs2LrQtqXek1uzpsPfshxR4D39mTMwdYHVoxFY/7uZ4y0R+JdXWin/PbeoDviJ8K/HFQotMxIRdU8g6HEzkAsz8du32Eu8dMagn1yiWJC7v9nnGDsG8Q+jSlG1p/J9p1IbAZ7R8d/dGiE2UhDom5B2KjuabGCdeiychJTo7cGI3zRUUQ+NjV3FV3Myszuo3fz5risNFq5Zv1Fs6osmfXD1ma0M4wOvXNWWlq7dzG3QsDzTeYaYHEJhZAyuztOQJHU4D8mEibiejDNXhCLVP4HWCPRLfR4wKLsLH7QU/tfgiC600WBJjy0lYWgWZXC8c2PhdBa05pJvTLp+SpXhv6hes0mPIDg4mL7dNQEVMfnAJloeoaj/fQ7Q/+31HWxRpua3jV/6D0ZKwTutuMkDKJTGii+7uDjXZ1e0RPotfrzsu3t5U1j3triNr8eLDOgRWCq83boU+NMARNTKaDRo44HQc/BjBipu40C5i78ebWxH7vOEBtvRScWwkYw+RRsSYoG0pJl3VY0Bhzs6s4
v+AnGU/L4SC2sCCF+5xgywRdUi/jSM2VnzOiuZk1zZmoV4SoBNvuGhfd2+S5iCwjk0ioI+X+J9G/YoYJ3tD1Hfby06AvYzQ1bm9LCDc5/50vqPdTk04d5rSTVG2whAOtbzXRayAJcjdrVJiNYObiTKUkwPcp/CFralVI3vO5u/i0oKU+NL58/YRf5NH2ZABySV6prBCl7VAfPcMxPr8+IS7WhVPEF5urUE24NlGrIxi2U+k5C8jtD4hgGoSELo9qu1xhzNt+bAAXPCxoTH2rd7nm6a37isVdg9uPum/5UrNixcLQ0MFRT3ynwIAXmSWJgobD7BzxNRFU5L2n5Yepnh+nZrHSqwUdO8JlaslR0hMVfn+q/2IBzFDSS7UVI9Q5QYPKlqCiMtbWlL03vxBhZ2jpVItObJpMGcBJxQjnSBhymFjyVlmc7rL5NzoGJfgb+beV6e00osSpTvxd0F9SJb1lx28/JapZsIZ008DCId7ose7W09W7Aij5/RmTxYmCkyWx0SBzJQgHUzTN4Ptir5ycx/lGVyGYwsUH29rnpAyV8vY2Vj2bJ/M2xuL5dL+qSKwTuH13zSt95l+v2Wc9OrNUmWBP3X9sy24YOQm+5ka1bltcWOSKfYEg5qZ7BAbRg4tcSw6qD+b9uowSMz+XIF7go5WZAlERSDLrko/mayaATkjUFfvFC1qhLEugkQeDSgvL0JrJa8sahRiUhzLdNtu/ZjXX/wIh56vpoKjE/F6nY2Dsk89YYV30oIUBYqQaIxprbTplkRFP5BW8NFf0QcKNySD8mSn06pU+8nuiM2nRUZSKoljYSD/DNDvSJrB3F0x5NW71vxr9MYAr7csbhv4tK6G73pKV/XQC//4QQMoh+l/6ssEa/ttdRHRwr54IPnfzsvMkLDtkmcJk2xrwCrSVAnKZY8xKJmIBpu+C/y3dDg1s0nEuKbDwgWScVwfXsSHbaVopIQZLaOiaVR7MmT4CDK4BG1C6DZ+imFpKUg34sXNPkbSoL0Bq0erVnI3La5pxol2aL1IQndHwLq6IY+BwAsez8SR68hTA1BeZpO//KUpUiYts95EbGoKAmEntDN1M9BxNSOxhPZScv4UEmS5U2OBO6JqMPeQGjFx6FDhgIAyAEFrT+OzQiTrIm1mO5ExjjwmEYO22rueLTmRsiQMXpCp5rzSW0YoLWoUYX6RsS1FYNGV4EJPMjXVxZsuMQ0Lo7gDss6r8wWv66bRmSLoE/E7RbXtG84UiRiQIEEiDy+E5HeuAUSWOWWZ16el0xexGUWFymyMYFZOAWw58ju8GC3nz7tTGoZ4I6HCkjzP7XfBzdOg36UPQNIRmYmuOVAsSr/NGThQdqOERBrzGzhzl4aD0o0bSzryVCjOcNl8C2JaWYTD6jXubmMy7SdxbLeHV16IN2vKOiP0+2Y7dLUeqQmPyT2aRlOFUp4Ld/iVUYlM7/AeM5HEAXpyLSODUte+TS4RVad35Jfg/aafTOy67D8H8Ad79vtTmP/4tEQOCY9yg0l+YEX8Vyl7aLoREiGWHrkFoLPnMjsHL3kJwrHFhf4RjVKcs5UFISC8JaUQxte+me0S9+8sqjEtNFHZa/NDaFayJAtHy26AMVwCyLKkJc3JMgeGqPdj11lyxljN8WE8ox34CzImH6qViUcyOwjDiOrcM9CXjcdgnLIk7L5hq9A/khH1+asWy81n0/ej0ntuNWJvNjYhOng+7fWUMpgGQIOyp5DTlV0HUY4b/o7pynAGX+C75FMzRhLoUPoKmEUEfF4Kb4V3cJAVcDaMeS0ckxSXMh54/vDMWDFFAFtAIqyPhyg4/1llbYWpu6kFRiQJhNokouKzfwImjmYGcjqoXuUA09v452fztChsfqPT/7ipEUTG0ThD9L03jgnNNEXg3wEGPDHucOFJL2ueMpOzXS5Bup0SJQKBU4KNC4foIXl4kxYkkRrILQ5mkl4Fja68j5BPcwsrMQrxVwkZc4HPOXTQlC+P+m/0+
Q/uT2KgSs0YCTCzUzDXla98Yab/bXE2KAPq/uMypRhdN0m84qe6syDH2vnjrz3K07+DdPw+IGumZHvX0qkz8si2ywRa5yHSS+BphiiASt7YLASZ7q34gYJ2w9ZOGSNje8uExOXqal8T9ww45ca9lRWv8Z75Xh96CtzNChrM9ZBv1K6P2EjOJDhlKc9xZQMPoICCtAD4Q6Ha8Ezb6JQJgN4x2UqiGlNUuamHQhcaWAhcOjX2AduRmdCLB/g7KyTp+I2pAcMsCIyaaky0lDyj9m9jFPitrZEl5bod4yrvzHs9mtusnT6w/jo2pxkYmlidj44dipKp1qUevqYxSgmHg8/vBS1yO9dqlWcB7RamH2XieRsAG1eJCJJhKXZH4xS907DtkfuDO5mHvkBJkGF3VHzDi79sCLMmGmcBjYs8WNB8f4KKIAyMQB6C7ymAKlD20kpXnmHCoI7SclI4eqh5gL/9mt4BiAiM3yppsBNLCVXFm97EUTm00FRf07UN/TSzEWCUMdd5RVcVrjDOgwdcX5O2ikt9k4B5KZk9rXV6w06YrKJwPQ8GYFmemeU2QLJtc4OEUqlyYeoaFxsdq4abUXi/1LuwLf3rDEyAf8ZppOECOCqRmWtk3NL2m3yz5SVkaIZdUc/0PblMkkteUg9+j9YdxLjHrswLFppZSWWB/HjGpqCpZMdjzr/aG47If9a1WAA5JpeHXaYtroK4Z5qRUJAJ+t/wOMla6v2+hCEz1+jf4snAy7zYOviTV8AKKWwfLQGGLKkBmpRrpWe1xSz+Exv0AT0G51pVUqW+IyaGROo+ypjwmTXsEIJHRGfVAjMiAz+ULv2cIy9OFBxZiYulw/l5RHlXwgNs2WLBSY3r39+5egF1yWX28s7fDsAMvgCfGVFfGAHabZPIfwDWjMZTnneT4f/KwRmoF6AifoaCS/PUnT26HIxf/rNNUQ6BjPkqhZIu+mjgKdW+8OWEnjdQ6s6kBLxZukkLNZNsZ7+Ib5g8NYWeAxn0xugJyZfJ89ugVPreLVb8RVUPTIM+g/BJIuX3ax1MT7/y0axEI5hbkPV1t/TuX8NFmtt/IfO1bkCRaK4XZxW9xklsmI5aQZ1VT5CPHgWqbjmzJg9pBR8x+jdrrkUU0DDCZnE7ee5OiKlmiqwUl0i93IL9IAiKtnLIOXwd9dR2iZ8VCAzGpHgR7WejIQsTxLPjYBLnmwcVNqItojgKohwNga03u4kgvHR5SgXfRLv/fANCkX54q3EoY4zxpo9VbMx2FU8f9td1jeRB0xwH4HyBAJ83P4PBIQINQGqWrTgVbU146DErVLj1zU6+It1IGITFBucuKY8Wm561PHPAAhdInxEoroN7uBs4KcfzYjIJUrDiqye/Z6n7B3Xbhc7QCVfqiFmHgDPFQGL0y8+MzbsIDagkbHYyrQ7mA8Mgq9WMQIMdpdxBwRHdqJE0E5JZVqqPlPWle4UOOrCUaDKhVz87NOSMyrtD3HodgtYOc6avnyWHIx/+44UiMYxDFlU/jr2zW16h/sESwQ2yYARwk1JjVn8dQOSjzqPx9zhJCpprJYQpmdFqmEKL77lAUFPvAeAJMqPvmdmSTOY2aTWHNBmoCjbvA0Tkaz0k5DkC0n2WqM3qGiKHPfyFzwC7E3vqYdvwkzL3fnrJZ+yJmj6KKHUcWQY45ADdSmLqh46coQlreEYvsUBmNRneIXl5O0Fs+mbW1nUu89Do60vVbx2FlYZOevi/YeA3RvVJThi0IgaKgG5nPdK3k3NwfPXNj3Hj8c1+cGAcn3TD8XUjFWCeJxzrd5Xo5ZDFv1GK0Jovs/izys6iqp0bTKZt7I8Z4QfBdgX0vnl2AjAw5a0K6/JfYNAhbJHjCV/VBnl2ac2g/qgoV7i4v7fon5pqBN8sM6Owagm09Xpx9Y81IobOb9Q/Zni8+2Z4gefb9Ku61fwFyOEbYNsJR2ris1LaQ+bHQz9SNJz57YN/GvgL1SiTVZFDQnusUMPtkNjoG
v3GscYw5ZXAcpUbbLt25oTVBvlalqViTu9Kt9sZ05xFvkbXVP7ovBeo/WJsvZlDj2XH/F6IRnfSWZDCs5MbewFypmIDQHZIuCLhUw04JJRkoXDs0/CshJj/CDLKJDK47X4jGVnJVEmzhbK1jiRcaiSeWmipqycfT9GiTYWTTOUEj3zR2YZmfz57BRRdYq+CdJBqZlyVJwCzncb/ur/ZCAVCRxwiv9USqxoldXgZrp34ZBz9gu1eujCtWRlZU3Zh+3nS6YJ7x0AfgTuBKq+FxXOtX2dtXvYWHiZPXy33zdnDkRxPB8/4YJj/0WYPCX9+DNxFUxpQfHHryYnt/lO56TMgZhndj9tAqLPxKLGHqNUAi2cmGHX0fW2Td74ghrvv/yEW5+qa6U4If5WEgU4wVQ9TFgqERuRhTX3rp8jZkdgOrwxLuu74RFZlmbSu4jevmYV1zgaeytW1+rJt5yLubSqNmfeAaWoGndNqp7UprVQlnTdFqkrkJCF6fcFxJcbS9qJdEocUC2gFwnw7PQYa4PdpTUNppx+8ZPvrfRbFIDpkSCJpRiY41ZZjEK4S7ocijCmjfTZNf5a6Qpk3EEDi5p6ubB7D4my4eC4bLJ8892sCXCXIumDdThO+qYCZ0dP+6H4SLi4bYf1gG52X9KrUt/1otjFGIJtgNYDwk30kfFnVvxwZhgyR6fBKAZzk22qoxGpRZPyZjS951frO13xyLbn5o7P/KFppcczUTzqst7XLid5mDZlOqKZDZthcm/1S5rgi5MxL/MSR8yeAC5x3+8EabIWoBMQbdNvG3fLMwnrqfh9CarURT7ziqhjBqq2ZU7rEkUBiG1cd2m1L9mUtgJNFmXh1L4wcGomEipIiWdO25e6AWJmUnFTC81K+Mop7HDzAvMNwHz3wc0IcIqnqhEfBQqjRFfLsODPlWHIfid5TiVrxwdchjGYOopy2VbYxnOzEuhUTSRVaBRKxRQRdrVFgVYSq+JHsu+fMWavckEGRXhtwdZfsGsnThQYOFw9ENibCxkPNyH8F1N8cCS/v9YWYJAz8KAhFv2SUn8v5xTp54Su0MKsZrZlFsHDnmg4D1jY+wg6W5s/GFYCNUjFIsV50weGpIUAq9E3jqoniMELeGOpXnXAWKEfcYjcjsXhrpS21S0DSHN8cUan0oY2jLGKZgOHNaPVEnUoVR8diiqkVtNIxtA6+oZZ1jFf2Eiv0Uw4F7bS8B0yCiym7WjgqNQ5aGvHmPiXZ3G/+JsSYoCy9rLI4AGHW3pORob/Do5zCDP+Kps3684MwwvpVL2tH33TK/etPI+xvrDpLSKVGXI4b4lIv5cnUp/sokRZeDBIDyftzyiPlgmTPTuitstJnSGLzvtr4v0MAvUUkqgGBp2RA5LC4EPH45a6EjZj0Qx1A+rMo6jO34N4CG4UWCJkIFUBZ/O32umMf3/9ax0fTj3KlQDVKKRTGyVxWmZherW2a3b1acVarWnRM2dWr4qdLCizxyp7FR+6kLjnna2uPMlJuNARL5orrbpdaZkM9osoqD3ertQsZjnf4eWaYNFi4fOSiUPVgzsSv84eqmIIX1nkshg4RCz481lg6raAxSl69t3r0haKHPSoFs5gTHal4g4CR14cWuITf+Dswh37udBVeA1tAp+jUipxG3BKqjzYgYjxoPsoRhbiQJ10KLkWg4vzi0II5fP2vYZAOagtNs0SO+nvOODmEw/9lmrIOBiBipQg2GkUpXiI4OqHLdmcuWvdPRGh60m7/ynx8AgYzXfgJ3TpopBmzPOOnMisvLTdSWlXxqy5S0GAlcmy8j+OIWaZLhvoJgKK2NsGwx6ZjQihb4mt36T4hex1r9KGp9w8OyI4buUzzWsPHVCJ+VWkHiYZ/cVYwNlLW0klQzg3wuE+eg3kk/wsFjqqWEPZeKQIkFhEM93vbyypSVdvxrDgwgZQ9R9LIQuDLsEpbFPn1AL0nZ1TaeBdFYktLAZk6Q3Ktqc+SsUX3S16alyGiH
NnU+0p7Ul4AD0FjchhZn5i8xyELT5+McPKXAolU0BEsHzKFKJWd4jQsftyUWHxkTW8Tq8NXyCeuJ6HMvvug3sdErjyoyaEwlsqcNhzqKj3SReFBUBFDRcV/1HyE4bQqVkXQ976Q8asM7dwwhj/eW3wyW41fzI4AKdvxuYZ7GOgYO5qL/SzATkwgQ0ZZyO1PPGtcvKNGyOaO2uzsTkSTbdFJZLRsNnKPqLTWdJVJ6JGsb9KKL20MCXllBTe2Qh6xMCQvl/5FP7M+2OWqDZgvNyLVNxIfjmZnvnrwyLcXzECgF6MQQxoR5ZNsWsCGM1ZtIxdzt28LikGL5X6kx2qp2YQvBSGwV5maFFeshxf58rM+MIbAL2xM2JsK3W/odmzTaQkdeGx1BD6hMiaj7DnNgDFvlGOe+3Bk9gDDQZMD81aMDbPNBPStlfQuIHJiOZg5//4cbkw2U+Ul+jVZsASTpeK24fqPl5zlwdujxiZcI2aNspqKENiIHDox/dBQRrQnZONlCp5M0E4o5KUoNIjEnRomB+jkq8WZz6LOBtYxFJY+oHetUnuYAkyZRMLEiY8erSB0k8ffKz9/tVuNXI4++C8c7AwzezAiI757z4f4cLmTWQfss5R0i+5w3TVT5NIOSxuVrQXoNvz3ng4TEFM+F5tD9VT/Gfhpb/4H0fsft7Ws4HmYRvHkvWTihwGN06wVOHAIFwf25Tsb2hbH0m+PdmuDm2JCdGiU9JpZEVvt0ZISuwKNXOEdrduWh22nDN6utYcpmsy7ivsDvujHZM0m64yFfrbHVIo45EScMpA063y1OBtEDncVv/dv5/LUqgn5oPVTgXailJcBIWj4K6KOj5SlE6hbAzXktZyJNXXfstKqzMdmPdnrMF3ZzHorX+AYW8ML0SNHMLsLEfPCP8CSLocquf1ogqSYve8FehPBIHFl2zaX5nL6D0aSLuz3Xbl+0jPPIqmfaqrGrRLS/dU9236e/MRbXO39zFNpZc1nbu8+iZ0Uu5plaebxS3Su2iQuumVp6+2U9RFvj7ybWDuALnFKg4Lg3/b7VZrsuInaTxf7qj/V+TXJeY+ACqJbtRjXx4tGTesRRA0Jbz1VHMXVnDmFe9LPv+++Epiis9O15qPxHf+50qTUbEnmbqnK9k+Vw2Ie60rxo7VCRyhh7Yed/QdSXUOjTQKAjBfgVsSL0+WTDdtwNMPfcpxTIWZNf5qxt4IjPwzk+uu/QqCU3npuUMd3OV6MmtWl4q09pP2pFjwYF7dpp6rsSzfMGbpZRqFmMBIQRZrn+izkTAygTEzLMyWBynFT2f2WwarUTzWUP3JeoGZV3uKVYeq/WUbRGDpOMSBJGve+BpCz67w5h7TGdOnJ6AJb4ypEzItD8Npik02XePNiTG3dCQw6Zhm1pT73TIvzfK6YBgzlxB4q+BlKF+/VhHOqQ+yw1Ssr5fPdR1lK0uiVvSZL3vl77bo+gXEsofCUHOWen1eOQyii98azIkSA/BixKjsBHzn1NAu0YVAPL3yOt4pKILLlksZcbUlBuFsRlxZMdPFByMX+1mxJUmp/l3wkePZ9VYvD+OyxnpUCEM+UGaau/EBdsZ7EhOeIBkSh8usGA5UQcl7AObH0iT9WVo64y5yKjiB4708Cl+zguDA4jqZE3xbWWqryP84t9nIMXur9Oq/ejiQZ76g2B4+JSHBCSNVxUp9HTYq0iN79uY7EGoy6DDoUd/g9rtwoxm/tJQlbJxgitl/tNTQ7JMfbEFo3A8JNUV7xDZRAOyVF9lYceFBfSQxddx/YOuZUUWIhZd1sHLsOjp/7Ol6n98FdkvzYhUaZLygtTDjpEerzT+U7xI0Ux6CLOqE7m8OZreC8FiDxhGtMgv5PDLKbYvaksJO8iABSA5ZCBmd9ouoIJt/5T+/D+H3LgMXem7LGj1IYYBuljhqZzArTq/1a0Xui/6aMoqdMM6hV79ZKLkPw48S8blwNnYtsC8PildrLgYUhYQs24lzkSvFXX
d0ef8RGdE06DiFzSaen3bYV5brjki0tpt1uaGpdlPc9R2WpX+K1D/VZVSR6+G6yO2jp9i5STojhrieCzmILbFrJxp6YSJPjrHkvAvKvn8dfiN1tSAPDzzaG7OqeeKU0lvV7krq2tqaRW8dUUR4iN/fN7fr0ObbUrgcNQLoXFlYrdZ+LrLXu0/FXI39IU1gxkOZ8uvlg7YkUFlhDmoAmcuzuXCuVX9XyrUXOGpJ64hLTeXIykbIIQiOD1DrZXpCBmCBUfuCLnaPp5nYHJESYDyf8lH2DYLf4YP+oHAcGjXeAoFoHFnKCYJlUivMaW8W+2YTy4rgV9OfWLNpRzS0PtXXM3PmeeEvGSAuzffvv5onTwUYUCW66VfWbmxoBay4wJstcXwgbxsAAaKMhdaOodVZEBPYqygepajVFd4LoEReCHWeFMlKs2mVOGSOkDpOC9gVeTzo6Vzn8kZaYloXSbDzPvlekK0F8VQ7dYIGRwDiIr4Yog7ssUipLAV09BAIJ9HRwofZraLvOMT/cxHfjHx6UlOewIltbYKbXrjaoLwqcP+rmVIfu4gA4+Wjl4gTRsT8c8v17omB2+nfmlSbqVyP8+BpvY1F3zSSwiOEqDfUXc9atzBvfBpLRcKyKp4Zg8OGMMDf4lHxg1JIc6Wmo3a/CDatKB1rUKQ2Vs2yPVu3GVt7Ez1601/GlvXoDJezRzbAwuccRdK5sB8TbJT6l1MnNbvacE1t21yrGOZvrkzdlA09xavQluAX8Ct0J6Ww9hYgj7XMxzFdqFbM3rKO8SlubqxQ/+ktGHUPFcJ4m5LF7Nk0hCcR2SLZ/2jKGksocovck2cYeYfDK93gYLxe1C0AUqNvYtkz1HlepKdWtQSGIEAJdxHVfH6RjsYpz8vM/9Udc2zNz5vUgL09X1z7q99k6o1CZeOgUbDdHH8GBuPe04UkZidK3q+3JQOSeGdoof/HzvXJCdL9zmIYECaEuAjWL9+JwQtYz9az5v2NnVaQJT00h+j+8xZlCKxHs3lEsEMnFnQx2UmX8VPOkDNk4jL4StHp51ENrzSyXK4vlYNfUpTQd2s48LnCXZPbeRjNKF7J640urI7R1UF4auNAVU17cmLhDm68z8WY0mgW1uGvFkUX0qTZfTAhYwsm3lBIWxgF2oDCzN8lyv7LVamyh9HcMXOBcm8os5hRlMr7F7abPH4+dYFhUnhTdEWjE1vqYAp/UEGb3/RvY7pqHiWhP8A2gJLOXB6OZn/ocMvr9QnQ/0SS9Kt37g7Jj+J2HgIF1bvRCK8+0ool3cVSdxLhdce9UyXtawvz/wjEnkSlgHzo/M/ydyRI+k8f0q8wIdNAUNerYVgUOL9r/5E3IRgxB+RkF8ODXoDjNUlduBLkbMp80NOWMXQV6dnZevjNenRCXcbajAx6KOu5OnwZYDi9wcBA8ynfgyZESRjl1J8vX7PoQfgcqHY/LY3zDHR7fd9fU/l//rxQvXwnLLLNTug+UpPtIrHANNf/WhQyRp6asPY3D3tav1R823+2PZBM+cT05CM+Dh5qREXAgwmKRMwVtBULpMA5c4lKXIyjmwvORgG2EkaFDbXtWWYMYUqM2g0jIUexdwcjxMBXZCD80rfKteNAVmt7TwaOTJ+tbajayR+92NdNmxk72Q9bthJXaZjFtK54V4Uz/omagF2nmUC30K1EIjnA8F6gWYSlFRMQS2J8ZbmXTa1X9I8ySPdgBlykJgc9r/3K/L+xKvMw56fAy0GagsrpUihO0NAhnewS9pnKErqpllSdn2KsJTvNKHIEB/4aEU9Jn0Me3dgxjFsxYt0M/HShselfsuCfDmKsRaURoYFq+z/XCFEaBZzr1ehwUuGvUR++zAd17uDZLf6jkiwpifvnEDaeoWWljBTJaDHXgQTaToV32ZB/Qvh2vKc5sCH2TZ/XfBTgFvOUvy5w+YUVZgMdGvsaFipIaV8uRbDhYYihX13SIamarnGISYZdM3TWhuNeakqFHQhVhW
q+Khalnmd7CPd0F5ISi7gM45R/Eh0F8ENtj01anEfTKjbFzMgfl6izitDYBQmlK0YIjWNUm0Gvcf3aw1rC2mWplTzZpcMVH4lMTxGIkrkJTZwEbL2x0zGJ5KTrUnwYOKYwjQvJ4zOYSc4k6wl2xdYwSPnBhn2T88r6JXjuVKwN1RYK5EVjbiaBQ1azP96xkzA1K6Ko1gjd7Z84KquVRSCoA/z2l8tdSeic0B7k6N609YbVQX2M6ta9BJCeODp++sH1MNAJBMiMMG5OXnLjv5oFgLhw+XVOsvCwoDSztZLTmVDgcO84GfD8eeX78qelHhBXZfBLGcX1SlalQHwDedHKzYsPNEi+Qj1q87LoRLPBcKTiKmXNoPPHgQ6Smw8H7i1bINXqlt7o6lbn6DF5Sgviwyz+ZCHUa8GTYsbvAck2TARaAIGODpTMRzzzK8b9v1PV1neAE1qV4JY+QK/kqAwaX+rHKVnPPByojqol40oR5OmeFVBKVRVjTQK0s3hJrYNbEKmtKHbLa7M4Lnl6QaJvaQgyYXT+Mgq/8dqz9LP47LtfOKLUKvzYyHvinsxmaDREhc4kRQ0yM29j1lpOJRZ2Sijdc/tfi7afI1OC5K5pRZkhg40V22Regs3tluwekSIp24ZPWP0NIFVSE0+WdDf+T7ubvUI30VxAwnvseENpWpCyGjQ+FqXgx1AY8Li5vAEQ9SQYEvNY9WxvEwP0cdjhOnHG1vNcle6XqYtsODr/r6wssznscaqzXDOySRlCZQ/akm/FkvJlHCxcYV1bU5N2HxjPXur44UpsbLDVvcVMyKJIQXIWCqMd5LL34j3nTJRKYb7A+O3WDvd5/2W//QS0IPQWzRhl3Q6TSTlzA5uHZjfljcCjKbQ2MOXk5veTh3/am8BhF8Y4BC2qOKcgRsOVefpSlOyeHQ2MI/4oF+5uOTuT4TTziqQX4qU9mm3iMtmnBCHleddMsYKjUyhB8VtsmwVjBGYx295fWFtfphFvWFrsS8smrW9kPdXEeiinPK+f6WSb9geC0s09cb5WydqbP8fL3UGODm1f9WSgNqX4Ytwk+LPpG4CbK82MOvTDBIEstk9fNxg0Zjc4plk33xdnj4T8E+RbbuyvsZ/+O+bJTIm9LkVCSXAYmYZO7skS7RuE9Wj8H7NTBRFqPDzUz4kdY+gtWnpjgFwrnxk7Vpyp6lqeK5+WsNxZJXNNATXpDt8PGjoOuTtUZnsp6c0PuE6A2VeW/QWIRoabnzdpzVA8EoZUd0g45bL7d2WqyOqCVL9bvNbQRdmWE6zCltJ2qvWnK38P5OiOSC+hiirLOIPm4nO+WSwm/5L1tMn1WA6t9qgWw0pG5GauJZqRi7N3FHBhCPYrEOW2T5ffkemeyWRGR1h3CwnW8EyIWFTyAMThPJbs1KMb31we8OXLdMxDOGuSSf2i6/rNHGVMRrc7ptJty3oYgd4wJg3uxJZzssLLFR3S/pOw4jVPw9ucRK+oO0aZ2id2ixoh6I8dLiJik2zNfGA6KcJ/rGkBrR33wqQf90XHGsGuWsqW+7tkzddgBNV+q4TQsV6zYYRGc5Oz7H+n2gBiHggAANLZt27Zt27atxrZt2/b8qJnYaGzb3EPsQZ7s5dBIAV2yX3sUVbjlAvk6TA9lOpRFOG+BbkbRpRysc8Kq+/pXJ5etKidMx18PWSJX7mfGq5aJluAaoBPJ6o3S826/4hM14qo3sKhqRW6bXlAfDC/qCbdinvaMGytBlLtmJSHa+zXQP7Yni+TDCxhgQHHHKN0z4Yj+ETvERKc+4OP+HFb8gJI23H030I9Ew4JXDvaMry44EILsJlU4Brwmjtg8deC3sJR2WkK1Pjon20R0cxPjZaA11gtACMhcOBOAjd9pIqDm3nsu5vv2hl76u1zZ9LfEbsJp3ECxMra7LHH8IlomRoh65I6BRqGq4pQbyc89fDKrZx4+4L9lakLYcjaZa6cvAOgquM19KNAh8MCMn0qOSmO5/+VlW
jJYC9epp3vJFViAdozwGYjPwG+2g36IkHEU4u2MWYVb+Z9s2nvRZQB2KxjPd8vNOZ3vbfxswyD7DzrfBt7L3wJwHItLAYb6c7fFG7uIA5ehOcv4+rKEPLU7T5WeQ8hhjPFQK3EMcPfSNoI+z3eBlk3qqCyV6hDmI4kwFhtw+3b2Nv1aXCO3ghGVR9wcW5VdfoB+giB4zKn3YnDcvTC4akUsaIQF3FDUu5QIF0WCWn+tpcltf3SeC/0Y+PRUggfOxgnRFAinmCeObobaSnOx4ZF+Muo8VxBHQ8gs9hAavFXj00ZboFpsEVMh/pqB2X+xQV25jocAu35MjlwISNyDIeG/8oiFhyUNoIq2qyr6rpyhzn28AHuDAva/F+blSyYbNFLtAMsRpKMGyRcGYY/APTX4QfwD+clXEEGcfQxjWT8ldbzxu5P8erep8OKqBmG6AS0jBSeqgj9SfrLNql+cFx84YzkVGvfzhUWWURoKdH0+bB/485KfefhMOnh9JzWhY0r12RH4itTVjfwz78BMtyssINVQij1P+zJsbnH7ghD8Ob1Zfa+xFzcDJ4hOst+Ss1ao7KukM+i+NYWSIfdjB3OLpnpgfGDeh57LBnG8HFDGRf+A1b6XkSm60YyrCWyJWE4MO6XnpOsmYd3ye+jAmhPqoOGfP/ARD7m6PkAIizEzCo6D2cVWN6y0xujpB5hvo7bUqDCp/gcsWIdMpANPrSUUWyrCJmecl8nXzQ2r0x83bGrL2DHPrmy+BnETd2hxbO+ITqzVoZV+W8zgyOlqJFPw51GOhJ5hj0HVdcSPxyuVOII9kUhM+Fr2qvxs5R1J+d2fWHCAVle0bgEfldr8be+RDz2C89rH83Ug7Lgc8xTtZzJ5/fcLI/US70HtrV4e88CLhTqlcWcvLK9Cf8zzREDlPX2ztQIhORPF/0+pSHWkv5XfRDk6ZUPPVqpYehEpgWSgM6WB1IvKjJii4Twx5wdmGRfxp7O3+hoiAlizVlxdQ32+lgq509VnCWbFfzgFwBs2gf3C5KZ/dIAR0epg5JXB2t29mNFKJKuMGw4PgOIHuzLtO2BFtd2t2+M/aPj9vjxy6sJnt7K28t3wbPPVlgRh5BWzA3cPyH0eii46+lN4xjUYtpo5QTuj2NJojIE3JCqwAYqm+w+35jzNo37N/lolCUEXUhn4HHQKdKg9gYIT6ncyWR2rW3QXp2drAk1WJphXv0zR/vgN+OSRK4MyG65ao7DSEsBxPtcvuNmDFHKH5wWZio5SvBDmaeMwWK0SsDNvttvHNwQ/69vrbH32OqD4Rdb/Hi3Tdfqr9HUWb/qpnnc1gArWRtauttlK7VFH9S4S6SFpM0HFhCM/PaPCqQfsuJd0uuXkbx9SCqK9QK7ozTaCaHVC/jlmpaLpxPKDFrX7oP7Ub6f6DjPyPe9VSLKyc3joxZNE+IBLAMIyMAlT6WlX0gGkYlakFIIjXtsusElBfAX12z1Pnm/HWxlB/T2w3VLwXKCZmKab3B+VdMjsvJqYg09gRzd6Vf+vDD3l0CQCQnwSe+AEhrvSx4/XIJ/n9actPjjg9ywqF4KoQd4vPaRlL39nUbHXNWEFgpHrUyrxl22vUchDbfwu8mfzOjoZZ7oByq71T2BBNp55X03Upst9u02py/j6pXVYzjcKr6IynlJMjS+LLv8XN96lEJMDukc3MPPGoOrbz85EMO8hQjH0cmX2a2/eFVUkPPNv0l7gH7VLjrZ+4dPzntpsGay7VtHaLGIqjAhWpRxU0JGVwAOruj7Tg9nID+M//o+HbBhHUqXGKquRql7U1flxyp1h62qr8gLdgbnB/eTGFmd32cYHJE+mJGtB9nz2MGG/aRdHh8gkdEsIuFypNc3ElA4p7bpNulv63PFy9PkpEao5ttXJLTcnevWROWk+ClqYcgKOIqjUV1mcfZDKagO/F98pg6aqK7mFceHzH/Qb04nr7MDou05o3Hs1VbGSwVZv7Bjr8MXbtzzhBTSaL
flSMKUBEWNuUwoZQQ8lXoEqKc0zc6PKR/ESuhN9PHyGBKafp6IGTyovfqcmn6vvMKOJ47asEW9KvpIe/MrXLDAqXK/5r/iqu+g44Zu8EMMfhOw5UHVlGoxgZ2JmfknAjM+/spkVKh6x+10x9e7od72u8EJZZPW/hsEvWWCHJ44v0iVCkOGycIPzWEgwbf6Q95jN4ZduIjSOLgbCgou4Ngsae+gW0ZcI8ILnh602Vn2rw7sQ90/FQ93sHJ5ZHYHzFNx2HB+ZXr62A2I39qNKVSwJlBr4c5SXl4BtMuBd4w9DOhV0eeyTKVbe5ahiwfCfQ6wi5SzpPM3nkOZq8prlge8tu/sm1IKpOzlCG0ibo9/LdbGp7I0PIk3bvzsIAvax1ZnZvDrSoOQF5qv+hecAfuRK6lH3XXHzPvd3reIjByLbqX8fLL4FzYCi01kFIfZfvpj0+i/nxrycY4mNVdRvnL3VGq0/cWFp28uafO3OnDZzH055M8lpdwd5ooIxfpG0mAhu/D52K2a2uRMqikAJ+0TPSeeAmMBnbJgha3MkDx50GCcyljwBL8RiabJrlJQa3IM/thuNSgR51nzeeIPJPtT4VIbt1NqIclz9Fb7jlHDUcIYi45p6//FYBnUri0SeXrWU9Hw+kFUOjGPaLhNIP9cuZE+2fgoYoQwy74bClqvHDQkNsFxQ3QeCRxrfq8IQntvomkNtm6MoQT1q2OP2wVKR73v5TqIWyLq9Jwrq4cdOnzrb86ow5bahtitAOUZDWgRGwbiCBrLWeDfy0cyDFI92P7Xxu7gjAEgQ6SNG+6GWVQdr4FqXLMk14l75GPpgEA+hBTIQ9RoN/VQCSvJkg6qCJIjjQOPSmwfFUqjBSwC26Ywq10hfGLv69XhBLJNKCXy8YNyKN41kk8IZBt18i9DEgWRaL/uwwLX+yjXCiVYJjD4ubFTLTuUZ5RwscAaoN6hXWh6aVXOQY3FSH8/BmoQmdzluuG82SVQ+f0DJ63F+WhcNIvyUU0hsxLGh2i8xo8Rv4LVFhJYxTrPtVpoEGgmMzyeiREI+ZRTFKk3XMcK8DeZHgbWkkipD9XwrS03F+nUEfqFtkcHzFg/eH6l9xwuFEhVE897ubJ2EXdynrZAgGGPvuFRlKNlvXzV4EIbHD4foDNmIsMUyHrpGRYM+pwpPcySRovMZzaZWK3G6qNzw/4y3fYHQhp2Tk13OqvQg3N5bRE0ggtOvC4YnxH5gJGtjX8ZyT4iKKW6Vy3xF8C2Mf/5zvlGUq/iN+YNjD3+XTIit+J9ZQ1pP9+E/ApiEfackI0NQLaeyFUVH+9Urx7QpmHqqoRxUL5s5nFVAVl9wnMCHsyb2898mbgWVf9/3s6hXKWcGiII0UW/G/wXm31m4Q+42t7IpQ9JtgHJ5Vn3x6u5Y8M+3dBMy6T9K3TI0/ERp0rXvT5O+RHPx7rib2YQw2VzbWratrT3T6YFocdxhjOgDc/CGfhvCUyQpJNLnVpJJU9ygtMfarxsNmcHsoeCnPmGXdVTb1/fK0pg6I+ZZWC+lQMhJjBZqpJ3lg50I74bn2Lz1nUyRQO0TSjzgqBB69fhVwi+I9iM+35whTFWbOd9wjrVE4EBPck3yWfFZjhLsAYp0Vqh7BcTTlqk+ZvM6bok4CeH2IT3x6CfUnE9JRG1YihWFZa5RbvrKdRdOp3TZhsSEwgkJIouS56zhzMToRpFkLFiHsw8/0Q70XYJ4eSa0RUM32bT0XZ8d9sXJPZpN9ondJEKBVpTbTUDMShl3lhdcF7rF2V9PRibOHCswdjWeGrXxIp/ctIEEtq6oqUyVQxWrDMnpWyO68lp9mktPwRkV+zYX0ELFDPFLDMPXTU2s/UzZ7Ni4uzN80rpO+EkaoWUcKObH99mC5b6KnDTYP6M/ItAgoiXZxv5Ph+iOO0OiCIwY1Bu1RCyE9tkRzf688mybbebRYtIzdtIojeVbTJFBhSu7NFoWeEfuUDQWVfudOHF+LHlUbiGnN
aVNV9ppxqkOjuKrVscpiMyLbD625DrkkHabtismP6shLLT6fTro6/OqcD/4v1VrDPGPKROPkVnl1KDJM9t3PxGNiUwjO8mFG7+9qK5tvCuFW4ZYt+e7vS1Msa/W+DnQrxNQjHhpOiuVy4fU/gqia3cPuv+OSZTkgTNNELnwnR/0tb/5HCeK3nPTZLu2RaKQTsXRveZH2VB7inPJ7ZNxGz/NCXrtprystwwUsYbwB+gzMgGsgh7Qx4xP7lQTvmO7yBZeU6WtQsgl7oz3BIPCteVbFTlC8YzC+iqeXNi3Re5TVKcD0J59/EJBoslCO0k53lOBlDoilLxYJwUNjhZj4VzT2oFEa1xWaeP/UigwSJoX9RU21GB3YdFcwJtDeU2rOQJZOfxrT223PoE2mZ6uSuTfkcIF7QvNtZmS41LI52fDtx/HDwnlP4i6iv9k0ihZsTmok0SSoRMnlBjJM9KjdhTa8tDmSubHq5hpVrMWlRZN0/hiwSKsJRGYMhcCYx+flsb689+q8huQsEfRCyDzRk5sSK/F3cG458OolcUY8yCJvs+4VD5dcYxpXi+r9Cp5q8gLj+OUbFjpVdw/17FtBDqhuccrhzTxrIZrkk0aB0YXTYZOJPhlealVjti2ddJi0frgcp6u/TEvH6vAy4keI7xrStqyEy69otXYPPP6HuTGN9eSEcPJFaGgmJDdY9qjTVA4MiHXTgatayusyYfpl6o7/fYiSpUr1X7qjHVU+aabZB0Chs3td0Vkz2sr3+6qMgzNOJrttTqBMIh2OmkFgqwJQpfT4DIMmvUPaL+S9ploJxFmU5EnYOZwrLQMpkGxOhCgc4LVe3/YhMlmBMBfZsHBA9UYDHf11dyhSlFUVx490SfOTN/B6u6n2x9ZlqIpQLr3bAXJFIOlub/RdP81+hWoxSWTNsUjbXWfk/h3mn5wlqnX71QEKZUzLJGJwFPxbfjwLpYhnYLF60RLTCCwZMRVS4SBoRbZxSgUQqXdjQh5FZ7YGLSRfLI1v2ddDryBZRoBWggaPlTSuKMaGZmqNtEMhsmB4vQlcPP6KMygpMHqelTXXFaOUexiufkgFOUF/ei4/rkW59saYoTgsX7srgVTnWxCtxYNA7kt7MCf7hum9KH5dMnPt8ml6pxx8fIRFs/TbusvVXVtDs+vMVUrLWhQGnIPM7Vv0mNFCHGKCjS0W6262uvxblS9R2xrqT16xwaaoD2d0SH8iFm3hj8aQjQWBCCVJDOm8UlF7w3/CXPF5IR1Q+LV8LYJ8U6PDRvzFat7X7xk22cyYYacVSNonrKmZt1BoElfN92yVeiV1HpwYvs9odIrPu7PZWeBooTo/k2sVsGcfBV4ii4QBdOng6H7rxivOo14F7c9ZNHDqqlpSmyMKPaDXHvnO5FtobKamAyMl+uQGwNNNBi/cpqihPWyMrddBdBosJ9R/vcDEMuPrfqnuoHtuoA0Y/GcDS5f1qghD1FswL9wGLBCwxkmrWyW2VdUabFUCqXCcNB8KkijFsoPd/Rj5/CMjNRV7Rr5qUbpmY0XXxgZ9+z0R1Qi1QDdaGneh9VsoMFQYl+n4+86u7seTfvR2ueyXyrVhvRWUUCWiTfZaMYsawWBDBVxh+sot2On6seFghefpi1iXcce4n/z6+WZCw4SXjdeATXzC0u2ChFOx10NiCMHibiexZb+n6Sf9Vm3OY+gwKlY4AgazupkVHLr2Si0huzqdEKZ0jXYWao+CbVRWaC/WRpX6ar1Zg3QgH/ez3Z5i/N7KaFeeheuqCrijLcx2WOyeEmhZVTGgcXeGqVQNsOV0nZ+8u+idU+X1n24+IQd6g3luJ6/KVEC65R448PMjlXaCe89zI7CcDEH77oElYRXdOCBP/EKbXCU4XtxAMf1T2Rwv0yCcZn7I7SVZj5tOm4FsUszHqTEXTES7EBDaveimz1sKAqOChqj7KBegjMJ/4G1kNFMgihfnz6ay42n6Ptw90EyfmsF6sukwOBGxl+rP
Zz+cn0knc9ZbNr71gMxc5Gsxivo6bqzMGco2aThYBnhF5YxdLt44AUXYSL3tMWO5cokt6dUpsaFFy/POR4v5HB29m3kbgs/+GtJe2ZhOYe0nJGxXHFYCwEWzQx0i+AIItwJB2I9VWz1p+JyfHKrCZroMWJ5qhcaAiEfZjD9vKmD14lALoZbC96t09R/+n74s8fDd6x3lU8xMP6IW/oYqeVBZVnMMIaYM9dFWDI4MOqrMwsg7WSMW/pwlCPbjicbPHgwm4E/f6Nw+hOcG2kmld+caNS60G88XnXmwDhCdEOmgGewkQqmFy6SdzvJQaKbkiGU3ddbKOmdv1tYwE6/sOoHxq2u1gxEZoq73S2Rlfk8MOucgzL/2j3Xfh2/FRCzrdG9vZekBr7sZNEvuE11qMQjoR1RWq/WbfJrvFYBQltUXUmOeDNPi+N3r8/LEaPZ+wAopF0qNGqVkwE3UxyzOluaSFrG1nlzBrOhsHRBHo/wpnTAbeNEXf1aYwemPDsr4I9RP9ef+9RX5BG7MeR3dMQllaMLULVgYQbhwRTZLgAh8/GRrvufcZ1IRSJohArSLD3VTMLfgkcoLmLUAIsdH2IbEdrOYlu+riVpA9jORi2RuoUW3l7DqzbkIFEBsWbHK4n7RJh6EtnN8vw5f0nVQl7LJ5JeIZyyOT9JA94N5nUlE4ETF9A3eu9yh44G4P0Ytfwa/KpMnWWTdeMM6DeoWUEZ5BrX8cj56OMUeCmfHAal9DJv7ZztGRC9S1R6NmK7pvvXbQHd8desH7nxP7c3Kd89fo5YYUUXwDrWZ5z7lw2Lcgi04n00XG1TcjwtX4D/lnSgPGbq4GafUwI8+2OxejM9xnOxOieKAvhd2gFdXBJKIXIAPX/KxXs513rwT9BCnEbFaCUZb4vIjSZ0D3kkZVSp1J5wfHSEInypEXXKe18zotKgjg9QDrm0L+L4CY6g9m2ThnfiJGGlgJclNf8Km/uuljH362rc2XtuOj9TYS+9x4RXup1mM1pkOuvWBLfSluLcWyFn7LkNEJ0w0gPJi9w7hJcntOKvGAz0qi/FItEnmbyAUmHAeO08Ht5Ri3pQBrWVcRkibG1TNYBjMi11eD0O0JJ0Xd94ljn0qE4AK9iH7vmJwDIUAqf+pkUFdB6L6hX8U3tlSh2YapT7CtRrG77PM586p21U11nlDABK2x7M5y+9tdUmY4alcZqRCl2ivQWoTybrEEMoq/m/zbXzCxY4sgeLlhlqnSrWkurTg5XE9xR8/HO3SKx8ogIpQtvOZKtsEwZ/EGyVfS6gyb2OlhHf7Wi03TBjiVfRNf4VePmTkM8afzP6xC0ULgSaUA7ZB0+WfXTdonS29E6fYe5o3KjPHFI1T9uZN0AaHPuiHWWLMqdUrRiGqwexBIFTkdCjm2tVywbxzxpMaX0giuSphnJdfHXgVN/jWDhyos22RBMH7OfLomgvr7gxNgkLYEcRtAcbj+AjHHn1zZmW75DZbPBi+JQH10kpHDfyzM1GzimSd39aNHp3KVORmxmDE/xvFBFKOS1uXRrEv5YzAVgfLSV5Cbm1HQsfa4JOEL8pv2rJKdPelDSX54YHebdh1LnqTQs9QtWo8pn2doZX3eyHCYepHa/secRm+BzaGwEmB0trznQ22XHtS3gjPfKHYwDR/bdHRkpSY1eirMZQ9kO8vpyOpYlZ65TN2RbWLwsm6llSbDK78nvoH3AdY4/HltpKwblVcB2ryu9G4xo2jU7Hix0Zmz0t3XVwr6Z0S+pSM4F4W2e9eYntJqbh86d92FfTqaG6KsuT135wQskYrHswLHpnvhD2M5W+a9YNgtoHTWkUAycE85JXQRj1rnl/YluEzzogrReMrxzUPY9GdxahcQDv14MmrEcJAoNc5yntF2pcjtVJPcDTVug/iPqfvT6Hq78fIhg0xl8cfk2LNU6ith4ed6ramPJSAn9JES5cF7AEd7TzcSF4KfumjojwG5hoxP3WexHfqE7ZMO41v
ted8EpEV7iGvOxZqpZz5yc4RXr9CfZkSjMYJmt5GEyFnQUo7AI2uvMJX/DS6xMBFj1oI7Kr93jgLbVH72Jxu43UrnPgDs9oQ5w4Jeua9aT+7F8Y8UvpZYGJKUDpI9rMx8CSl2Pw6k4odFfi6PhwpBySLzoDBAfy/EVXqTOx9HclQ0KLdWjdrHnVFzwpb+LkMzctX4eWs9p8OkAhMOfUkKCG9wiu+s/oxS8PgmUx8K8w7OJvJFmWXfQ7SEWiI6d/gPgVs5aOiPzGWG+UMCi2V5uGPmHgAQpdvXbT9H8LlTy/aCUMdXMCmxt+7ldjwyMDcz4FR7rJrtNwj4W/cmHMhPPAj9P9YWqBgRb1ShlD7kzNUfLg4fwgvovjAKk994zG5q10xd6HTroiBKNYiwa701Us58RIXA6LDLt73djbFdjm3LSmVIykcGstkJxBGSz8fjx4/tHqU0G40Gs7evGZ91ZndJ+XDB93IYVsDcP7OQ5w5eN57WB41sFtnaoKZjqjkpUOXy5F3HhmYTEVcfp1A8lcYWQhnwqhJ57EzXq2RubYwUt3yGvW7U6ecIo5bRLejHsyDKQ4gmR2QhLoTV9TYdGpytHNVJ3RSjaJqiD9clf+5ForGvYhFRxi5XKItVL337m3+UhbCEEsJDc2v5zVBF9AZTq0tJYrp68wazsefdrTFfN3KnlPa0VnbUu9AAhTc3AU/oCy3nHQ+qWAnwF1WfEUyDKlr+1XVQUOc9xUhyycDcQoLYSdW25QuYp6h6JakbAK2uJlm82ccb905pCizpGcVaD92IeNM/jDaZweBH756ULkoAqvefHswLNezhiLGflPDvDNPQ89MefEf1vbxy76vtGNk4xuVAtWBIwyVuPNSlnD9QG83DOMp5C85bV8BrupvMg33VXx4kSE5b9pkHz7qNRGugkeDsw2y00CKogMc9qsQkehDIUZM2EeOHsw/dHZQITRboOVqTkjLL+w5TBQUt3OGxDeR7LPLdcZAFN5Ick6rlpKyX8LoTZ6qhML9BPrGNu97BSsnUL7Q1JtwhqJbqfd6egNxHwEmuq97BxXmsT36WLZ1hXMiyl/oidS0Z5VwdmaiqnElfsJPJkchtsDPvtRx/xzwpXITYLTSeX4E+ucjrH7szfKAi+H3grnYI7KYGhVRj9CKIZzal6KL82aXesj7Y4mJN8DrsDDVM7EDIW/79+ihlSNPKQdw34YgpJxnrXHqyIlAXKVouis4dfYk2yckMjFNRlQRKbBBZn52+yK63d1wT51J2Q3kBBd2FZaKqZzPmUg7i2oFuukoC/csT1KtEm6k3xCkSiMN/wSchGidJX5/dKnOh/ChnrTe9o291730M4HOSoPZ3JEPs7TISgZf+GvaxRb1QVTkRnAxAvgdJ4agfIQIvCJrqrk/L1dYjYkyIJIFM8ourIOMyk/Br+CE//NBbMBGgc1A7dr34vrEAcO0tCsjg3yXMRaPct4IOfrrwIhypv/EXLEJTcEAH7OwBKX6u2BG8COe4Zo6h1ZZB2SUI7u5/vnafVTMSG8bsRd0Z+scV13YS8E9J1jXrE7bbPT7xSl+lJ0p+/zXzq7TLWhTrfnoTdhwZfp/dcxnu4ZqSODFxt8mveGEJnYy7oQwqMne8zM27moyrvG1rsgB+QmOfjY+JWK5OCJVFaWFz8RTPEmldAkAZGeV8qiqIgbAczlFn7ZIF6g7Q95zFPGctnd1oXIA/g29+XCVHVuZmKX87j1TuZ6ZY0+mPD8gYZLWwVs47YCtxE8XO2PXt3LJ3TlUi+txpfVvGAkIdJpRzSc5SnC8Kz9t5jShudw2sW6dayxaM8epYQfAfKSrYBrqjeYouslpR47gWS0FR2Wx8RNuLmQpQB9qDS+6a/vmxG4Z7l+bpLYiO+h8kFfFfudNBBjuoByM03+Lq4jNknKskBSAd0Vd3VFIczbeJdboPZ4oJwJ8+3Qz2Ub84cnHz0bDKwXslY28uDHX/aIgFWDvkiq+wZHpO2Ao
vfcASY9Dqw/uY+h/qWDRWIUqpmqxTvG5KiUaK0WzQYJZUsf2OCL+sAkQjqB+MV1mePYZXYLSu4nDhV0Oj0WHlyVRbRv9d9bEpeIeTG3TuOByHGkBaRIbhXGEye/5YaMTMyZNc+pQlOiuh1I/FEeMedG6iDAhdp0dPFRdXrqzqsg8mw16OZFrTxBVGMjyFpm8zbnu/yTtL5Hfy0Jpajimjh6w9MPIrPEZg4yzgmbKi8kRp6vm3iQZGBociOTTkKiBLIHMxxpr81PoDSCPiRPO6NTprNQFGFD798NvQTgKzcdNbc1K8sMwf+AhVAvL3FgOL01JcYRscl81ik+I+uSVO7qHve+ZC39MZOkDX3RVIMIox/OyyLCUitrjubHWuB0MZ7Fr/hLn/UP0hMtT8eFl5WUNhHyeEhprsJFWVJHNaXyRoBmvXJjFHa9XOtbaWlUoWbtiEosF/bt1IeN4236VaSwmT7ZwTi1djtfntPQ4QejzvQRVUoSVC3Zr2vQ72zNbmkxD4hfwTIHJ6NqjK+07ZUt5doVnwCkUqe310srCy0iZ7cktv4myS87lwoVcizL+kbWctPZqndBV0J09XudHHvvnPSX/qH5K3AUNDckDhzrN56hBH+iZc9h/sNKfE4wfWzrCkNx0F8M3y3kUIg6KnO14KSqPuI17dhsQ48VFKLuF/RszqSXFItZ1MKRWLwhH1kAxt8oFhbtoLo9WoTtWSEPeTK4Xqc5hRDo/w/MR4mfcGWrU5EYfT1oC5UCc2ZNavVDAlPDPtHHSE11z5pNXwcTyoBciKPoGKCWkAXUEig1O7rxyRg3fkmd+Qz1VMAPvLlEGQ09eo7EI6GtCyv2gzMjy6u4MmLOQvOqtloyKBrS/JCR+znL+7Gk7x1KKCNPZT2Trkik5Xiw1bAnRZZyIm9s08WT0qQ2sQU7JM/QJZtMrdXZH3ahAFQWggijBInjCtJrFwAgW5k3mLTeo2TRgkFcOMeyyUhV9uqXMJM2ArB1dY8HlfeVpB59rSooFUTH2HTn6GmjxoPXJRl5ZvbDQ+jZAMJ2EWwrRDszA+YGoXVUK0AQKRMqSg5kEVzBSDaxOXQXIS8cOKXZNYWLur3Xf/zNPOBKRpb5pgJWFRwJmYwYt2UUYLKxitrSvWsHDX1fzBQ/gxGVYGNpQVsLcWzgPoijMfJKlrYOweUD7DaaR7u2Ham8Gco0l2kv4G4PVU48bvegKXsv+yQb2FBCBh27U92imnB8FXqAb4K0q7tS1wck5n4bKKmkz45w0vvVEf2JrEppa3iBLdcbn/C/4aISB7pw5l3SNlKvv+X07dSWuVu4brZmcVJYtUCbzpeCkTJBp9V/plKic5aOqtYXpESHMtOAgd4XXiPMKqyZG7ZAjsVwsRjz58HqZM9gQanV34bevs2gjUDNjkdHqU/IlUiH1u4qc8tIVU8yb8sOCYyFWM7kQv3tsedzOjSTdc2thgNL2W5+36WW9JsCCs5b8EzDkxOQMA4ojxHPEDH/oh97W3kdkrM/oqgh6JI1DMjYtvnf/7w44qdI/YTO9sA21xbNlSoXSzZpOUMFiblLsVqViZ/iyuyHICYoJ3qpz37j0klDTeomhXJs8t+5guc79pan/1NUqEaFTwBxq+TpATGAMiqIkh9b7PmgtuLjfEPBHxGCbayT9T/HT4LtJrOsCOcJr2Bkw4PnoeMnlPLjmsXXH1V7cXLFhHnDdBifwPIHsBaq5ZP0xONpiFZ8W5Asi9mb5bSZtKrfHBjPAEzsNhXyRr1U9qv8gloMJdiso/GcqgOECpSSQ/Fy2FMyRK8T3G30jCgUhn/usfcfAVLJEZCBvycMp17R092eTqRc843boIqgMi+zYQUyJnFhRObkL6ZVeVMCLahqPdkxidWa8DrLURh+WMNtSWghqI1pplftrHobS3zAc+hfziMYlzHMQUdEgg3l5TbGgbYz79FlKkMnbOX2jGfCCBRgPMkB+qb8nPRD0uqozOJoQ1tTuIs3D
VNsi5o5eU71aCA0kO4kH0mX01Ye4JOsR429oKd8BGXlNKmotp4y1Sa9drhKeh96rgtI6JiKuI7KGb19CkcBcXYtJs4MjQrPQh+tbdN68aWngAqry3BD3KuOC7nz4mqsTf8YdZlFiBE6SJLfAXFtlO8UD3MgZT0XtTLYx83edZhtV+vJgzO9cmmPOVJS+phgUZnncGH5VTePkfnX/Xn8b0tgEVZKCVlf2j0KdMX2C/wvKdjkUgXERvmzQMHRfBU3Ji+6LYtmMp4Vktk9uw+rCTz9Dvj0LCNevrumRv/sUWEiNRse5ulfdBHiix9Inb8WvACter6UN8MpMsbleVSM/Qw/DmM/qmOpFPpKe+0CurwYHFxq9ms7QmvqpreWt+1HGDGQngfVSRY5Du3R2+EMxCjZ/sP2gu4rRoQkfKQtsvhfEsWExAK+Aa7VH7n89kK/V5ghxNZrCKeApfHPa1BX0wxluNtgSq5WUuhtwQ4QjxlYbtWp90cF8yw5xMUxIyO6+3QdXsXUi06pWARvmGQjJJCWrwRImynhfNf1NI8rc5nvEq/+V+PMWILjuGmSlxL0ITKLslNvucbH7VjrFoGIvft5OASUNabSAQt9ra9c7A1JykFsIWqvHT5iJ6vGROEuqBRpfFbqwmNQOUsX1xImW4sRHHIyveyQt84HkAG7xymFF5KvjLNXMpnfcov3fJzymSj8XseXS2KGWOGDbhJ1xQWB1UCT54bgL+jMzDOytwmUG/0llYScNn9/DPc5tgESvqJ+b9kkfw3XeZu6JKllLdnT74eUGh4XLmbXopcDnGy+ovg9DpXzb5UjbqAErxAK3Y7dl2IBxkEUtjX0f+dNP/s66U1lWg6f/BwiXHciGQPoe0v6QXzZ5L7tUvsL/XBg/gPmqwkcBMPfJICojbZfVcCQ0IqbOy2GX0/2E1YByGC7zoxumV4T7A4+538PVzGUp1aCv9ljcrngtCH1cjQi8JsDDGzN9nbXY0S5kGOEI/cZ7pxeuDbzU6QhpmwhcO7TFo1GP+HBmkL3/A8/Vq1YCY+HUlUWUjWm3T/CB3oflRMOqjmvVqUPDQbRd2Mq3wn+WodGfENzMuUpjwqNCMLbg8hjTJW64oXGW62tvtQJhqXbTRrVEa0k64z8hE7xE9RN7CI2uAGrEvWQnddl4bEYc6bH/u4w/K8+CJsoNvOAkoLPMWWk8EbJvyU8eErPR0SXWHHxG5LAbqt5bw3CSpP3Hv25P/jg0e+ZRZe5RtcI/nnnHsez0nkFknnPiapkc/49A7rtbUeWlZmilT5vyK61lf8OFtZVVsSWL6zC0EPUBIFtc2QFRPMhmlmkHk56dK5Nv1HNPiUo/ku0CN33XO3NNCfo0ZOcMkzeppxHHM2SKO/g8kTnapgcAcZBkXx9P1KiJx1r/t+uHiVmJGSSKqRVg07QQAtxFP13jhUsxNMRmbCQf7w7RIHJDZoL7Yphu83aViJfpP85TSBdBoZmEBIgZhi2WPddHxvkqjRD8YpNi5hSljtbS0lrGOrH0wUdjACmU4XAoB123m4mQR6H9q4MqX31dnjBK0NR+7axVlHjZqV1vEPDz74isKfqzU2Zld052r+fv/vaKY6z5CoUYyVg5yea1uhtR3O0QahGNKVaYOrflJaMHvFRHILP9cSvscmC4xJ2qAMxyS3lzxkhcZW1TMJUtcpu81V8TiRUtXlFBpbe0zqc4M12HGhutNBFHpRuQGONB/YE/2BR+1VxQ6gUt/sBpZursbaxBqonR7wtMv4OZqoMi9qhdcG//JTVPpqJgLz+lvWGXq8wnPvsw9Og+7bUXtRWejPTjoroWRiYDfUwTKcuJE4fMkBcGflhSaMot6T6STfntO6G9gu0YzwpZu49A4GmB9ynaMwfBa2zW1L0hqVhcrAsf4boZpton3ylGlFQd0VRRgVQ5ONWr+7TomPteqUXZBU13ouNm6eNYVIiR+OEsr/8I4KdhTnpAJF97KefJMA0XWv7YaMt0
m7O29otrBQBCqpPQmWaNlmkbDO8lHZ0ertaOAI/qf9gcour/kEyUtYRYcQqP+sYnOdiXFetKFdIhEicxGgqh5xmBvjJIiPBUOXG308LtyBvRnFsurx1Wv8L4KDhZ+Y93kDjNHlREGmypElpi0TcskQ1jqEErTtJH2AYZssY1Mo5SSW4audR8yLEZppBY1902Qrs1igKsGncpQKhDGOhkvEEBYfLx8v15bd0VVZLLtq0akc9zaxBz+GEzAg9Xa7STxtO+gcIxXXBBJgdcDqCHDBr+/9yRyjR63s/GfV3ICrP1d5B/Md8yco2k1l8iPJ3KWVJVkrfb0biQSjXS8TZfg3e2q6haSis5RPvQk2ysk8qokE1aKi2hOT3eYro42OLQgkd5Iblpvgq7FYzjlnMYQtEoubwrcnWrk47PULJkXMZ0MYBmrQEm1Bjm3Ieyx5oCXJeVdtyp+Xxg/ZYK7fvOn3lJlbOAKnnjPJAMJuw8iNJc+6vLWexXuz4F4LlSfZcXM75HROn+L+mwcnOYo2BFyISbuEd0iQ/NbFta9XEuuGpNkicPrkGgIQy1tILQj2Yes+qzwbQZr3LaDA2uqEVCIcdI2FGsrg60s3iDJXM1nKHTr6ePdbaX9Ol72JjsLyZccJS2i/0Au/2xJEwAshesS6HIzv0smjYJdLeDK2WjHMyTpIDMC3iZdz3wJtsqwOb8chZD5VV9/3OkBX/VKMpwCHw5CJc7CPq6aJMEXNO1ny6D+ffie86SRan0q9t19u3ZYwl+g8W2trXDFExN71wxwjToTYdoeET3PF0w8xjn4ByebZVpa5nfnX/7ZSUPJROhsXzEm08MElm+YbOwMCYTVT+5y4+TcMHnxiA4utsl6koqvtoyM/ZX/Z13knUjONK2PkmwzPTrHlXwvBFFW7xh+fx/pXjasQGG4CygyS8ET7T5ZbFxouUCZqUhPBZSIpRajXt6Q5DCPAs7Uksg66oZqVEsjBaanCWKTQPVek5Y8UGocpwBUWS42wfzJQagU7gJbxanS8deMCsXJMs+J2qsmdWxNQ8/N7Kjjdmub4dV4UIR1EWLtpyjKwNuFxvwV9Uo75qRTONHuOIrPXKqLBl+siXgK8IkYGtHZFGCvwE8JnaS0eVjUnO8pyZ0Wnk4vwzurkdu9/GMAVleepD9qx+U80xugnlIvi0Bwf9Z1aiMHCYjZYq3r3HDFVlV8er/pOqb1bRZCGF202ZBjeCHuhwrGGShG6qqwJLhy6fPuKykFbVqJPYYDIMc29wM8x8EhDRk3YC49RLI9zg2wMO5zhaITawwjMGajckbh8EwMWW2JSs6ojHO9yRHqWc7JnI8JPaOY8LtSGY+Iyt0fcdp3BQs0j56SZoKRucX5eFhciMb3hGkBCEYmCfPh7R+OXgaEhoK/pd4/NgdMjG2VG3k0bBR9bWFF2N232ojJFQ2KAvMlAv43R6j1AMb5/WlbUk0tfUYUSXJ2sQ0hPd57ae6yAsY/do4LenKcCrywbXh20ry+2s0sh3iQ4AZSlwBItaNTYz4ZlyLcw2i96qaO2etOfjrZgey4int5vLXvZAV1iKqvPY2631gZgt3WYzF85QIF+MUqmIVx16/U4eUhdDyO45Cr8fuWedmJToE6HJDOFNhxjmvgBFP4uoXh9fxLrXjelUnSEuBi/7LtTK+RMiTgdlFJYaD94tRbtQ9agm8VXB3HrjLCYcgIutS1mgjngj9HKK+0nm00aQJSAq+k0Vf4ubFxum5w/GFX6P5ad+jXjEcbCE+/n8U3lPUqO/ZdwzHq43n6wBMkWxdsdgzKR2Rr77t66pEsqWjCo1aB4HPkdNQVt3vUDXUS4belMipu7iZu7p7Jg/fuyfyD/IN9qDS7Zii8rVaKSfR/GgcnTxM/2BVjREiRsYN6gTtIEsa3hW2AVuH98eS1YTTgZusj9eCG8ZrM66Vt5UfSwxr+4Y7eqnN8uqa04x6dF+npFhYqpsn7ZsYeiIWQgoBgIpLNgNl
bOFvDgRAfzDDUu0Ineh/tc+7pAqudW77256/nW0PFiMDmVASiMoJpVyLMXLtW7BpziJhGWcFlH/FJAneIGVQr6o1YKbYmktNRsJUnmzvCx6eSMk8zlhYWywRVPOPuvaez5dqlSURYjfvzHv7mtilw3g0SE4kYDH3n8qFGhrPxe4hZFq38nNcEJ/rXG/XFD+m+f4NaVznEYK8wZPLIHeiH2MaRYjqajHyq4iz2miqsCdJEtrd7PQw9xYM2rHl6BaWbbajxH0yenzwuLtwO/Dy3M6/pjVxB3X3leHnfDJNdwR9IWYUeKvXw7Efp9mw3Kyn4RAlXJ09ulw2gnWBawqLK7tp/ahbap9q1TmgMo0UOVF1Etu3TzHxKL6oNUsAkgrAzKxR6IqtxefMhOdfL2d0flSobOQQSyDrhWgI8kXTJ1FAFz+5r1P3XUzIqSwwXT0+qdIk1GW5SNKW04Zrcn2CvECFC2niNjEWeENAItLAgdjNRXdyhUW5ygegy5O24eBor7E69OfgxOBSz7uAu4liym6A4U3X2X1xZkZ5Qn17fc60VrH0aVZdbzHk4jkr/pBH6y/tHx3a74YZROoheTHCMdOlxM5SaWix7ROxE5bRpYn5gDc8M23V5Tqnl0+jj/Xa7x4avdf5nJTtqz9ftwVv6xnicbMk/sQENKfoMK24d/ntJwpXYLIl6OtLFTVbd7hi/lUyo778li/1yF8Raw6aGtCTNyAtAJNmleeL1RZapjbzwCPz6Co2P+YuZ89ZmCzl82ndI2nGQm/W+lYF3qsoBK6F9fGXVTqB35LGN7/Szq07kAFNGIzRcsv+WuUWzfBXVOXmFtMX4HIas9uejuezl8/RlHOKQDgeRvKCN0L59opKOXBXFIfyWhiMFszu+2T/C48jElC9yzV8Ia/iZAywUR8F7uM1+dQfqDDyGmzxgQZSIavWbsuDF29ohe8j5uMYisJS1yjrgKPFc1CzCzuZJtKIUXfhQ3kq+5D4qdM7ryLTlQp5zU1lZlMdwsBo0VnUCjFDSiIfgjnmVeOiDSBTraz8hmn47ukpnySmFXEvvLID5CdX659HymzJ+DGGmBYThw7wV3FPQ2W5JslNm2/Hj+vbY9+NALF5JAjgvSbX1OEM7ED9ZtbpLyS3Lcrjw48ftHHFDzuRhztIJSnD1pyP+tE2wfHy5a5ycXlmQG9feOOMoANCL1fKsJ7GKLPoQrse7Oy3BRdGcPHnAyCGvGTZNC/1+nYxHedJe3rOjmcDIBteZcp8QQRtRQpV1ywtlQGu9qYJ4nEFdvOGxwUp+ajjT75PFayibD9ZGwjZiKIEVf9KosCNNMqCad/emjzVhFZNuiI2dRzkKB33XTTlR1/IHAivRbUXDZwfODA59ekaRwGwMfxbwPNb9/DfY5vRBULLTg4hvWfC5h3+pibDgE8nl1RLPaP33KejxnByYQpBTe/Bz5lemW3CRMZz24y5QjlC5bdFLkIEJJ3hlB33JFarK3CZucqjrAnhumHUod0KgP9KMPG/7LeVoFo941ZQVD4I7hBHEhDevNFcCelgx/GSzJAy4sFRfulAZG76azIYqafWGscotv9I3VQTGtq4za1ZjOnm+T6xeaDeg7Sv6YmFSB7bf+NC7a5Xm2TW+D0n960OMuxHBClXThdMmCmyN12UcK/RZ47dtwvsF958axiJjVKPM2tU+vPQfKlQSEXLoMFQkDm/zg0KGcYffw3jjgJ2/Myf40XT9bBdqBjaX/HtdwwYdQpBTDVDiHaPRPwDiRgVr2uHndAt8D3Xbv8aBQn8LzXz0XzNWecqslvVgNwJQ+2tzlN7HpUwVei2AYOc7kSR3SmBDk3uo6q5sUurO9MBw7HGPQIPKE+SxA0AtG0oXL2faXzpq61lSRpxXHl3+WRunJ27CW8hptFz1PYfZICeSflC9gFQlvMpLsH8t0RMGb7wEeTpvqF4WOlS1kc9j7f5L+DJj4PFABqmqyC2VS+XZfWViviB88dJ+SFR
+1lDgIyL5eLLjTQLZUhj1CkibLujsB+Pksv+E67QRC94tpVhA0uOQGd3+a8w/fQqcefQp7wzz36ms3JY+iCcsjzYuv0mxYLHrGtNECDNCUtXfP7QUXDJTqbeYyOLvyXQMo5hFTXjoFVsmtFiiXKR2e3Q9Vu/gR6D3U7+fwnrWndvMmJ+/qaCVw1XLZRrXZk5VUvnUkVKJNRIYrOPDZ2zE/SpRnv558ovNPEPsmEb9evsLMAUCzN4mrlKTfysXb4eKMTAyzcKbpk/YTpecaQkJIsr5j/tP3Nees/1u4H9sfEZ8+5/iW2mwtPxEt2Tv45NHrcwAcrfZlQLquSF6HUNolx/wooqGJVZj1YPeZX5mppeJF6dcnF+2AKkZ8FFSd0uiPtUHsXuNclkcvOlnt55wTQ2qqvggnjgqGXSepX/9CRR0cNU4C9ieo9U2gUUcsqYBBLWQXz4J4BuNMO+hSrYFLqEHnKdl++kzlYKxzri+P1+I+xFAbxj1lvNNxTcnJeKALXg3CnxwgpTOk894DNH+0gr5tcMNToPI9RFTm+cMfeTXxFNm3eSWqSUs+S/WAPH+zWYJmyJUUpr8CTMGyZFsIfIWy230ZoPZL9U4/oidsAuYznwO/o9u2GKn1iuTvWaHL0lF4TVMAMDC9Bvd2vKNk/xAAx0UuR99HlPdmnCT6bho26gMuN/O5XwUqcGLFK88bfi2lp0QOvVWcYTYAVhcnYVlKNshg8ZFEYrOvKR6q3Z+0C1sVQ6Z7UUx33SO3WvpQMVOb2KQ79oBxQofjbKYC/aEoKZtL9S9RVpx6Li4UAW7g0dDYJ9bCOuOOi649ulZRxMEhq+KGPLSyN5yElbyLq4K1qq5DjnEtWWLZ0Zt8YgNyxhKhAcyAAB6qeA94b+heFaL9fci32nLUeTrz0DQzOdoQxBXQRLsZS+2sl91rIEN0TS29InIxRJ5zEPoW8TKG0FOweuikkf5NkRrbaZWhpVbciA4EXr+LtgNny2N/UluOzORU1LeAWqCaC4FzqenWKIn78t/cEoQEtDfWBp9usawgAY13Qpn9Hv9DbSOMhitgTW5iLjbn6QSuusoHGfdAR79zjDns1qf/OJBd3XVLlqKYqqzdsjDtmmL9RKp52pu7QPETelZ1mu5ILoaL/jy6pJb1QB8bb3hhlACEKF1KLidenKS4CmW3ScPTwbtDKFW1x0jUmM3xtM8DhqSiptjj82cLMQU6+VC0QeByXrExtVBscgOxKdTOMpSghb0R7oistIImiw/oFFuDMsllubSw2Nmnw//r/nwuHRqkVfXW4/8KKMPjXrgzeWq3KCe2eGY1sEpLTbR2PA+wdsrb2e5udlNCGhGYMsglitKdFJRhZNXeGcGBBohVB4M3w/kXtj7GZNhW07vtiEAH32lhLouDKIeBuPOlh7BD1k3D2FjWnHEM1Qzfcpqf+PN/jTtDyCblFEMuwMFyohLfj1z+pxHFE+tUyPJcwGo328kw2U2lBp5n2yeyuRHHUmOMRZiOymuZW/tWkpZY27Hw2sUl5NnHyPQ9KkUaKcX12AZIIFhuu1lKRYri60qxqF4sIEeN7v+WYfUsb6NxeionqZ2xFIy5qanPoT+onPEgYgoxwCf15s8h1WzIUQ1LGGBaCElu5UP8UJl5IcKX5i0DrFfAIhzYfymbq16Yw7q8+Xw0sP0sfV69CFq+kFOnZEZhNDNkGo7yiZM4OtCEuL1xAqq1LI+czljGUX4iHoqmjor3rHvmnCJnh6JAhXDPt1p1uhayevcfhNVy8u/2pd+bJkZmIg2/dOkAm9angVqXYHSg7UbiwjMEFa7S9ZH5GcC/6j0LijYPaFRRNdAPxNTJlpon85pH6ez5WJ8T1b82NSppEcRn6IyVOt7Df5nJpV5tZkf+ietdiaMd/hMUYUu2hhONGXqomMs0m4r9hurYsPOlYGWiM2jXcZa9EtJMoCzCcEM48nKW8QVc/kT//2YJ+aEv0ls7lzsxIXJZp6Ug66
YzHzoi0VlwByVgBjcScOwbRZO7/oUnhqwf/l5+MuFbEX3E84ZeGHMZiuUjD02yIrmRgWB7kX57xgNWeo5gs5wvpJwjiNchxl4yec/9oBd5yqygEIwnBOEgnd1hN7z4HtopFUh3He9C8dD6ag8yQILDOSCgeQl0q0Sl+31dmCGHnCMEpq1AvD44S0uon/y9vmdI0+Eb0yWVuqADEi9xp+TD4DzfcYubIkkLASW4sgyXSe33nSgFC7zhmrOW/ZhdWfTheQ2JiPgFYR+HTkg0sGEPUB1SzFhyX8+Ee6rzQMZ0+KZgqB6reiPD8ShcbHv+L9zDzVShp3qbzVbdMf1viDdgiA6c/SZISCEvj7VeEjMc4OHxBpwlk3mo0xSctObU1z4wa2mfy6l8/niYKYeFqhN5wswiXpGE/VZMU1Z8lWdoFj37JMamDywyJwXEbifvUSdGQT07POz6ZLDydmnSR3C8E5UiC2lfvnyG+hDy/wM+MASy+604OqyckSAao4/X/3GumCmO8Cfrb9F8h9E6trRQw1lGrt/vVQBQLuEKp9buxnlxjde85cNQv4D6HkFA3rWsIUyDyt9qsB+VPfoECP9eWf4IjwnbYP7LiAFcOLXn1tZwyWGL9ok/mr1dZwcc7Ew1jMK8iJYGF1Qd+NoOEWddjbPM2B6gFBC+9UApNUAdfs6tGKqrZdJYi3ibHD0LdSMdvaJd0u6NwIMUm9g7tLd07MmNteqUGiYt8owo32mTSLyx5B4+1hwi60P8lZyz34CZQ4mH3cQQ8T3GWXrXc/8mqzNulDn87rBjqk7F4NwnWRUdkEYYeVzy/jKle4xVqb806N0hBNb4ex1c+/+Yf0Rj1bD4nC98wYLCDgjD6XlpIQ5w3rVdjYsPvhdTLGxtIklL+YJcnzh6j4gh3suvwMgPzKopg2LpmtMF0Nc8qS0yrGvgr0hFA/ncuf4nIIh3os8Vz5zxvxX7sb1D5zIlef/6fVfiyoy7me/qBOwOUESytr7jFWyw1RQnuPVQkklf4EOokdtYoyFlhp0lKyCyDk4RxOlnk/RyoNEvUDU9WWYCXgxhlwwBE6Ogkf87XltzVG/jaOA7Je/DlXmtWVK1N/HbDLZAsL6etcuD4PJU5fVEPWr5pijyUqOcAIg4HzbwzaCgcuf3BqVIlacxk/854liCfi9hFFf+mtmCRZmWoeghMV5TymGbjQhCcY66pXWvL9InvwfR05VoDyIFSPusER863Ir+zn9Zs3Pwh9VyUyaoSVny7CfqX+apLlEesLmnAlXjYtlVdROz7i1DE65NW5M6i8e33yIe+N2ngzUzxPg/GDs2JaGb7ceAQvNNr9kYC++FjPaH06kj6V4Zq+0A5EsnkoDTROZWxie46oI9gi+E/hxhEbcmk22kbPu0IFYdWu4J5uBcerebAhrJagIIrgr9F/IfIrJuHbuXHAQ3XBSE8TkN5Hk4loUcI9WvAzakrosFjzf9ItweEWhQEAKDZtm3btm3btm3bP9t2L5s327aNWcQs5PR5JkzN37RlkC99V3t7ivORY1MUvmyuVWmWGe1ffiXq55Urroyp6tqNcJAilM9G9Mm1HYl+vz6Q49mk+/M58WwKvzDFt6mHnMbhpy0u++zepcpIPb2GF07EUtbq3Evshwvo6imouzRAxlWrE89rFRlJMyisa1O8Jdfrjoro3UeVYI9vt/yAILpRQ1gsiK3uSBMS2LJVAUR50LPA0ycrolMKI39P1L1jveO+PLXkbcSLIaHUBkVXalk34VM+mAa43bQN6XNOMzWEbVMw1qbA8v3kP8PNw4sNTNPhKie4TLbSCdazZStngL1etzPvthxQgwYjsWtmySrjIkEzxZQO3Dl+FGzokggUhLWd7S91a7HHs7CLPXM1xWGBSi4mApUwebCfQKlS/kvjdQxUW8wV7wtqMqTiZlev1ubNqAzS+dKa9nzIp57hjS5L2uCnD3J4Q9mNNMNWW6nzJBm8i5TvnuUwLLrT8oOD+
2wAWneO7Stgn7nUCTU4mgkhPBOexBhWQ4hRjmYI3P7RPuei/g9uOKywYEDV0Ogf0jtrQjtpOlOkIVXlBSmjirvUR+u8WNLExDFCRbjA5w79my9WrhrYVG3FkjA8S0x35dseLDLnTcJRmMLQndf44P2/YE2bRNqxzM8NyR8T0x4qYRxKcaKXlZDXQNBkNGo2ru+cOr+QmrlXCf/XYFC2HsqaW9C6+dTwXaOFCrngIboajQWwpRDsGxfcZEeslEgEviWSAtFT8Kz8MVtgkDdxjuJL+0fPH/q9DSVN4P8cM8WNjd/D4+obp+wNJMUUI1kUaSMhDt9Vb5qRh7o9tJatSyTsyBO7ifC/fOGC3kYdmQswPa5ShrJQiE/defu/EYfwD5OJX44huyEvsN0IcgRxD7eKT9Sa3QzbuNCbt2toAkAP8bBnLFUFU0Fz1IHw12cHQKuE5Ig2MuMwTig37g3+A2NqaNN3UCJq2i9u9+3smU/oMVLcaiUB8nG3cfk9j/Oh1CpjSdsMt4VAycP2OsSkHU+nS2lUMedVGX0UjgdZF9KNYQRjSLcvsrP771kw1pXPTANhlLL7e/aV7hpQJESTZTeEY4tmzPHL3h/zyg6ncMcylzGrj57b2qmsqaq/uozwx5uyL4IdOkp+fKbAUORCPoaKU8YOlEYjvUa2ykYPK6anXM0rN3Dy1J4mpWRSQG3VhGmUaNE+8qsMCHbcwJv5cFglRjovTIM7Ar3H8uCpYyak8KGngPaxaRIzq33zJgPzfEG97E0cmYxjgNDTPnqvWRrI896Lr6sleyKwe5O5Ms9ktYmzjH9rQluQXDSEXDKD3KIhCnF5P1MhPf8l88lSIZr+VUHH9pqMRzNyc4w/jMhYZHd+YrUlEwuMUeuw8a/rXPXnUuQTo1x5/29SMPSz518QrOdAPCatMzoLURCiGNFHUtL5f7TlA+6kZX7OxApceIJaMPO3/+LcCrocCmpRSE/4vXaFwsoZUVkshgprS/4Rgl3ClEcvBHDKuQOD2TjvPpDSmdT1Gmeug5G6INdLd4gs9VrPgHsy+sNNkNF5EtAzfI1qZ+AQ0atmiR9q1shJ5JR4QQO2ZbHJNi3gQIEyTfDi4h20AIqSU0v1+5Y4XhnqbhLT/+YqTFgKI0LaCBY9thlUSlXtLA7JdkG2d3WJT8lxckxcG4yJ+wcrMBrTRZVFhgmUuJYvJUQ3hhmXXeJR3bYq8YrufTQcaB02ftm3ndeMEzCoesl7fZSpuWYh31L4r95jax7MbqJOEiBWyq37XAg15dArBw7oyjiT6iAHXsx9fGcskP0HvFBK4DmaMCGFyV4vJFY2UkGEydrL0xJtuZOepSCeJ+WBg7T4vm9KlsLNS7t1DMethIE9pw+Ui/J/FszRe2PEeFjHBuNaX5YsL7qDIFe5UDPSiCTCQzY5GAnTJnG2BvL57YTxu4Op/tOZTqUzp8fi/RoLGJliX+csYLK9RRDyhyKx8f8Nr0UEcLRQsIewJW0g/G6I/RFK7RFiwfpBlbQxixR67cy7FZDXjxc9JmQ6eiNQxHFUtDXO2tQf7adoSlxy3591Jf2UexGipxoOSrhAW4xb1cqEi8HCxNm8CDEaIod98AR+B29gLYcM6cUkN0HXKyzkrUM0OTorioFPqMU5uSOrKCEy2SgrGWbQsv1odFsM6FBJP9oWWW6TEUQf3+j5VOIfhPa4U+a3MQnGb+95VIm/IcB6Vz4AzTBrX2YnaK0biNvFodWlko0fhlgOI0UijtzOV7WGKipo1w2zvmhSXPuVjJles6e7e12IOZ/2GzfGLRbL2fG2n2RXcbVyh0mzMBELTlXd3fqPsjGWG2aBqYu8oSczubcNQm1TJHh/WmIkV8HmvVEQcwRV0Ddo35FlqUn9GbJz0tQ40NztxbKe+NF/L6dq5TOspV53zG3+1ZRtf1aw6TlgI5KZGlrxU2Be5beFJHygrL8paydf9OK9yHfPOf5j0WSIbnWUAT6PH4iRCBuai
Oe7VByoG2xb+8dCqiHKhKTCr/qj0X6JLBfNpjq8ZHLtSqKPhoD85Tyw1GR99YrRMT6z8NcpfLa5plWs/8LHf1g0VdCIsdAMRqL7Z7wOd2gbG3Et050g2V1JRULm/VSKTb15T+M9F8JBBb9Wlgwn+eSoMz8O37DLTAgsxxqVwX8zqF1hotTdi1jah59oH7puHnQGp0ZxhTX59xxuawyicCqOnwksZquO5jF8fWWLfY2BrjPL7Dv0DxnKDPmNuCR5QuY/cvayvUH0H7dobGFwIJzWdvrsT4T08p0uE8gaks8sO0jjh0fdkFGNqcz+ML85PKytjkr0dTuHzw5Vmcw8fjDiOik22y2DkXmbZ1euL+OSHUHDp4axu7RY0F8adxmXj0Ma9juGDKqApNYlVURwkF8W32KXdMae/duJ968XxbxQDAzJinFZA2xhJx61+tb3Bsvjs/CRwwZuvgDplZmHT4+XvqjCzLR36Ab2xyOkeqRVCYQIsO8vqgE0aK/nr13Ck36SLixUL4oO5nN2aZi9RJ56rYgucpVP0t1lmkfVbxXFRnKFM5cXBH07jHUpazL1CfXlf+iXBfk5asjbn2YMkEuo2K/ueeCHHbgX6VQOsaiF/hd8F7yuezgculoYDmdyGJGu4YS8X0NGzlX7zfh2Qpz1BfUcJzIZHus+pW21LE8Oka7ZRA4MbI7cLts9mo2kcMp9JNtLCuKl6YBQVMDfSLBS7u1pIub6n/CQNqJc+IOKlZFS5E1ld+sh1qYRFDdhVXIG/tx0KmdTh1ZfaR0wkb0ugHJ/S/i/LWw8bNfZ4tXepse1hdiFnGR7bDqvNDrgoytbiDEGuEPECo5B2uX1d7vbKMeFsdv1UXsgc9fv2nhT+rcQ6qHHiANxhWMogb/SQcq2Oh841T3dE23b5VLqSIaG9Qe1rPJFyU5XNCQNq3KOyeer6F7ixVyVd/QxJjXCZY6mP1IzQAI/Pg/WJG1eZdEBmvbao5OKya/ZDtKOM3XMaALWhU6Yz4iD6TaOsTAfRlMv0Sh2b0t33RQSm3Zjkslu/ODUrBdvn5QyouFMEfCZ6XKF9W/qH4+X7JxKM9Q5TtyKuOozHFV8LBLXQtKdHcfm0yFHrGOGq7IAx7NgpJSO+unBgxRLWuTbEKiEoeUSoQSR5kFsXm7JbLUpab5f5v3l7GvxqeMweD7IPiFkuvy9f90P+0cBTIcut5ZobeTI1FmsNlhp/w9gMGu/y3haXP+8rX1kdGjFnYkvmL1t0UuJB32RExdcGUiX1HaQwde7RCND/Psoru30s371iDpXuDJjU2/laj29kfeW/yRwJgl1nAhbdAExcA80LXXLyESw2w3Zi6cC7TaqG+5eppnk6mbJ8p9BAl+Z4qmfG41YQ2V/1Q/nGZdL3xGm6+Ucg9Ix0a+HyLBcGx2pYCxPy1AFTxMbcBbcTGi/nfkn1oOyfxhMEe5I1UekikYo5LuIX30nDgMBs4Ptig1+TYJr5+jYW9lMsZZ8Spleh+jaAu0Mx0LIkOc5XwLVM+odZJOsIVxHsSvesi7I9j7B8ICXhPC1/rf5F3vs8fKtUxYB3q0ecoSnBIU0xFJgN4LUWea29RPeah3JTKqfC9md3/nWNZRlKDxrdz1hBe3dUMkWzTU/9Q8Jq4oyCKmaH0NzFaHhDKYAQTB1Asae/JoM9jahyZJbG86As3yWs46IgOv0o3dD6+YU51T4EBoZ/jWWCG0f+TLDwEa55wBO4mmB4r/iUGzyIa2NUzFOlpEOuszasxreneiOC9DpTGFkgNXydgEMuYiddZHCt3H8VqrIxMagoOxoVz6xHnG2Pg66tKRKwDNdoap14veEGi9XnYmUntw/YDqPar8ZA2TXSWKKCBUOORWgyZGy6cBySQctC2LdhvP48rXfn/Wj3TzEJJb3hOlY68rkVsxLc98y2nqtFISOB+CelAOlbdxJ4+EA11msjlL/JjJYbQ/PMeLHRNQL5CPDky57ePs8grsMZY60Kxtc3
7uq72xzMH/WwsnqAHK7PyuLNGfhS4GmSCJXiRu/cNAmpRSXlVvvehsHBo9++UhWHUohaZSOh/IlMGmxoTbiS0YLdMg9vQ9NKLr2Ytf2C4A26RXZnJmhRy9OG5X55hXQjx4KqqNKi6DDh6pPCFDdgsPsLSW2PlMcCqVldCh3ysB/fObuNFmf5z8dxW9jMPJGu1YGGF09kH5DV2SKaHkoTiofxug2H9ePN3Shl3DiDCTtivIb+F7dcVSjqECviPrQJ/hFmgYaDk/dS58WJ2osB9T/7DhYjpIGntcssbuPZH2r7Z4jFkAdiLM69hSQ+R2pYlGpTI4MQ5KK9CHPOstNDz5x2h1lWCDX2dw6vHl0Mk8mqimqnNKbG8oIfBMPbft/OkvXQ6UjINkcpHQ43ACtZbqc893CDLRtLL/PeljfeLvhl5hx+extOfSyduZdIMJHfkrpnXc6En7iGancq0DULkH1cbcyMUEu2KK0lp6gsgPzNt9GWZJZbVDCyjtDKoGm4j/L8SL2X+d4ZQbDr87wLPQyu6+vT8fDzIaTRyvSpOZmP4znGHq2SCCILI0J4Ed3tZvuxemutT/JXckUDnjIMVXVpje8IEaLLbqgcD7qyBOWucZOnaVPEZ/7/ZUprtrgCzlde83iZnMWIrmubBefUP/B3TBk3QxZomQjLjRH1BVMMPlKc+QQhv04s0BI5nqp8k4neSMyAYzmfd9X+2yhCrYEOfw3Ddodesogma6+MRa2rX6Is4KeDFlpH3ot/6vLqaDQoDZdQjGghtH/I9Afj/WgdS9TNXF2Sx5a7a1cOfbausp8fWnHSjAsvGKZVsk3E7IUiKmMUHUd4O95yFwYBDeiz5Yf3MEm7Wlo+NQuohFmwgnZgk0+xDntu5zD4hlsb43DFqaZr5LO3S/jF8ak0By17qOJsmTcKrKDHfmDtZMpfKilXJDYiOyqin1C9AQBTyWcS9Xu0BYzS1sk3qZUaRY6BuHuNCi3spKRZlp0X+nB4Esz35MZFa5gNn4eK1Tq33RBAvk4Of0ABbTPQzVLYKP0V6sP//GY2QnbHnR1TI7sktZgpEH9fYkhYQSQrn//yyfsAC0SL98v/6Y2Mm6BjtLY1lmCWf2HVCBhECi0iD2uWN3AABPpZnIY26RMVUKbk5crVUIlnuoTE/r5tnfyqp0qe0tN6xYugUYV3+e4haQTJ6HL5dzss7nrVDPgdNOoKg7wI0F1WtcMYW/Db/GxS4EgncHC9332zvCURWWauLJ2HSHKJjdqoE7aLZ04fKgw5Gl5AaU+pH1NvIii5ZWfjObYWxf6Z/MSVWNvvCsFLZvf1s87ZdRMf3yb3W/TWo0tgTjv5Y3Mt8CexkJD7fJZZO1FcgV95mydNv4KziI3frKKFbodVpWy8QwufKkIVmW0YTyLsOfi/WQUHcdxw6gpWI1GfqWlU4oraPhi7sMgdOM0iG11zLNHonH/p1Yy00A/B/mi+VATq8P3QoJ6nKqr0Nn/cprqGSGeGOq9Tnjq4Rgi8t6ahlKBlusklm3vmIyzw7VjrzWNIpBUidJ15TTxkyIjaK/LZ+ATXI+XarDtVjd7hLuwaxi1pwU/lxi7beu3rwjifmhl+OadrRFNyJjyxiWqXaunvwb7sPyoCIQK3PWKuTGWa8oOX5cTB0gxN6gE5iBotKVEHVPvet2oLI762ph6t9VVQNq601C8dVEtLnfY39tLUzXhvsJ5GQuWA5QxYCyk+7DZS0m130En+NR0rXi9KMNvYbHb+rzwszgfX2Ndtc1vfqhqrCU3wkOZKna5dPY8/rmW9DUeMQ+uCjg7RhzncyaMNopDxiDQ3nSovzzCxN2IQyQAzBK/feC/w/AmfsPT/DaeETyN52tZenLXQOW1jrI5HSo1vPGFgjcVJP9L0GoSLo+ZiD3qEALXffTm9C5ZJYax7TUgwKOEiAPemPGOHp5cLLd/tWayaDAc0xtYD6CFQlfgGkAN89FhDBvy2IaKdRzMyX1Kqixl/
UwN3Hx06AkKdezDTcrV4O7X4bc9SXqDoUOmzX1+ldc/6l8g1VH13DMpBA7PZrRPcsScdrONaLoxSAtwyALChFgjHyv26w+lNtNrIvIj3/TGGqa2uTbWnwvCgzDMbwdQGC+E/GJsOK3M5HMXXwk1anHjWS78dfon5s6VnevqBSMyLHkcn51VABrVOOamlLRyrit6GSIJs9QSJr7hmur+yGD7dTyMHWKiYvucgq+SP2aiEg91kfB3inwXr7mQAGs4M3hMsj5G9Bh3KpJopp8zmB/RzpipvQr5jDYlOtxYzFGLX0FisKOk2lZcQpXa33QJJGsMo5tkqaU9Sq4Vr1rKPruh75bRBpP1CQuwAEOB+9duXuZ/S5y88KqjuF2hcn4cOSlTVbRO4xxlGgRtEIYE5Ilsyj76h9/NfbaAcLYBX7E4sYe0fq5/6QpAuoOiCwiTINnDbxeqtpRxUAuECI6uYoRLSPgswiISmA2wwcfKM2BstgGn6bSH3evIuiHFOOCV7E5ZrcnP6r8irvxuAYMnSL1wAyflyfEoglj+AUpvG/YP0wUj/4HXluE1Dep3nflZqqjtZGlEKMnEjmJ/5o4rdggXZSdaLICdkPngEYttwdLfYV6miY7qu/maAOIWisZcM54Qzg53tXMiU8LruGORmBNwQiOJy0bekMEl8hu+EziBo9ViSKyC/xsSLKGeq9Y6pAxxYKBqTp4rjOttzTIrKDKRz1TaacCK97wpb+VjpwAnatkclL8I1atX7bI5ONilN+3pSjBYQ7qY1vaLHrFL4OEpuIlxs3ulCA2+YR6k0lWFcitBfElD9gf/lVwkV8SULwrV4eIwmWiXdKKfx6JJKky2Kd+YqjAuc8jERZk339psTOu98knxjBKw1Y0YX7B26GdQml/ZJ3eXQiG7gBalsYwNQUAk81SBqC5FArfVrFb/Q15CytsyNoZ5PCnzd0eqVk+IgmTiCl7JOK8eQrL5lIAor956bXkomA2o1UE1hQH0PDF3aUB1FM5VWx7KEITCGtxkketr90eeit8Hh3s8QtQNZ002qLzR8FeHMtRyI2cjtffLuMOeffI3zH7C3bfN8e2qR4O+GWI48hxnD0WXYLk4X3OJfmheNDiQgQ9SCCmyvWkPd0NR4NxVeer5QHUTYdDol2FMV6gu+t+/ia/N9QXpE/RCj48JSO0RE4zNbqOtAZxIS4D3KHPO4bfOn+L8z05nvJ6Y5oI9CO6chQDX/MsSxtz93lWci1LTiWM8loCeeUEGw6e3Lgawbw8NS4c4fAvRxQSGOTYT4ySJEzBep/5Gw3ggBSggMoJZZwSSzNqT+tQTAQSpWLyYGbJ33E+cVw+cn9ZPQwNTzG6X/m5GSDvGl9SlJj6zhQmJDhBR4xE9cH9BUhB8/Eq/l6+Wc68ypFLB0iPz2+0KVee+1+LVoHO6og6zrnaCaf60OF+jFhKXnER7Q/fXNlAZlP4KhC8tjGd7sEBHsOIjL1SXFtqFSuuqfHklH48vDq5vjPPw+Kwzbvc/Vzi3HfW9URDudhp67BA8UOc9ZJwgEMmP6sjYdgBD4AgUgasG8HrQpU/sF9iRe9EcYZfGpRr0YVcba11BmWPgeT/1D3Qua774u5Vph1VRMD5MhxlP5WA10WE71EMNojhF4D/us9U7ZkpEP2pyjNZaioQFl2nUq6TJk0HI125Nm8ha91JmVRADm4i6rfSeighGZGb3MniXqTQEzZarzXblII/G0Pf5elu4M7yHR68BicAgaJ8KH1wBbrloYpwmyUPHWyTlZbCxzejuMtXgTXVmeqSe1h6RYcEUvlPlA5bUxC38EBL3zq6m5m3PBze58l4ElAm89xzNQxL6e/s6MkgHc/uoUrF6m5l9+Mw/7JxxBtaHq7oc/3g/pVN+EIDy5YYH4fgC3SatUJO066EuJBZX5Rovq1Q9tK7lWokc523ArgvF1LRBriZbxL3tT8qR9OWLa/1poV6UooWDzSj1nNAb4zLRtx2E9LLrK
Prbdr4BUrqyQ4WIYLqlOQz6uwYWI4Ef1tKJVNfPVNqlBIgMTuL0ABMJhZNww8fXmrDEr73gjb86BIpdNCJ+GsaNaq9eJMXNFehXjoJMQQ+Jr8rZfn0HTBXAPzzQynMM60QlzRL2O0DorQB1JdPJkPJXQf0IJVskDMhPNW0rlm3n9+l5+hfYgjhXMe/OzgjIQ3s8T7xRlZdvcgNjjnrrD5VXaKr4wr6TbGMMfEP77vKlVDhogJPGgVsxZlQgSdOccg+4Ip7Xdlygi5n9iiKhz2I+R3hDqQ5fAbkTJiUXAr+cjkEVAF4z3WMXsnq48GHNKLvulKFPw3uRsmB53E+eXOrSzOztKzo+Vk2h7FEbnutv/7tHWaxpVQgpOWaD+vY6QRtR7V42oEI/RNq6IqOOIFsUjZ8p5Vs282k/5nrZ9ekioYIIrX3LDOftzNRk4Q9oo3B/+yCswWwiXox41nCuNLj+EDrkuViU24B2tR1qfnWZRJI1PHlnrKSxuAoo4FkYWu1D24juuGINyOyrb9nuNgIM1QLv4RPThE3iPGsGZSPcM2B8o/m9TPVw28QVgrLKBT5ZPUxWIdmv/COj+GAPyoIWOHT/dLI6TVywu3Ipui+fh7DycaAL5WAzMHoRX+CZ9LPKCgyr64cN0qoWDoHl9ZpEGwmzwDce9gPFHY+IRBqx49XTLUjj/JqrQLQwSOchdXFm36ojn3nVk73fNWooYsTrsvG0e+IbVOSEPbEzQAV535QryMsz9hqv3U77fn2yDfdsfrA1zXyvKpMmuM0XUIOVDR5/mrx3PDAGmuj6lqKjk29XQJ5YXOmBnk82trh4NP1xvqrVccS9pk04Sg8yBI0MrrfFHbvnZh6a4tQotsr25bb2eXw/k7iVrWzNzWW9hrAnwuWjCgx16BamtmA6LG0//emYAp8OYBxiYnHfFqOsDx0ulS79QrEogLCWqNsncG9GBGesV9VwnhVfkjOUpkf2J/4vEjFG8KMLyz8dKyuMgeNotzt7HetiHr+Z8Lf0lAjv6ITkagdZO6IkwO7Bd8rOeJx/QOFc0grASTeEErOrfe3AxWQCL9VsVYokpAyEURQb+oGAOplqQ8ZsJkKgNFBWzujO7YhYNfLxvF1Hi3Cta/uKL+mQQMSVeGjBbZNqlix5CdV+cebc0hnnPwJBin6tMPQlavqzfom8Xy1w5ECPfZnfJZq8D/GCNEpYrHJ7Ld+n2LvkrjV61XfykiCH21Uv6n8IV26XR9eY4Sc86OeEzJUS4tBD2hIMzjlUQBuD8TL85SMTd0mDIjz2BOQR3GdOmW4TrRxN1RGvd2A59wJVS2kQvji7siWOsmWjJxXmhy9ywsad9RmNFaDgxhpHy+0CFjP4aHxgrZ2+0v38hG7EyJuPxqyAYJUE7B+mWFMobV19FzSs1Jf1BozyumVGmHrnL7SOoa8ypnBxkR/XxffKITreJ5QduUqWsdi1J2B7TfSOuEHF/NcMrEewWnKQwxDmddjQSUEW9bVPYyzlYwYM3Ql2etlB/S+FQYQfjGUdseU363BsIw+RXj6QUD5vd+RgOPY0wxl/UyH+fEZ2W26qHjTdZ+x5QGCiIDwgdVce84vy5yn4jCDRBqkiiYxC4+ysp86FNVdrbfbQEUhJW8jSzEDzaSLyjKERDfIKbocWaR5yPDWG/sImXBZZmyBVuW5NIFXV7u/UY7Ab3dkgxP5NhQE8C5pIvwqZ5CB86Y+lKKYpYa8T5QCafgpf9rdOFA+LaV4WYycnY7M20WYkJlm/snziDal5klB7+V7BJLbvfZZyC/trjzL4mY9GseYvD/siesz+vr1kH06sVG7Tq6pvdms2u5vemTM2WZJ+iGDuGDvgqPHzRiLLLmWi9fxMz8r3wyFUarf2GAawft1tekkz5HjLgpR53kW57jwMEitYxu+9ToP8L6MFIr9pS6fmc48PVeI4e8C1aq0YcENgtbWMqqtV0/onBGJddYy5t8uBmK0lHZ7Yr/KD1W6Xx
/8MGzX1F9pNKq/T73MyzRbwuwq610Brsb34WEirrRvITq4JJnb2I+R3DQghT+wNOEQ3dTbEQbjxYAjit3aZ5lgfap7FmWHI9xdzCwn1EuhsnEhctJZp58t1pK7MCPUVOjdn/mNcLmaW5D++BbLkYMUkcJFfH86FNDFrQ9kEms+tG9UaE1UpG8wL1qehWeoInEEG+wd8MyfRr1LY93oGa7JWhM9SCGm2HjTSQo7IAJwSe6PtbPbsOAFFWnlWVMLQQt34qxMj+kwah9MCzI2qzZfRHhCgjIkfFw1lDNFbkz1uihA0bfyxXazwnxwGAg82q4AStABB0G08aZV6rG9tzKD4eH7kzwTwKwjb3+ttSpyEpS7kpkSSQUx11ov3efcJMOCkCoPO8xu8SH897lkdAMw1yfmOowAO669ed9XwopXZY2v3361qmp+8yewqh4jpGMPK9Lgnv+mfWAzEBT/UVoARfj7TUQj0kIJESCCu2g2HQz7dQzf7EunDXYwT/sgOzuofhLba+OoIq2lulvLboA4nsqKh0AHefsnLLIuLRDZNmlbyCULDpEq3dFsnk0tsZJXSo96xMn0t5Meko6CQhi+kl26zXEOsGuYOy/tEXDsSixMm4f7JGVTSC8hlnNdXFzzh2aEWOPmpHdrdvbay+13wbogMjY6iQR6PdpRXT75gZFtsdhdRd6zzYmMs+BMvh+W1V1nl7jbCWNIxlzFS2zlk72XyVuH8h3q2GaltuYi4r5ol6v5Cq+msLzUERFvQfz7E2JnTZdcsmGTmLlOrJACqd1l875hs9kcahTyhKukhJj14MlVxP4qqQVb8iXqDOvHwoNFQU83SLrffcDdTOwscxIY+aVRTKVzg+rxZ2rJbT/0QhMswbsFfSCDpptYdq6MthR1lzb7hx1oVRCuhvlmzVr0d0audIBs2zFohokYuHsfaUXvCnGcqUconsKVAJEQcT2mD2D9ZY7SG9viaCmfa/A4heEod8cMal5lfk1dBo4hXSSAY4ucVoJ5BFAyYf0eoE+FjQ+1KjB8iUPAJilZovT58IEOQbtXRQxTwcTroCPnZxuZp9ZbGPceUk5Xj5LZvPaQoZCMug7U2q1Wa496s7nFuTYJT+LnOWXO4DmYO+7YNOGCTed7PfPY3k5QKnAvdZHmRx71wYO5IS32DhwFxbh3DAc+pf4M2UoZVVMF6P3s+okFVBIxNXPdaCLGKtuZq33I4hfIWSa7GtP8AlbTqe+3CdT+CzRbO5+JIXe/m3eXZ3icUzNLGnNzav79OQvipB36zBbylvpCYnUtJ6sCO9e2rxHVJ692ONQgPfm+df/FwSplY/UJGqmQtpVGga3eGC+fnYdYejbKHwliUmMuDOq6tAo0IGHmhY8ChFsXGbdtdjv07eI1XDb7ZBow2SAoyAPf9xvIEcff/sHBSNRVIHMM/KbWZWK3haEYKZeeXHNMedxFnG6JzgClF99PdaAmTWi5KRK/lEUxGHCBgfbLnz8ijpeirFK1LxEYqlbpf1aYLuFufpIHqqYMP0kBAD5v9VGsN6kt6OTNLrXvRyqqaMxv4cmJ8XeX2rZ4krzXGA26POTz2HVMbsXS1Cbc/zA7EP0YoQ7rT6Vf+WLbuY6TCwzVltFaYFymPlEHsoC/HcPiPa48v/xEFRjz1cfn97qq3PsXs7vvWecG04dafrXN6c3Ibv1iVM9RNJVAcjKHW9Ul/GoNXz73tYU0riDEQuKZmnIPNl5bHYgc9rSlhNi77w8bqYHdwRuh0jb8EASznsYzY7aDv3TWGFF7XhgvRquxNNNos+is6rV0SkUcaXJgQEPFnsw8/R4QR03ylVnKF2e2DWuaaSd5EGLIEjytH+75w3Nw8zeg09gtTkcUNCngpzJhtZUO0ZUbAFAwfx42tpW8SAxeiF0mfIbGX4Yl10Qc4hb9naofgKv8AmlRx/qU4/5bHtMe5G9OOwyt1eP+6oznNhljJz7HqsTVYz1uRJ/FF3aMT5wuBIrVaS
MznFF4lRD1lBQH/YrvL9oIwBdetkXvgR43YxUg6n+t/C0XW6QfC8A/FEg1poSnNZKy/O6apfCsG9m7rvHHwtrYj6wK8qpCNs40rTsQEbS+pzyENgZIKmmar1EL/ylHjd2CLFGmGXFLRARDMoz3/kE7M74P3mEt+vpyekM9dxQ2CD2cfmta33rEfBApzCOJwTshDFy4m3ZZwxKzk9wWuOLnUFukuwFlrAiKnhKeEQBiWKhNuQTdpfPo16Ar/a60zJvhTVZni9uqsovkj4aqbO8HXnxs6LJLtI+goCtbWWngg5SL5sSimVV9XBxfcE4lrdtATCm0SmRiFh4wMkXKEjeoTSGaMNBIrIzAAAEXrYuU612h6pyH9oCP7ocFKf0vIRR7DdaTbmW0ujZ+td5X6OizYd/TjOB0EZ3oSOmWIRrAhvK3ys/bmU5Z6yUm7cHi/5IG9UgJ73su5LiGd0bEaELozBsy7Jqr5EC+BysSUR8co5ajkhSpfAJKerkPe2uJ2dE7RlGpswAmA6o9ylAWGbgC2YfamX0lxi1ryybkW3CLb6FO51SOogOHVNLa9FID4/lUCIZV8D6RigolgF9s6O0hRwXF1bEM7/OGa5VnxkkC2VJfudqvg01Mr7qOoOx8/SvEo12xMMhOW26ZW9rcgnZELPLwzBeUC1pmpF2T4vELHC5f1EWdIr6Wy6gcl7Vjvb6FLzsMwEH+yLmSGzBRLm+V/Vg7DiGBGB/mRdQPR8j04R1fa80nXfSOByD0NuJpcUBBbpOAnruyuvrKmx2N65Y6ojJQTJk+SFBc+XqgTVayZ4Ln9JSUgGJkpQ2Y5Q/L8IVJP9ThINxL3eDg15tVq7HLoBnuwZ/JsEc4G1iI3aCkTCX4RUG0oCAAtMXdJ1i+vwP2AvrmUVkBjGeu86xAUoxfUyqrnGgmfA1ka/IuaWrCzoyO+9KJCRsug5ngo0W6gJiniTkkqE6R5FdtA+LRmPUPN/TpvZbFDOQNoo3vHC6OhCUTw9gaTuXEAAdi3bpXBG8wgpMOdcUJMBsjpiPlUjJYwOsANe5HwT6F6Y/5DXYX5HDYgR5rc3jy+nYuxBwmq+ic1WJUHMMt0omcwCz7IDy1WVeuwo801O5kW8NQShrdcDrwpkdnrRTheF/6g825xuAnnyNPUZiosd9eXOf32k2MWF8AIh9Y9xjyy3Yx4yNyDQMEBbI11h4KIKpBqYeSkeQCfJeUcUncTCHVTQ0893cyCyqDaZWtoICflrnmtk/9QGvyKO4/tI6d4tFnUAyNOWBKVNHN7bvPk1gYxd2xiCPVHxRAdfzGpcauI0FlDyX/txqhaANKHerky+7GLr8o3+bakF59KGfk0tHpI5/7XnnsZv3M6u4XR9GYb15IesoXnTQ7ke81HCFUAB3jSihMTfKxawV4OcUj8pknrlctOAPus8fEjB4q+JJ8fteaJmfroTvu9wmlJ3Lx4pzVGl/wqGPCrBdYULGAmMnybINZRgNGg95tv/oBlfrtzcXFuWlw4vE30VipF//tgkZam0SBJVlG2AX5cAwtK/YoWOA4rbuO4EbB61DsW8FKXQjYrB41S9XHadmYXz523fFvo8pZQgeliZZHhqgj7dinqJ96Ci6HqE/GlMmavRfhm0MmmDMHAs/t3IMV/+frRkj6BeQWcQjVb1lCrpUmtWwStvfedEUMxlVLe9Zelu6jNjdcUCp6bwTbJ0BBBPMk7b4wrTX+Mei4+2JvHf6E5kchYBYhB94GJdMDQZ96zQE78L3l+oZGImu1LaEULqDXC5c0TZv4z3egMWL45VsRW255yT+POIrZhjavD3h6JMOuwr/a/htfwiSruYaer2u8Rbre26FICZgrkb1JulyLSc+DBxDi9bX3SyL/TS/y2HDSpCWtU2Zjr+XkU28z7t1EuoqORhfFVzXjnh9JvUMq9UVGXKF/vcCKy0Bbro0Zxd9GHO9SVJ5HzVH1+lONG2Q5ERaWHvoKzROCOoO+6oktsLgjkH
W+gxyW3gjGY5KZA0qwCzDOycenQHSWAsYGpR07vGZp0LhtAoAtXc5Zm7nVyw4H6AnpI84be+6Ees2tfb7fcla3YkL+iBkNuW4n09CqOUPU9xr/m6/JGV6Tr8RD3V/mfFHl5GnL6VFdZGeNUtl1upOH+VqgXd3MyZc1VpXXWRY959bSNEoZR3bsM0ScLIP18YnWfCUiz2ghn1RqBXbpfcYnX1rwQqz9WJ2FqMBqhOyKWyQZo7P3zEyn7tJmAuNtrITHbTxTFSKXOocqmUzO9EFh3pDN0qq5NSaS9cIgclvYLLplXhrgXjdIm4ems9GAtjx7/RrAAK9/a59pft2Ji7pGDfLOo5dBic8T66Ilwwi9Ggyab7JfPc4ArEIhP0PrG/o4DmUeeUWv8B9ZhAUEtLo2sWV3Stkdr6qHpZhHnMQEKNWzI01HbLeSesG4n7d1hKhOCBNxxfXKRCvNcHCVpWQX1C0iqThJDsDqBaUNlua5et0jPlFC5LDEu/2qyYCuKFwvEhr+siUHcvgQkDkK74pK/zvUfe5cVLV2QPdyt6aTwVqX8D40HKvFuW6rwnHbpCbQjmmLaQ7vwq57L986BB2e+8CaVmgyXtu8Ahkuu2RwfNdUwGQltvDXUQA8FG8+Hikw0H/dBsxV0WjUrXOZIPMf31hUXe3YVDjO8a1XGsOfHyqY+YIDMHlKs4vL7/LVZQdypP0vrrCdwrT4v/Z3TYPjcrOkJwFEFCzEXFhgRX8udI/DaQmvs2CzkBnRiv5v8LB8wbupVk9dJkzAmAnVMi4o18jFcQqPyw/NOwuNglik2SbZpPp1Cn53sIxmVOjRynAx7ws6+ELRAcxnnXXSW3XSjHoD9TkOtLoPXHce31pC9QPPa0sOa+HOBuvqgb8+wptYM2VCZpIqANhgzx7xoTKl61KCWZgv9MHAJUTFfDgO3UGbODf0IasKIbWyt+H3qgkzOfYGqSnfG4l1Hoj07Xro/VfBxVol7dFuXPeEP9Ygblu8VvVH8wwRtKVqwW9U8I0wxty7qkQGRunnyhVCZWMmF4ro3Rd6qvOqoxOnTQOQxb/KSaLwaVIfiSlYo0HK7BO3JP/YL86pNRZWfPZzED3LDrwpZIoEEtPpvGO9wgnV5yqhFPk8MtEIcHR6HffFDJpPpqZ1PPaBT8wbnPyuNSPRcZXBJMEJYRpAuFcKtE43AWFdnU8eNCloXRrJGARTPvXNvUnJrccSOU5KVBQJvpYvyPaRdET16tUIsWLrWi+B0PiSrivNF8rQx0A0bXNLZ9jeHzTsuyXQVHU06BBEZOKTpFHH5H/bf71yvlU1ekKxR53F2rTRIcV+JLVVz5u2GG6L43PvRS8ZNisvXE13jfDifknFwoqydppZn3S4AjSFhbicZgPy9/Gxnl66Q1yyc5vjF2bGuH0pem3C0pYqB5SlICHW947p4RCD7T//JL3YDvs97BTcpZymEjjWSFq1FWmQGdBngt2x6CEr5gxI0mDVhDr8PQsZWp/P+kUGvyqOl8cYa6eCboJ//dWp+xiUgbFG35PAEpt5++EYZhnRgorGqgbkBEUETW0tteOVt9OUBpfGkFMoc5piNo/K5Oc+5iFHsLP3WbvPEt9kP3yAkolqXxZgUBc3mV53uTPA6dtrVZpJcGhO1JqrjjwQaPJg33njKgjruLxPMofyGphJ09j5zVPJOFyOAQjHtLdXK/CzB2itOilHfIN3sPt4JDrXtAcIo1mA9pxmPxGx2puuDbqb4Pkdz7inUvSHIQqv7im5IsFEgDSUVJHLzFvVXcQFMaIxHesW2gmgiwNo9v+JmJvZsleNOnQi2PrvURXzpAM0xF71kk04HuzvK8cz6CUstNbnXy+teGPkTOegwFpqtOkk7hyuiCKwWhQyM5zrCxA1Fa7RovmJfBuaLvuUagy65n19bHhzClFdJaB/UYvzujTs3TH5XKZxyGXVu0VE32JtCXxSAF1PR3BNfwSvh3/2Yjn2huvg5c3S1y88mI
zwlRi6Kq3G7IdYHz4V6J4h7o6+PgHMK81jf+bAnTMUII8R7M47a39ugsMMvnRY+QRHoJ9EhqKjsQdAtbn4eMhwAiu1eT4DQfF7Gr5FqCPFvGmpiGHZjLNLxPOd4hhpMJj1u0CNqIOhchVVJ7su4JO5WVmiuBF3T+tD916WnJNVQgrh9RYgNP9GpIGvMfsPZW/7cCGJ7eW+l5jZ84SjIMwK9+sat1+MKwikFVPyICbPICTgOY6XuKidp/q8CJYrGroMyUQ67fFgEeHVWjxlPt+a8SECkcw6bSQ07UFs/qLOcq0yblxdc6gC/ppOWvniW9s4IJGweMwfvg1gdYflgGdf3tyG7lDeHZ/PHsyHIbevvQchOp8oO34sfYUbQbLnw184l/CG6Ok5Fqo+536Hpn9wrumEwjJUgEYw9SJLrcwmwrZsm5eMKQdtuSpQLt8HF3qrd4f6LnNyu7FkA+Ta9TWqWDqyAlBdwveZtZ7EEiXGseYIIrd/RucTflgQRn61f5I5HRzbFr9gFZmELnJ1fJ4PzDFFQf0/tZp+D9I533pHvcm6P7Zn6v+8kqPoV7Js5wWyye4wRtgorPt1xlMN6vH9q1sa71Hcq/wj1F/ss4JuMZ9dbcuigTHjVC/H1EhMAZ4T+52ljcu1/r5x/6bMEZ6jLns4/x7wV4IDQcktIHXumFxleW6yvVXBTce9x1HCzysELoaM6zgcHoqbkO9jQyFwhD39vOn8q+DkO9FUxKY4gBiDnm7ybuIsW4wig2UeAiMHweyyVB53AcdKz9fVFQ/sv0rPHTJtV2+EbEBGh9lAQa95kSD+WHl3kek4G+2a0QPGp2Y9jBL/3i6WdGrvKH3ikb+DtQQy49bnWPUlsd7rUzM6YsYCxTVOQAKHCVS4OHrprK1RUABHJ0M4zUkCu9MMakr0jqBCytnsN+dNNIRohwFfaXM/LyUyffDhv+Ff2taGwdXR6WnQXwHvW5eyXbUzzPZZPzao4AQ/wGDJnY+PrTCYx6unK3VrD5BPmBYAi87DYkykSgFfUkOzdnDqHh5OKR6xqHtWRcWzTek4g3tYM77BX2Ci0vc3Qkh/Hk+SOu7ez2V8VEi6Ku23hyrw9nSaN9rXrRliMFmaMy4zFydHwSczxTOk9OTEXyze5xSwSM9pP1cR5ZwOxMZ5xXHsELj3yipUqAR4qlY4EP3HOcbgRR9F02TSRvE7RfNGQv3lRxheKkODET1G8R2DhW/GZAieTlBr+87BPLebnd+BfHu2KBlT9iubNyCu08lYzYZqMvaSgdeFo9QRSLoDwAFBe5SrpTTpBpWGesSKvF0Z008MWFpesanP/xY5EmoWg9omQpVVYpc9COlIUeKRdIfY25/78aFAtDvGCJfBAOurnNZKSipKXxfiQKZaLBnZg4TYrtaFH//05FwoGasNUOKtHszSs7ppQzgYTwENvflGUTkqRbwRoRKo4kSkoRJytyywE8BK8TA/XB0m8S1AxPE6BruB7JmJDHmtXJm/DTCVXsUZEgCdGaZAtHhbSiiahnl/JrDK7yNbn0zIytxmQ5kcCb/2a6IE/kX1cS1j3TjH5GGN16QatyAbGTAX0a0vNzXvpE1kL40wJDilYmGTyX+diX44upzSnezUV575csp6DsxsukwwoRbVXOUclo4421dmJpYNRLhv4BbA9+u7c6zOM9ZSmJvFMGkqIqfVDnd9dhkSZ8doKSv9I3J8koKTMt1qfBPDX8YreMYVi40aGlsEcjNLgoSLr/M6Rphkw0EQrLuIN09K1jeMn79cGorsG4TrlQ+yiAvzqYzCLCJXdwcRvnbquoQ7RYoyzNye1UAMKwLZ2YTeem+g7DQNkqRQFPun6PG6iawYbockQJg9r5r0T8LgRNZeIxhwTkMbVFYHQ47nKzUrgqn0eKu3TLKAaDRvJWeeIuPTz9WFynTLAfrPPynXjh6HyG6uUs4SbSMMyflWXNh9sSu2z8kw5QduzC5MIIHux5FvriJUd0lbVM
SfNKde4synDZCi5TIeo5F5XeI+H/igvMR+mYbGIVgJGqssJ5jmN/GMobOaV4PpjkYMMoSYAWG70Yt0DNsWc91C0jz7uj9ZwwGRJ+FwPnOzDVnv243sFYLxcrIh6hFpR2ez/ddwn+O+KnnsTbssCXYL3g4M6idzxG11qtXK0ReH6H40gbjxTs7kc6h/0prhgklK+OJOXy5ueObZJixb6auHnrucwyoRlvYMK37T2aLwXKJ1SUh+8FtzW4bwS5YHFXno6tx+nQBWC/pFZGyMR8LoifRb1TFNkmLgJc732/FNip/14vt/QpsBU6h0e1TMFG9W1KDRA/8TK79Pn7Gxx65eTlLUo0PL/KVYCJ6Lc3dQSpBmf+Y2DZCQ/WhERwqOviCO/pLkkDjaDxmJ89t5mvGV7qD9xjBaorXdXiBJ8aLfV90WIX4XQ3c5jbkywJWNuCTPZL19ComuPlogYSfrkKqUtFtHwjRTj8E98csKBzm7MA69JPFNvHnKk9UvMoC+8fXujuJBcDmBNK4wEifTDwPy+GBJailszEbuzElnw8jrEbdsJYkKT4zlLgszjfWrPr2XLRy3dyR4TkEz5jRyl+TsWICqzSEjRULpOWGtsVa3ZL2gKRrgCDjegUdGQG7q4xlAsk1fKr0naEPcuWzL/cl8DZoRGzw4wVoMYkRIob5tyzjhPhI9A2LEwl2A59KcsrPBPOt1ZtXH96pcBqQrtFa07GbK1IVSGpJbjaHVDHVw2xP16l9qviUTmU1UoEgHorSoayL/lFrPrCSHM3ZwajPlpRCGz5/sARLjAD4wiOEFKHQpzE23yhNICne6HRq1t2LsGwUMmIkbn5lkUo/utr6eG13/lqmLd9UY8zWp4Lo7w8NKd0LACbfVuN7erQhx5BLlIvGaLp0uw+ULT1Xztja+P5tGkmtCqQgQ8BpuLBRuoGk3ir7mRZrUAfMO7fpYKCK5Gz387p6Us/LSGIEXvysD9L1iygT/c1gd4cK3aaMQQJJyctiPL1dCsQV7uRTsnG6nsnfzzn7CBE6E2HBn988JFbTpa/7V5OQMgxPtdhYXD3tcmhRxbnQ5dXYEQ3wj7G4LWYs4sq8rgWIz+X/+PhCi2ae3gbEmZvFCY67srXoCCXpzLB9p/txaqy10a3zi7ccCcUgjryTDxxAQHDWHR8SeD9aecXovYp7dKO6YDHQcd/TjXNu5cKaz/RysnnL2+MfFB6g8FJQtOpUgg/wbxJd8HpihKxPa3iEnXtZ1hZ7Rr+4JujlztajNaou/ymYV0gcCgfFs3eROecDG3NhWf4g6SIKHDcJrXVcSW3oveOhbDCRq0si8ir1k5hlFH6C5XFm0K0mMTxXZ1Cq1VTR7/lYjOZ0BhCTZSGuzGYjJAnqcSzt2b3ovbdMrHui3Z/2NX7PPf6L4JFsYn8kZr76wq/i5qdK0aBSo0jPjOhnRj+psIo8imGgaXuHsXrLOs/2yJZ8kMOD8rFv3SN5PgmYq1kB0jDisqtCAg7my765U9M7ClmPO5JGsfKwshEvM4d6W9SceqHG2mrMkv9wyI8WNY3kRZa0wnzw3RpGbCBw8m6POUAW11tp2gTIFE4M6cYzkvB6UFigHjtn0WGEl4DgaUPq4ODHOZ788lMkc9ZGW356bzzHkJ75nGnNci46UC03BckDeUOa5KxreZ5dOY7j9OnzDbn7y2k/15zp7knv+0ac4kWQXX9JXddeAm7yuonMLMC+m0PDkrOjs/cWNlm01yHht+gcUh4ZDlRjhAg+TIgydCPaodcDJyCuIzcKwlXlcJpn/wu2IcymLO+BWazudde8YmMp7wyFuO5HKH09qQxPFeD5lis4fLxfxhm8JycrmakiAkWjlE6FXLnYFBxoEost1wO1s9DzMaYnF/O27aZT4Yre92Vu4sZdjb7MxQj7l3JS8afTcmy87YqzXIEwR8mvzQJ63dLI4FzP1DrFhmdXOg0kZ/Nd2pQi/Ui5OXaOKQqlg/lr9k/3urXbO4Y
A9nrcgKHSHCTRwmTPY3TFccJjYvP+nrCdqp+xLA0x826oZizxAoZdG4nIL4wkQJvSTx8dE7O8ltLxKumCqDSWk0WX7VPRCXVj6Z7J85yPgYiMlYEArpMtvVbiqCKQX0NKuCWMivS28pLh7pbkyouHbWl2uTPr35QCYWWIuBr2InvL3WKRNhYj56sxSJxZ3sgGnNf/PztOAmOMydSTs+6RPUps31+PqCzA8TjxWek4UP5Qu7uSR88cAA2uFhLZ6QU9nuf/kfRU15ZVi6CQbe5BePnCTNi//r2ueYxnCHXVQ+Vk4OQeZJ9VzoNA1h3rDIQq4AoWV+CdKFORzZHyjvGJ5ka4kv6tycrOewgJKwe2wrHKOqJvSjyBGzAvU8OaUZobNBPADFPTQZv8HFnyTxkQEbwKwIjBcze2oJp4uZPT1h+6H/z8k4JVpIyMC2vRTTPFNV5zFlRzgf78a7VBHrMU2jEP6seio5SntsL6Uzd4hkngSUKMDIeWiGZDcYuiEVBL8KMh+9lx4tL3JXrMj7zeaYK/YJZZfjPzYX9txz2p1X1ynY6ehQNS1KjYTZNfbjQXoF81MeFzDpBKPqfCWqZBmNZ3oUnWryrm0Zdo3yZE5cZ2svGd2QxXBeVDgJEfYnFEyd4MxiTyyqJgyRHKX9FM6tfBaTs7+rjDucyUdHLzl2ZlSpgGR2W/WQnLW6W6NgBHzEu6u8GK3bNyaCxqaD2cHu6gXINXp+WlVhp0SvzE1KjoNSOHz2dwia4KgfN02hqcihbIkatGSwtDfcu52QsJJjbiXmnqkv0axgEY3wwfFuxEyTAmo5Id+rxVXG66vNV/0wuf9I3nEPEqDL7yHiI73OSMAWxpkK40B1Et7hserNofzVbmjGKB7AibnOwKiQHSUqnPQ0QTf18R5SJwXFTJCUeH2OLpNlHWQlt+9mM0HUGP16Bd41lLBmA+sm+RQvhEHdBU/b3eYIV1M1sClTBamrS55iJw1THTniAt2q13ItRVCeGmijCP1PsMJ3/2X99yQq/ZOUTiYOi4N0VnKdi2NKslqRC/H2303rGXh6Dt6V9inMQv6aIDSJ6bncAglXwmYIB66YoSZ/u8SMySfsyyg+6w9lO3O05VXT7eWte+uTiISLWlol6UnzsTSbtkvzLiXGiNYP5qQIhZ4ovluPai5/mVJCWP2vLUy/hbhPwp9X1GEvtPcKtIUiQ74/3sgKfu/SG0RyGygn4M7WXRU8OMRY7f6f69b5QJoG23H/9odZCuYDmQdzoWS1Amg2Svtt9SE2XAuxfN6ezS+JIdCsvC8xqsfKMi9SSxnFTfV0+Pp4aH96XMGC4fZ6XzYMTCUzuQFRyaeu6Cv6xaX8xkg5G5j/e+QM9/TI/0i3B8NADAAAgLFt27Zt27aNj23btm3btm3b6hAd5KIYa4m2ktL6vhshvyOUtE9Ls/b1WHtMi/+kCXFOjlJCn3J+cyY4Vh/JpUuJVTJHN54nasunZzRCod5Na6all288m1K2p4WSjNFapqnSeRqheSwBEMpFAWrprMcuPnd/t7QLfmW/VWj0Sz2SyvrzuSCaZozmAwRLaguCmPMeKrA9YrR15o0lR7d7jiyf0kxSrhvgwseQ/AQ1H3DsgMd5bnUwLVT/lFhTbrEzdFywXWp+sN8d6f4QF2i9ms9Okh6RbRvAXN5DjiQ8lDavfVVLdJE9NPtfPS15WtVvVc4U06Qw2C+q6M7Fj8/Kv2gYH931UoS2KjoeiZpY0cQtWOl88XUR37E4EILFUuMoR26dZ9xzFewz/2be6kF7B7tafZ47+Ilk/CMuOz50Z+ApGyeAIq1GdpPX1YMPfdkqdILUSj0Tr8EojfbqT17LiuFu44d8H1gkUQ0yezQMO3oB8JdMSEZ/8bjLuH3AHPd4YSFMjrypvM3BXcQ0GWBsLczO+/hMe/0OV81FCLRG3L5Hz5xoP0NRt3XfC7nQJY51zdElk/xgIp7aHgeyXaATF31yN5cNxeNPQYM7d
3MZYkKPk9dclQz4UHRmm2dR/8LyyKag5TvH1iHIOaq7CkyrRATYa3YxiTDutbaZIRC0jnW5RtRrmUV+iQg0cB3DEqJINp3cphzXOmmvZmta1aX9sQHOOjwx4Geinr3r9q53vV2wvSCYfbsTj0pZWhrGbSUfBLG0lEceQuJS45LefVRP1DuG0UFCHWHy4Znd7xcU+NjvLKNhMocF5wavc9z9ve0EZS0XyGqEbC0nLOM2Zi/9kWAnbCvjgAymc0qoIOS9X12Zzup+H6Gi/BOg1etjg4DceGeLJHKWuH/veiEW3LkZQ+7a//58/41Bhi8QBziLn8cq8/evQrnTIm1yoLFbxlVFU4tEDZFYVxtPSlWZy+kgl8aFvGm3Yon9YnzQ94xgdxPMr62kDvKq96/AB+I5nLgEzVDR3DTTIc/yTeah0DzdOr6Q+QUlUzv2pWaISW8gT6uVUFplk/NO+9kMMtz0XOEiV/f5dBHx3D6gKKyM7LfpElyJ9OaIbP9RqAuiI31ruHIHJXG6yJp5uqPEFz05S16uFRqmxpI4zjr5AhVmn9kbe9/qkFS6t4DsB2rS4j5gRBAQq6Th1lBrKr6npC8vH+jrol0CosXrbf0e1H1xYmLG1Ycvp2ppE9/nSHd1gnwtUSdSwEAFSCG/JAeGJ8XIb2d3dV0zsgkpiKSzqHq3ym80EqzPBkSoCe8LafFp3qy73cglsst/Z3B+oUorTmRQPCQHI8wUGuaD7BnaASjHuc5HKWDuFMTWwGZXbaWMQM5n9ffAUVk9Rr1ye7tJLiSsQ9nUJ649L0HWVdjuZUo4Yt5kLXUzhjOyUCvIqBUS3KnByvcBrSFKZemZpUVJnFC0pRg1wyIslzKHcGIXoSp3ilGIgSD4gCN8xSaIq8y3eUj70dbXWT+IWTLEK1kUUcP0y1q+UBHVfYdk7XcKNKhp4K4wVOvstwSIL5HQzrvJ8ReTMEu8ZCZJxCMp7PivMrUu6nRpXHdciMju8meZIvyBw0lve/GEUyi5j7accohiu2HHniD6Vf/rQYYvCkyFY+VluSDnZ4jq1qySZu0CHGkjY1b8WlRmfgmCGiCEGbps2z5VJ85nfbRWEiAKCZGhQBt5rdtz12uVkCok51dE4zLt0+lAAbrceUmCk97Tu0zEsGBPUt68eeUpu3obPhoLubbS6tVC3bXcMkxJtO104TRUTyWuvQOKCTICRaXvOzJamdsYY3cQ8ZcWL4oglwblxEwV4KTOmO9dOFPmyvznYNDQ299ooo2qnUjwXnmvVFpF40gHmRfHbD14K6/PKS79bkkpAaglMwdwlqHAMO7ctuSatjtiFO9i6328d189B56k3AbCd88+2BVsVGpOruYHyflx3GTibqtNWwzX/LKN3EuCi0WFmKefT57h3m/qRpppF0st0apPxkRWPFUy6tzpuRjwiM5Bo8eCcMugXG7nTIqlYWmRXvtW6b3lhruNHJDmIlZROGHafJZcwQTA4oTUB75sNeBl0Ekrurwl0KjuZZLxzioU6ADW3/9yuEdVwS8XMTdhkBvrW0kDDsTJMHry1aDJNZ8kx94G8AbzQYtrhxnGWUmJZXzb6hDdNyDuHZFu0+fx1RwAI3Ifl7cQTXSN8Zat/AfRTeEHV1571Y9eGs+Z3n83+TlDriBccP1L3e3HLN8zVH0uoC+95tocfkn5bfLQC5D1XEXs8pFJnuGEZK44d6svSo2wAbfPfjpchjHRmQzP3Br9kpuol/XhcYaoEzWTaG2cj6BVksyU4Dr/7Pqgpzw9K6/aL+vSNrrMAccUMcWiXeZgpq/hzMSTaV9r0wuLnwiLfBLazjivq/v9p9WNQLxJlgA1C571SrM9U3VBMVAlMssxA4aIrOl4EzBrY783cqW9MZRh71a87YuP4UnD3B/GcW9A1YxMZGTAa2G4DRZgQqMNd/skfs8QPawxtKLpZoWsOqbIXrUlnzM6xMM/fTEjApuse+iXhvLhidiCBNGS1qqPAornAVYn5
CQthxd6SjBDJe129OfGF8dxE4QUSTUeMySMSHaO8Xo2HkbR39hOYgkprAbWKxeWYiBl2ix4uFfRIh6NGT1sP5Pt7unsfNf18zoGjvOi2iT+HSY5HDP4z17gvXp+N4QmZR6BPG+9i8pyT2nWjUJOHsLT7+y5gn0KbHgyXsYhY8r8vfssg/bmPVirvwj+X6TVEZi1ZZicbxcqnVDpQSwX58cq2WG+oIZCNIfdOIpzNtf6xr4A1NLXao3JPwEcGuDoK+8zdZIuEubSdGyk6Jzl9byAFpIGJXXstuLI+f0bnBpzi4Hju6o1Lx8GkmVzg8X7d8NlG119cjk91TItGOuFodTHSjPbR996IV4nWIk+90kxBQoWDPlPaW/1cvODjX/ua487zJ62wE1TVw9Zwp6INAuiqnW9UrIOrX/ugYqD9/AJ94AEX62KxEHOURi3D4FZW4xw6DQCpXS1oLAKJWy7JIuMa6WC0/Uj2mlnWM8LjPVRTgAj3XEkOsxOsWjjeSIelNMy1NjMtwY3UQKGDpAL1QXfJ0psDalEl0DaEPUreCFIH76WNgSr9AdeaHCRiMoYcYqA96N8apY0++uDvvYX+7eJSC0nLPsKI975MiKqxm1aB8oFb3RI/C133vedAATqjeIFOrv6d8fvXYbF8EZis2AuGt14bAK8fdQb5CKoa4HRBvntrymsfsAIwt5q/EHIxjWN6JZ3eEBJ8qbAJq1CQd6CURS8eTmcBaSm7NyBohhYiYYYHKXnhkr3IqkwbXX+x+ocnXUkiT8aU3oe04hHk7h9pvjkGaxI7vgKRosTF1Jqqq0qR6+OfXzko54z4WWC0PI1p8OVfonq8ice97TT+cijec6/h8E+RMI7AuWzUOy5kmkjo4vQuv8KZ7cY79lwaLFs8OSXSqUUs6hw/i3SKkHqg0YDVINd1pIFS6CnIp9ihiM2JKYNDUCu8BxYk1souELKihF9v5fbi4fidCqQH+8O7PwQLRCf5PsLlX3A7XYLP562QZgep3QMh+YfFlMUZywt1xv9eQ8+pWWJnqfPZ8WfwSbyntDvWdH0pWwHK5KAY9L3hTnUQSj9MeKVKrbbAvuKQozq3Vg0CYck/COcfzL0DpkSs8GNJl3Ds0eTJ2pw6Nm9Z9qAvbIxwJQczgB4QB94no3UItq1VqtDyrEibaQkgJITkxzyAP9RY7xuD18ulimsQB9kTCpm+GcsW7MaVb2GATUgl9WtiwfDifhIQBuPn1UqKVZvCt5ut3IBDMA3oCaYLuj0+hJuQHYXDLnM4dlvjhbCP+kyFH5qftiZzePiFK7z5h5NoiJNTFZBBr6fB/ddysNLLbrymfBQC/F36ArqxjWt3AlUpHYkkI/YkXaqnoepD/kpswhyhMupIZcBLllzyTDAe8p5MW9X9bU7Fv47EvRs7bNkro7uZ8rvN20vuCtJKRGJtAiYRdIrPXrpoOhCuSVj7vsGaziUkyJXsuggi45p6bHW/Of7Yl48SYm7EIDknSWuF/cTEP5JdPkMU8FaOSWdKamOytMOK39ePzCaypvJiiJY8cIany7tfpcRMVdDopLvwpczrpSUGlSW/rtryf9Ri/e5G70TA/0WnC3ErqAGZhr51q2FhI5IZA65dzHYOWtcYf5RjsiK1LZexng1BHe2RoN7lljYVEX3ddAHbLo9rxFZ4/xNT0f6FP8F80Jea+rvAAo1I0xckzjOxVemc5I1df6TFpI/jV/CbmSKqm49WHnfKKbseHtWZTNLOKyetPRPKeyn4Y7Gci0JhyIDEebkNrLl4dtZFKkHmqhwJxOmMzDTekzfOTX6Y7H9+u7JRtq+/kYLbccWqDri0EUshnUg7xxyiwToNBQe7UHv3P+g29CP83NM5kRqdl1NTfccMwFGOaLv9nFJWyc/ZgpIXgZn20pZyFiM2SpiBNkPcrmlc/8YiHxH55NvuvUNqkCD9CLhfrY8ND65b958uo7JcunUpPQjV+IiGgsuF3InAd0ebyo1PZXfT
99QmeEXp0yHQ6vQa2xcIudqsSVUIivYuI5sCz0POc6qR1PYBfLbmMZz8X2fex2GPk7vmxoj8s2W31NRx/geumLEE/4aLWHIQTSoOjZajv1sZeyaUqz3AqZ4yUeaRdRUhihDJAysY8c1M7G5Nv/OFiaUfuTi+qIdvKgUjNJ0cEJDwfBWnSZM9UcFoKfY8J1hvoXN3djUjCWZ9IFz3fiG3vSurcb+qjT8r4ADaDni0DNhIBsvVCt4wu8xH8vYMrHyOUGv1iY2alJWAHV9N2CMV6dlLjlMN9vgemN+yAN5CpDuJYgzasaocF+HrY6FcDJMU6teezJSmC/wTbpwP3S4oVpEZAQZuqCR7lVLR0wgoWeh4L3SA8DSuV4uuXIvwfI962adUyBuxuiQ8D37Xm3LoS+5l3jbZY3rj3kh/3EC5lqdtf4BO/ikVLamw+WwaMxq6jWW0ISxJTH2GazaDTgFcuFggZDiBEmbH2PBj0XFy9lU7UUzgRcboYWM8F+nFpG/xN2RU2DNAOiY9ZsIF87uK0dIcFvN9fcxbhYxkcDegw+GhZHWJjB0v0NRBGKDRppNIUp7NmfeTFPlXIaH/XzKgPeEu61HcyYWqa5O7gkCx5IRHOJQ6wQ1GB0YbdZeN23xrp7qDOgAQhf06/ugdLClZZ5yso2ixtwvqLS7T9VchswnVZCz2VJJKX8vtGksXbSl31M3kC2qIoAWC20BiwG0AC7PMxvbZq/WtW4xNYdIvLgq1pV1BrXfEVmPPuX9uCs/IURsvJxgPFQt0uGB0qTsZ75ym9hnRsjN5WHNsklAQptBSHjZgb/m4o6u6hNc3sbcFyvsidmoohAbMXiD0PVdnCjFL3nYFTySGCVxt7K4Q6Dc2sythKLca3gqG0L8KW7Pwp8keIUimyViEQyQvMDS2K12c9nS5/RE85wv1b0dik2W0/wf0e/3dx1f3NcAcK6piUz0+toiXceT+sDIahSebOzWqlKkDs8b7tiprcyH8GYMCMt+7KYVqJZDoFDP5w/g3hpDkPifvm5Tv2FUfTG5wTa/n7HF7u4zEi7lP/VjrfTpc6dwqdTHB+XxSSOroChuRUB7xIdzf46jG1QHxRRFzml3WPQdPbt/zk0QSH9/e376AjTZYCOq71rvPd1WZTAF/nmiIg9I1Uz9i7n9+b8BBkbyuFNCkUtj+SuS4n0f1lpBNUCU0hU7r1yFc5SUKmWbgMPe4bWigr+eXFLpTtKDf2cOjKBDWiWWt/ucFwPOOW3ZyMtJTAzTYYTtZmQcMxEutXab8iCHX/gwyI/hIsherXgnQDiQVS9YOcQ303o5eJNfelgLNNFawjHbZrTF8yeryeUbzPQjSZ9bNUh77SlHAG1UNhMzBDFm7bEPdonSYrVgSvw08X8aPbBIAFnkGZxvrWVX0mxOxh50ZVxQSLW7nVcGsuUqofPsCBGJbaFYzXIb/a+3TAOpVgZUmQRs8Zp8oaMu7PoI9VUQGJKGz+US/IJsFczTFI7V5M7zfigm9VKkBkhxLrCxpLHV6qhwCPb0WOtWct85ReKqbyhb/EdjFW9oJE5mxRB1oetphO44w463pW8i9dIJ0C8Gm4qd3uMG45sgqc/pq5Wrwl0sPbJ8tRSVTCJpYIs570/c7dI16UjffJcCz6Tv/BuLPmY7wtAmqHEn2IL5/phNgP17UObXAsWT/gi6kzmn+dp8Pw9LhgpTj/GEg83sonJFxVpLz6amQN+csUebuoG8yA0E8pQMSXNS9jtChuGxKqdsK6OVnyLVf/vEF3Lzo1j5siAzyoxAdqT5NelfUOBEWoXSGuhzK4bLYD9lQKfAP6E0VlsdSbHp+6Ly5voyrQ7wO3k5HYWN6f5S0Vb9EvLned00DNt3gnPt/FzJk4MQGGieGJ4OBtxGNFzc4tMWsb5BOuGhU3kTG1CfXb7Hf2LjKh7MO4XiaCOJmW4ETX/AxjtOxgc1QUOkZOatPtxOgwMyKff+UjsEb+W+JRHGX9Y+LX/qp
6hVUnlIeAQmrBQEi9kviqlc+vO68MFWnSqKJkFZ+39ybKfQZ4WZDMEgA09qFkO/pWVpO/hFNQa09y2LuILY9rDyqLOX2Tk/Ef5IqHwMysuEMKEZFwJCMAlG0xnWFCPC7d0YNHl8VUyQV3SeRgSaA6UJQ/Rt72zzT3rSyGMJh9fWho3fIe7q46szZkGb4DDHpftptgJ3iY/K8gX9CNYT2ntuheuZgV2RbrM0udH5y/kGj0p4/lPl/ZJm+q5PGv2usjseOIx3Mv9+Dr7EWElNXpTTsVGgOed4sxmsxngwC6Mdi56c6LFjLBXko2Ys2ks6DWYxa0Nws9+84vMtwsrHHQOqcgS6ZL2yylKDAr8x7A4fUDHWBPtxmd0xRoobtHLdE6H82PXZtfv3++oFIwhY7F+Co+8W0fuej9kZFCkUunJQeJc0YHx9zyNbTjFSI9Bz9B5rxYY9+1baGmxmwRCgVTgzikXQX3BEO/yMWRG5VQ4hsrYE3gWt/y61IuEWzdpEwtJ/FpYtUWBV9Kmplj0uXsK44GdV6I7nvKbld4H7/CJHuaImNZiDxEGrkYpH211SHNR2/Czse0jOygKzupJjA9Q9pZsO3p7K038nxIEIFVjRmJlDfZ5A7MmqBxClW6bv+FwiBmko/xSZrBtgaPKoUyiOiJXxA639pRmNtQMvZ4kdqumHSD3mSZN1Nlt0/EpAv6OgZzfB27xzRzkJaTgKf8DcnIqpeebABSkouMraZSqrSa4juWywchJlAmxsOeekmGaUPRTkE0Fi71M7UrvJG2ep4SvChwQ+oZEEXy0De7evR7OWJM0kIbqOdq0sRGcbDlQlH964K17DpkQ4Pbwcpw1cB89UanETS7/AJatEW9X8LbRc1mGtPAUDIdlX/QeXHIka4zod7od6kICuXj+NCZx5VU9Ys2lhItOdJ8kWZlu4UFcPAwmkMl2QpvfvS27hCHbZNv7Z5qQL29J0z4gMiOGlLbLWp9o6yO/4jPSa13OWbgvepvsX8mxk4Hwtxg/LoaWrOqe0q6PzTLfBNggPLOgNiSalqW+mhs0QYo7p+asyLcdI6Kx8GWiQGKySqQb9Uo4Ahmi+HJW48PH2WAfZDEiusP+dfYzutypWJMx0WxjDQ2fsZKrppKqlE2jqj3ppQuuP6lhw9kTPc+86oAUa3BNH0p5KBp2hX3JMasLxKRt1lNSalGfE8t4LcUrQUfLkyJU67b9YUt+EnwDaApOawBkA0KXv5+Y4M/RboKWy1KAF/NxHF6Z1/9Vyq6hhZXqlcJadz2101ntPU885cuity9FwS9fbz0hayp4BMzQPoTAuKz1dnBjd7O1TAk6fRDu3Zm+358JLfsUW6EgCMYPjbomNABDQKSu5KHRxCWNMYrBJw/errmwAAFVJ6oW7OerEmc3zuPYYU29N5/NFiGi/FMVDaGCmCKqj19ytpKgDbp9+1NNmG/dQFDVRMuUkCFlfFMN5si6J7bS6kPleWwSlX2jo17iwroSLupYwrQ0ajk6d39Y05iCKB27MUGqYtjzKCjqZTZh4SwkgSDM9CTOc8z08R6Xz8KzfLc/LjCrKlanlEe8cr9GE4AbRVuT8nvJcTDIF5143596QWNpupJNDYJKKBEDVWZRBAAAQwM0BRY5dfUFMbgdeI5Ai+6UpKzGjDZbh8e0cpToVJixbX2If+R2N/W1Y3beaDB97kEuWLZndYgIfxLEHWpE3RuCDIvChNDveYSmLUcJPeEForL1Sz+ziDzlHpHi5IS4d4cbQ1pQQtCh5xroFqbqCEaATC3aqfbfzXWvH9U+jakyNxl+spupZC8UuFKOQysEwtV7ujpJXKCONwVBphGag3SqmbA/XYY5WuIgMlFuIlY+24dU33zQ/qik8MufUxa/bvgw1rPuKwSM7XnOjOt1K+wZoltNS2tyIc/JmxzXbxdykE+clhfhD6tfGkgqqD1XTQaM1JSw3wXAsj0i3yRDeziy8tJN9Md3Pt+XAj9poTovKU
OdiFNZ4cBDOnClGcSX1DaUYXfj4gLh2+g1uaNIf9jXBz/UWP81WDsA03kMZ7DRoqqb5Do1mv+lrqvkWzhDlq/lUOPUxDIaIsQ/VDrlp6gfoWBi+in+R4fEcbxfDQ9CqANiKQeLePhyxKgLRfoopvJBzMzJE+rlumdOhZMc2FfpNLHn7GlUNJuk0/E00ctd9ZSgd53ityKMfXTN/yYT1OZIvmxwLX8cBcwVaGLFxZ6eqvpYJYWiAr5c/wOLe0Hu/kTfg2i3KWNEk//bUmXUYbR3B46v7mvKBo8HGXBs89ypCmWukJIZOAwhmItIqaLOFj+XDtCVcb/8rUXc/LI1KWWtHzO/j1USspqoZ/0WleeQ41A2Okunv1mPJewhh/BJdIMMuSYA6e9odEOmAOPtFqyAkgWZn6VTZQ/D83BotCLS34t4D7LQtU7h/LYE5wXQepsyhavOQPjM8DFH/poRcCayclH24cN+f0CpfbQkbYaiUwRH/QQhD4YNN/bKZSrJgv4SIV9CPHxTcz91uQBA9g01CL3bdNwA9GGPZK9nqeH5IRfsalaVgXJ2Xh5GN5gBQJPSSz5PatL3unK8OFCpXbaq7xWTVZIx/+FZyDm6qClDq/sFsQcmBFoMfRTV/ZmnYYtiOobdsQRBtJtvJy5T4fsrLTbaOR6Pt3qCv72peQoA5Rt8diRNLuYliEAZ7xL9FfM/Q0G/yrYjX0fx3tc9s04V5Ht6Dc9TDlUnfycEsHu9GEFQ0QdvfibXbSXdWeA517WRSdqjlq8ZREVAGCsJjQS3hcmdZr1S88RVeCAaxZPdmudvPcHvqIs4dZUMlLGMQ2PnsVaStx0nNskb6khDiOV3rCMT3MhQhZxp7It6Eq8g08DASpciCUMQK3NUsjcrLs4Rdt9qVvBk4PrLtkrtGJ1uQnbI5NSwUfm1uRM3Q8Zpxys+hzxYXqjmtZxtH4DD1uMzxJRdsj7nWxVgE9eWIpzwJEzGZcdUNJ3chUT/RKfzwNq8Ll5kHKJpkfocsmwK062hHI8KUnn6iSBSkm1kuE9AivtkEmW7TcTXBfP7Ptds5TFO3xFSld2CatymTe9UylrB8c+IAy4xSv5uOF2qgFp4YWX0atvJti4gi0KEv6z6oQvSah2TF0cnJg4wAfvWp0Kvlx1cVhrlzj++w6Ixto8sO+LLIkjHayWTA5MaRvJ0fvfm3hwB72/G0+3tMatieg7RVz7QLcmCOSwdRdvaosW9mIYVrZLM5lNPEfQ8sfpWcgDnznMYm8T636GgmE1ICFrxX3BRjXB7ucJMetzrVlFwATzs458CLt6X1nNzm9lD3WOZTU41euqKJLbzYvcA4mzCt/z2b0RcuhXoAk2yDcf+uhfXDHh4iw0RrDgWafifBOt6gQYUdQZgcMergfdqHtDUCQ1L6IXR6dC6oKpEm/0lA9TjR8PDu0BCmuQn8z4qdRwR7PsOym7RUqQGKRDFOEGbzn0c/zq3ZKDF+zU1zZ6GapwYGXpcC4Bddr3v8Y+w6CxqWAoz9IjWgZ9cH+Z/It227ip15bAs1TsjyGYQHyspmBMUe6VnssXT4mms4ekytOxLc+ogpTVSKsPUmf5nehxEgvvdFP/OyyVSaMWE1HtHagpQb3N+1AMKfAZVzoUn/eZH7MhREYTSOM06LL16uox4CKRI5dLuo3b9GvuKldObWjjY5EGRzFQL4NIRfsPnIu250GxI1nHzBLmVi+WbAAaYoxQq4grKsv4VDb5BGhmGyraDFla8ZI/TBOO3G86+Zgj+mkIYzqZif7dHdYEgJgREBemKs3J0VytxEpYI708wDhB32FkGjEROnLtgeFvTMkFbMhBKAG6k5Hh/wjiU2TvzdyUHosn0DO3Mvo8U9gdOyVhPAN1jTJjtubUDaSkTT/K7lrdeXDAgpbinFC0MVdZ0orhXgVGbShciVfYo5GEAedblE/+GnXI6sYGfMkhMLnv63o2TpLN6cMXTALmt1ZXmLNoPN9k0kzVMea
qOCdE79RdKcyL1bR1JGDi7bYvEIsDAZAFtLq0Ksgdom3xtWcrFU6pBCnNCOXxYDYTawzONZO65A1pJjHokast5MvOqY6Zfgynf48RDLlKrjeBZrG3N2osT46q1b4sGv1mjyIe0DJx5+WGHwS6gsa+7XYyjPVpOQrVuX7CGXgEa/dqIMZI1VVenUzUS7OfoP/OEGux9V4j95cnJRCL2o6dfNnf03ZMh4IlD4SEChGjw2n9o7Ulepw/T73CK7VGb/Jbuss6XRy6C0gmjnm0vvmwmsB/U/Nb6KOhaOJcCUiNy3CpAdT6nqJj4Wxb4pFNRttg8YWsZy8MSgKs63vyDAhOkNfZ5NH+TSPz2kFGvbFjWA2LPNDn3HY8nn8j/LyMA2+I7B8ryDLShbEBMzqP6dWf0i5y7NNKEEp1YAwNxC/zyhdBNPL1kF8frDsTNG+QnooVmAXTSwsfErE+oHbvOYye4m6Fyeu1Z32XuOAh1YdbVEHZ3kBSb84smXAWh+2u7IW4BfrVKhY5IdzJGghUQ8Fl5EdP2VTg/xGKpk3OqcuEBSSEjLlkpUqDyNnkva9fccauAHTM742zdSXa2iA2OhZr+NgHC4orTOuV1o77en3lY6iWhl9vDu6y++h1Jhp7w/N0J2iBTnXHSPeUWpYw20jyQU12tRpeY1Ni31v8cHf2/WDvbLuyu0/hyWeSi3qm81SEkh6elSM0F3XL4hamUP1eyVSilo9iZq9NmXRr2XB+tnEH7yWM/5d7bRGaJeRpvKFZFsiGWBf49bFZRhMpb0kRyZuGzg/mdYVh0TefsCyGAKXSqdj/NOKs2jGmaQqrp1nDtnaHpYNnY0GNahkv9S28tOOsZtFVst62UeEqOHqOkCa2/sZ9Ej9FMCme1NTLbwS02d+2DJT+Mv3SXaGHRWRha3kw8EtUlleYXd75YL+htldiGLXMdQa6U9Up9N1FFaoxBWU/oEaFK55fKKnUSFlKn0Upub5SZ/ia6vCe2OBfoo17Hup0Q8BCFWsBbBLbzeqRENTZyOBkWJIY1INm8WcW/P+ciPkVXFjdMZ+7uXlxJJIB5dnQ6a1qIynAM3h5gdsOkwkdb/Jd8T5jqZ255DXWXfT+f7m0WvoQDBQqllDe84P7L9UOvReODrp5iG+0xQWj7y3EJKRSIUlhVuX5g23+DWRu8AFZ8fEB/pnJCWjKCzr2bavPSvfKKRnP5ImeDktJyj93XXYQT/gJUiiUU8cjqjK8ooMh0jLlviaiWsNCT5GT/6bK1JkSFJwRKw7nafc1EJWBsNyNV1NN18ovIkHSOX33Too+ImyQXSUF13AhlUTSKsueSPJySTSKZ8APln9BYWaQtWypzMqOrU3qH0VosiKkXM0MLNBkNnGIPhLRuXYXTcXYlrK4cZ4/nXEQhwckW1+/0KmSqah5BETgiMSpXBOSrSdQ2yhGjaQNnmlGyGz83Q8oE/j1bFliT0zuhFSXc4fpRBMjfmF2ZRl5mQML3sl67SvcBVmBOfhSjW2HiN/5xUp6WaQtUSP6OSVJnNBqIr1v6a6ZLwrkjWmUR0Apav1cnCQJrvogTzw5YRGuUDXwjsR213ehsJ3cg4Tv6jRdX/PMOH68uJ9kF7IZZfjenlig3pSRFUUXAZdf+jd4rlAq/gXzJSs7Itqwc3KXX54+nHmXBA8dFN4qeoEsS1EVLVRBesWpISeGrhJwhFKP+1d1mYDYrWsHkv07bPbVdxIaO3uPTC5M22FdJ8vqECeQE6LYKjuiBawpm/LFgAlegkLGXNMo98oHQqv1BG3t1KuGrtnt58q+IdUW7h/SWIYSBjZYF/7p5Pv79liG/gshIm6jGWuc84T9YwpOedNBq66jMiCEeomFNlfD7en5aryasK8a6YmlOlrIaoR5L57gkgLibkrV9FBCvd2F2QeIdBn/UXvHw9w40nlxJwDxht4pe/FbFqwKCI9EA0DribZ9E/bMItU/FGdzKsBgsCzevZaYQnoArifJAsxVkQp
/IbOAnBxxIjUaSgtllzdLTkSuf15qNnH6Kx7Xfd50ecm0TPLtfPTSP7kLIilTdnFyqe96E9JJsXArNRKioEJgpwd7CEELu4soijs9lqkVaO8BjWBoxRGpIqNxzwCJMvIPUwPpTQn/LjvxppBnQg+aDv8rPp5iknnKAm5VI3zRCzT2fd8rcwtB00cbzovL5jrSYpnTXpLwYEKNqYmAqmC+8orY4E50ZhDLZKq9RtcbCDYLQcH+1gdi/8TKjnJaoLHDm02TwijfjNAcSSFJxM9dr+PazodqUd35ZIXO+RqhYQ2Kidbbzctcfn4bTQaWX71Nf+53cKf1WFPZuoL9G1OLBCb7kkiMrR91Gw18KJcLC0IS9p3pCVbxLHn7ju1Q4YDcElIxKN12nPWrIAfxzqwF3lwpZR1CQb/6a3k//GYSSRCjAs4Fs+XucbYSjBt7poEOQDOvpmFOaopdxDBqYeeB+6ShNYR5nwYkGfXcSej1UoDHqYw4D1ro52VQOODtd1AZe3kgXJSVN3PuFI0X48RIQk3cN2XA3bH++XAyp3+C+lUkEAEFA8M9Tk881CxQccWxnprYM+zR2VOSrswXhXPhdiXnmUA92hq2mYnSZ9YwSbtyExYyGvsI5uR6XQXiofDWVBvghkqmRFfjjt0m0g+ycMe5SNL1EtJ9Kx4S5Im2LBGb3t9roPRbFjiR5KIHQonf2IAgpBzdNwTCDMA6o5XWlgXuUYP0wQ8uI0zoq6Em3Om8iIi1qzSWVkjGTCOiPidyu3S0wR04M6WBEZm8mMD7oSZZigRCAaX7muUXhG0m64IQCjTyhvmaLvNcFruQ/I+wsx2mv5veHrkkr5qWCna022k00DcqfbWxg0bYQbuIRf7C8CCvRAOzcPUVZXke+g20kkZzLadVsQdZKAWxsp3sK7OHc+fycD1bIVvx4oRcplykdeAM4WbBfqqaWZnBKvmkYU7S4snviQrxOIQI5NiLUSHiUxmCM1bEJBtCQMmuiyggCUp7apKRhm0N2+JcaURXwNufMqrbTT0xdEof4tg5i6Bmv0+Q5cMOv0/ia2EuLw4WzHpt/wNfbi7F8xowFx3Q+RHF9d/MaoWr/MBI6wxYXhNDdcfRzcssdVB8fccUqr2K6iyYpMESOZH5wbasIDQ9RvHyWNOmgoWlFA19TYtnhB5+AmltURvhkgGENM/3gRTQ8LLNBVGnre1sih/GERxOWTewc+NJ+xnNHgy2agUfXYv3Gligrc1rSBBe6Ko2Zsfz/mGIh2ud+atyWvCWoS4OYykr2GFX5HLv4Hmj1VvZpOXnNHSNTrEZeT1q8wtsUCxQh/tt+/yQXAOBSk+13ZRXsRFyFgQFnW+S+SgqKpG3BPgRAVbuQ0PDdQYrYDrDDwfy8PkzkTaNSYqlEWRJvFJLpAqIjmNMhYGjnR0ked2Clomje08Aw7rmE3Z9uVVZ6YlQhqEfbs4+nOC/niYkqDngiF15Xbf11i4XO/yrDb3dcgqlIcIMRnRFLD1ZtG1PVyAdgPpxDahtXD6NurcXkHiQkce7UDwkFnaGUyYyB8UBPCauhEzpXWLCQcua2AFtZEethvxZoc3bX4OLIkoodv88+pA14lgn+4OFTBxO/oiIYgp5f86Ltb8irUaCWqnVjc6UkvvvPIg60bEZpBP8eKFYM2hmRVjOp2GARYNenLA7M1+RXRQH8JhkcKh3/+K2UGvsuSwvCkfvmwffyG8EDxwurHRoQqtcoeBE/ayskGqvVaujClmSqN0nyy71/KAXU8x0NAfdNm6YLfJZFp2VM66hKSrFqnd3E5HIq03NTNei0mj8jMF3yQ7vMcYxeooCkocfSzakbmMHxkTUmqZA/Op9XnOB/n5aNIk14N4ClRk5Bnbl6fvezvOnwZb3DN3a9l+JyF4zHVFjTisgqi/Yi502WanY5aMGWWmIlC/oAD8qOTwVyuNqSb/jzU2LSAtN44H2E28Xb5C14qWqVHg4Qh3xN++T+gIUrd/P6/x
3tc31hbPrmr3UlywrGIjKNeDGTKK4ZpP2yrmOVrM9tziRjj8BAj+e+AFHDr62qGWBsbvHYKJUjsPZPdvEZ8E0CIlr7IuALz5xac+LWPU+slWEIYJegXkHkb0BhAtk9lDs+PZG8EgYu8FFf5slCHrwSR244zVk/1hxChME6UwFUHYL/1dYwhFwwuSmd9NLTSKiDFa1LvP8S1m+tkxUx9gxbXyEEIh0Q4z2UcVxG7pls6EaQOxbTAgOkOPXryArA7BJMqzPP8DxImmz40s2wJ565cu8Do9SkHJ/x3kUWdY2kFvHLZe8zNI60jC9acduXU1ymIsi/0rVRtDThrNt29HNtBGVqvGis+BQGHs0WUtEqBctWyU7O9sq4F8Puh0mmWxT2odm9bXVAMhjfBviXQYRctPxTwvWMjcFlfBJPF1gEtzYWVuiWNxcGBkl6Ib/gaTtSkoZKzBQbbWluSy1cksnNZUnkiljKXYObKkw93eYEuhwZinH0oiUfjnfi0Xrsst+9qSloGbh+ZM58K9fRDDG49hOf/8n8x1FEshSau1vf03T/7XZxYR9hzDftERHQbDHthqCrLOtPlZexJITSwG28OOcZDVMphMk9e+gmdsm1Tblt4GjPySMruSOW3px6FDdtGBzAabA0SAfcTmfylIBLtU8a1g/bE8zYosKbNPDJX8R58rk5b3EbO5OUNkI3b2Z8IAyiGZrPE2Xs/Tam68oJGARhgecWmTbeE2XVF8T+dCTRQCUKAHZ4HMjBR2j2s1+D04WeIhJPJMxf5waPjc2A4Jyma2Ma5DXP/MWUgzYFGmIm6YlA7IH7xvn1FJxJ0dpWp0j4XUaLujnubtsgu/tAickrvHnLNKsJRdiOO8agpFSqqwHa2ROY6OQr9Lvrp7efcyaQRne+2amB/pKm2Eg12m96xdc+SInUxAMyDHXt8VE4pxdyEmvtULO6+TPqYBlnZ2In+tnKJcp1lzEh/sddMp89S0UAUuSXQyRPq+8xphk+VFbbv+kmoFEiBDoJLaOUnuB+ZjYGa05IbJ4iQT8n1q+RIMOoJudCX9pFSqBFWft7l5bXfm1oFJxi+faEbmJVu0Mivqiz/WUICaXACnRQhIOUc+XrHsUW/XT+M/K4WVmMg7dTHgoEW+2jWaqJF6SQy/IXjdjkrLDDZ4OWMm7mtIK7qvHwnRGRE2YgTAW6N9cESPE2UXZ0cARZemUJqUlJrVoCX3O9AIQckpLqG63JngayDmslXN0cQUT38GjnucqEGLolu9coVOqa/6PWU5CD4r01KiZWygnQH/XpTV+9dqAK1jm2Ub1kES4Os1AlTG39GT7GrRZlarTUeFO/WgGzSbN6O2PTU+6i5lXDpa7IeZkINnXPAokWUxABX4L5RveLJ1XQcuGEWIn7K6xfiZkhnjs/zjiRExjqqMUEvK6WY3psKjx35FrGuNvRgXpyWz9ymOs/GvekqeOza4VIFK2sTUlSk0OwOWXC45y821QKW4ZeWm9z8s8dSnzc16wt4bjCmxHx2u6tsfqc8zNmUJjfySsXpO7oiPPAu9Yd9yh5cE3tlRVgf0Qozh9OwALJ7+MNFJz2L4A702g7BB96v3r9WkomkOG8QmYcImMVbHeTNgIg8gCxCnNfONj00by1EiejTDFqLU19mZoENg/NAS2LX7SMrr76Pdpsd9BtNZm+Sm11TMb/3LIAzGVwhukPBOFEqN6U+cM7SW7+gxzW441S/I6KPTBcXSfhnPrMtluFNY7DsVhbgxAOse91EfO711fO1vg3cbh81qtvxM7My4joEmkHmH72GfeQRE5N4zsOzeU8AzY9922RBXskMveD6MuKwj6KtiUs8b5Fmq5o5gSWEz6/neKsyiPGD65Ar18H1F+sd/MW0KapB4hUCWEDH0P6YW8nPBo7GcZNylR84cHvzM+o0XDIs2hIOB4jltaMDVDEn5ojCs5HkB4RJsHZY8XbIExHO3Fs0Fwaqzh+0q5LleAXQnZ9J1vOOP
TwDS3TLFplHoc+T+P667bPKCMpEmN5bvZME9Nonoy9dUMEAlVOI/fCOlfXb2TSagU7JIZ0C6r53ih8EzdQXD9IF7LR+SCMUsJU+vDDlbKVM3l1m929495xzlAl+nedlwNfBw40rq/zN+WsM04zIcfAOqRGlp3TNj4ISwzUchQmEhkTM0f2iPsz3+G9Ggtoe3RJVidMU16LdKR7uCE9cZfQMYCpRP/6BQWfJPuJ8aAD3o2RaawpjkM/W74IO8xacRWg500LMu8jJofiZuNSMF+RggMhTeUz14a33Gxa+AZXRFNTwAx1mhNuQXnvA3Ze9SJKgaQHrAzzO5wLzJdvkO/pRj2Cn1x/JsbC+orK+Z4Vvmz4LI/B7HKTY/y1ze5GvOn5pGLDdPva3corFThC4DKdhDBKj9W88pksjByxh24SaphzQHDVS3cBPxWo3lYHcccAlQZt8Ox2EqPuKF9J+0/Ky7LvyPNdIa+ws/ssReuK6iue5wkcDIvxQc/E9/JHr+HQPIRKlqNiSlP0jXz0XL/OtdWTG+YgAyz8N3a2lwXBbDN+S0YZ8CjwlQVO+AL7j/nToTiidKkuTl16EtF/duk1ggSrfwQOSdclgPdMmowJdosWFKjUGyWt1OdSpWTexf6mXA7WOmKzj5sN/QqenEeHVo6mC0YrAlL5VS+NRCAOR3/QaYJsYHF4UITQZWe8r2woCOWEO0zIgsheioF0Ijqg+L/rzOlMZ7ZrLn1Rh41FmzoIlniTef/aq1hbVlhcd078wOIMYzvPtthDatMTENmzYysNJN0NhiZQofcO9yTjYkHqoKGqI5NnP/Msf3PjvRZttUKoSfJUrzMoSIvu8AnseSYBspYeJEpzwczbkb4gMMyE4Si8hWkr5MaGc0cNaacVKhyPq6pR0LXre1woo/ib4siDeL+nNoL9E0Gqqwf4Kl2OeBGNPxMWGPC2icNyoZA4yoydH2XaEQX+Ai13j+vGl9VldnWSsqs1IbWqsB8optbek3MXIipqOgUgtV5vyPLbaXRsZ2nz6bjutXCrDaQ8cfB5aiiiOmvkz9PFJPVzg4tBrMLKQ5M1WWtmdTrphKVyaR1bpywEcNEU+tASZUNzVEVMAar82VLugoYYO3HoCCBPttyMV/3ZJ/vukaT4WTq0nfoe8j86JTHF0sBHyZG1uiezNOQQFB0TJ5eZGHV/5wpI0kEC6p0u1Yo9fldkcs50k9bhQ6lDBxARF8Kur3fFbB74be7bmDcwLWKOqEzSTHxXFn0pGFfupLIANVVnqqbRV0zzLDToE42UWj29DvsDBcbFw5oHEKurUduP6WtOzgyvrpMXPphRNfR2wFO6AtKOG6vwhGyY4kZjPjpfEe6DalwJQUNqaDliuWySZgDWo1YiwNtkrDAjjGH0+QbCTDbCvsLhr4wEHKZ9LQ42+WQenSDj/Fzfdch89ENajus0yKZkpBUKZfFX+FL4P3MVNHg3vlITbMzFK7XGBWpbYVZ8Ze6jfoAzVRRig9yI9Lw3/cWPjIh4t/RHPuotrz8WSG+PjzNzipBBaEU3X0HZdb1BeRzGKr73oiqthRtDBWZr3I+8BMhR87k6BnFgR7dL8GQlfyBdyGfyjTkewLczb/m1oP8A7C/yT2m/MTBr8NRkejBBe2bYWu/CRjxIRfo3EDE1VEKy3B9Tjz5DwROeMzgg2QQPH329yJcN/p7BOSwfe0uAuPGtWa6xPNNs9aZZHyYTN8KU7ITzj8ZHo1fzeSGtw2P4kswGd7aqlROBPXioDJeOOh3a60OsH9dPSJ3psLYowFOfaMm1VtQjuYgTHBZzPhzLfxWRaE79v9mKo0/TE0nGiSrGHm73u6O59deY65IeH05jtdhxCSHwkvPw3jEZruJp0SKNQ6q4fE9PRN57ty9BIsGDnMTNzUYmVWnpIM7v3iTZhsCijYMdO8OH1BKa4HdOIwO2O+VbJdJKxuk+vGUP5Lk5bfDCz6pWrKkX3q7oUWEHBOAzHvNQlJ
7A6lahjFrR+2DY7QbFpkoj/PfCWwy527QtMIlgjbAwTUHmN/dY4eyxImb+ldnSz0pndX1dsnxLmpoZ5iB6vaEZ89eB1PPkgr313K6+X9ftAJnbycwQ9lWLvGkrjc6b3HY4pc5hDBOAUy6b60ehVJFLgbsQ44hoYjjDG7GAY/dIC17vxwiaYzaJv+CDGi41w9vTYQA+jVz0rL/LYtKNCbCd2vocYkagYZnRxfa6Cy79EAzRBXgUjDRvBGHRuKG0b6ZWLmzZKFE93oBWGjjc3ESI3YT0rXQtOsmPdEC3BoxMmQv3X9vvgXIttYQPpOAINJMTmEMVWkoulEKEjMJmR9vlmmT2BBwl7Ax3ONUql0uDgOSMF7bqorAOre/CW7rcvIH7VR3ibvPE4S51wBUBawlFSG9W1u3UzeRPpghKRgSlN49jz5/on4pPUry2frgAxWVmQsqTfMso5XBxJovNCTSM+/PnHxCWVlahjmrRv9Qbf1rv9EoVkxd35M673hCyIodC2hracYGxKDKQaf0K+tOIxODzzHrrkNU/pm/3ExXzmWo7yK+vAZxmyqQQE4yXUdcSLiWH8J74vAqL3ZoiO5E0B9Q7Nu1RdzXp0VLuzQu4hh2JADWi15vfMK+ayMvhpb3xFZIZ/wez2Ejsm2KZuwcvRdbuTrxaCXXzFBUr2DPRIFEUBsEwSuh4IRvS4cmTTP/P71qeNdsUencK5dwDQ3962qpM1oUVnjEBNR7Yv3c524w1AAaQR25WZykCkE/L9NXxnuEjyRdsUIJEmH0hMq2W4gGcBJpCCuMZ/rwJR7I0YpCNY/jg4MCXzfGnVFEZFOo1TG2dJ5jHQPg7MLm6FmrYuE5IhgOIhxrNqHy5eskmrHrLkIAC8YPAhegQH4zH0q8ECZynjRnbkAibk46BDq+b/+n8nns7Sk60MW+s3x37oFYmxNoCtv9L+M68pQ9REJabvzdRz8xZNneH0CkJkl/BAWTsyg3xTdg9JCcBtsJzcpjx99buyUGEvF1kWXp9hdeeiUVoSKGHwdElw/KWgVZh8hpU1pC3oheYQjAg28gmKXeBriyxbqpOLNKl6z2Yhl3r6XNvd7Cf0ysllgFFyJiQM/Cl1LSJpy3lCZFLz/Vx/Y56GhPCvszLwY1ydjLvQgk6zcv9Gv825kNwn9R3JSvNZaxYy6R5hK4puocI93+wVIpsOBH91XyYvQCq/ujE3GA3ZlXVv+wq5/aTm5TedQxnmXiIrxEBhR6+YmFMk8iQ3kCpECfsympWWmXPk4S6/WPF2BQgteEfUB1IiwUzFGErmpxGjA2LMF+sd4eNoic7r8Yc8o422oDl44rbM5TKsyS945bR5k/tvywIiGsJXOrgJhwuB+Mh2zK7PWVnoNSuREzGfiDR5uiUWoGngt1H7coVQ/E39pnaVB+8ZD2eAH7zspk8SD0uX8uQ6+VaTP/I2K2Vgc2fXr9MHIpCrlWQivYw3iqO1GBGLOAbAa6VzX58YMymtraf+FNfCdfZ8ULSryqu/ChlTkh6Bxa7NcR1QI7aynLKoUfyVnusc6kfrdWkSjvMWAvyNxA8j/x8PwhRQwFwuX10eg/yjLOrBw/Gth1DDZ3rOqc2iFf17YyzN239NQSQJ9rlvxIkGkhP7o8v09eCZVZdbu8ihnEGyFWzz5DElUVlf/N3hVEAMv0RpDDRq8Z18wXTalf7UOxSJxm1Q5affAgNwLK1kxn/6Yo8RHf6h4xGP8i/fiWd02EkOYoz1bv3s7Sf/2vR85D6SW5BjLzgrTopwbZh8DDvVinJDR1hqF42Oli9zRITLPJgBGo/fqpzZ3aN5Y/ByPGEQI9M85WZ1+6JFJMthOgk5kYu+HdTwhWjnJtsnONl7bh6aeMfk06iiWr5zEHB+zafLcTqQDbmvxbaGd5JdipFM78PbVl9Hn9mZ5jqX+PU5+JwhSTKBE1GnOXKkYFFZxPcUdtIJFLcf8EeWCVL7HFvCPyiLBmRnQpNq5VcNXrtus
tSitnorECZBR3wxfn3dXSyubDfG78udC2uVF2pdAylih7tAKO7PW1vMV/7ki65MgoXXTPaaX3stmTVlRZry071TzLO9sVW8ZeA+zYeWQdj6hUjNpW47Cv0SE8ZzHC+A2BUKfGFebvCh+fAOEcIcxUk+Rq/cA5IQc/JhEYyv+e5f5nejRUlQW5GgKgWyQwtcKvOy1KvpGkhv+4qXk3r70jv1b8eRBpdaIMVTkdgrMIMylfkm4XzWPnUzo4vS4O4fLW99Nx/cIXIyj2eGyhDfkHZ+5DcPdiMyrgIMlL6mvgq/dxf8N7t4HyZ2IW+BATCshuS5jVTE2GVc88gOX+tdVLGnYAWMd+MHBzXoDgBlJoxYLOYT1DCE2kLcxved6g+yMvxFkpOdmapM0N0Vm0PUyzKqL/UIhi5MADOH9nXZiy83juCFQFSNirY0zBIk+txXwdZIACeb5Nkeb5H/QQjVBmDVI3zTxiIuxYIOru8zC6WyIAxSVX7sGSqOyRvmFANfSaEfh0aIyQxWEXPBlAN3z8TzOmhf3wDORu72J/cUo48C9TVQMQ1C62NFL02dgKuH2PZdb9eAwjp4prvJQ2WcBqJeEgIFyvplMokJZgVI14lAwoEqlaBzVikKTk7QpddJWZZfV4Mk9UDciYm+LGokPW9PyKpT5le0LbTbHPcGI/gYH9xgucFyAaEtfH8Wvvx5kYL6JhjK+ijMxVyO5R1FGucPhdy6m+C9MPYkOY8AzX+Vyb/pawYchh7abwXo8wCoCyS33Lqe1aLiqQrJGaW3JG+sADPpdfm2dbqkdOdEedfFXbxgkK7TGdV+qwr+O/Sq1WMGhwoIu1zhl/C7SFn2B7kVyMfJ/f1F2+VkFCAW2wniE10t9fiogce+FpQqjh0pf8innuiqgvvndybtskvFlIo+gw4h/pWLdaRhDuKAZGYe6dGtzjAiDW+BHLJ5PuFhp/56gO6SDSIsbT2dLRj/WUstNKkAPaX3D17mjWGDOungZFeuWSW9cCIj04RHfwulbAbPfNmo8OS4htlulritWNv6McWCadMVs+Vov2txAeHRjks/rwGrishz4woftxqusio6EP8+Fed1iagOead1sDmvy4dGPCIItn5sfFAEuZ07aDn0j9XZJVjxr/NbS93cG7sWEKiI6Nmb334xkyQDMw/Y/lBAnuj2Nx/cYckSWQdhxVDf/aQIoQYgTJ/zn7peo8opOmRpHyQne0AWeFWtpihl+3Ooz6IysZwmosr6wpRoRVgIl809ZkFv8n+k24NBKAgAANBs27ZdP9u2bdu2bdu2bdu2bd4QN8gbsiZ5gbJcJ89QAVZSswHhmD24ujBV87+rwRUJ/9cCL+glPfdWbOHkFZS7wrUZu870pxkowQXbrM21KIqvPgd19hd751dmFzy0cfZX7RBlGcTrfJSOjYqufio5URrykM2FgXfTvT0kVpUIvOkErggBQOMcN8QYh0avszS0i7VL1cLkgMxjcwdcG1yKt0RWEPIOj9Vw+yMaPQWFeQwAVLMYxtQlQL6PSx8KFsZ8rSZVq5FLTkel73fL4mN/zt//l+B2VSK96tHHapPfljFi64q3SaHiHe4x3c0g+Su5VnfEgSO5Vlbh9VOKo884gCGhN3wp1R9nZrrq2nsBIbaY9pNk+cJFHHt7HMerVwaZoDt9L4LAtJWUDZBUfYThI8E34lrtGF15Pm0TvJYfUQKlAoLYNoRcsEgGTREMj+QyXZ5LsnHGRLcM/CtKkag1lv4QBD4FPfLJOkFr4K7WaDGTpZDd5GnIUJmpOXR752YMH9baKNEvHzV1qxiGikpRqFd1wnnF3PPVmAy8LQnmkZQ5+eYy7SCwNl+38Fwv/+rHXRqhPRc6HpzRySi8FgWzAgapLlJihXiDDSka47deXZzwBPZNCPtqOnZCrJpOuJh5hBNjoLbaKzcb2T3KPNG89uV6dDo/18anporjJt2xHZk62eLhwFUrRAZhynBYhJ1N+sn714SIP
HULlTNZOBYFtMwc+eMQhCKR5S/FSJ0jkSHTx/HqIcnfkpiyYyx4dYqSNNu2Q0fGH2TeCUbzs+Q2StgfkplNGhlttwYMwZCmRtyIt9GoLIO9caOKFwVRAPENmA6QsV28xCESZjh7SY2R4wXlKnkJFA5zCF9z+9rUAvlckKkM7Upa3YjoyZIWi7UVo4zOOLwBgxF1Tq01zFeRgyYD6msm9/QYlm5PG1zN0BrLwzSLZ0Zm1S8yW039DrdbXmQtfPpy5wTAW99TFWvdHWOFZA6e3LwWm9j1lWIrwRjSTKRmH1uDMlZOpkqLIDFhulX1bK5uTJ98oCkK2rkLXC3YVHf4qmzlhIRnrlmfKnHCKYBmzaCQ4oaWEpc0SDst1kpclsN1/wSsP+oFGd3Dm5X+c64HR+6iQ2o9RHLOSII3BODvCrm39Jxg3MChabcqZp60W6A82QSelAzkLZHkUYT3jjb9ZA/yUblCB5wM8rMwaysr/i/cNNikymWE46serAAzJVIkV2VC18jWQxPAehTnTHq0QBdBLijIBK7hflGUmSXh3i7+db2rGuj6qLHxcuztBu34xvS3/GPrYqPpqrhGumzDUXGVISBUtf6yvX8RKHFA1VcUJpbpuL4dh8SATRjdEZ9B8gOaLgVdawBqNCywEmIbheyqmlkL+ygBZ3nqSfVkc70kLyhCCxowGEg51xklCzr8xxNst8T5hCObCycqV30vx/SVF8SxHSR0z8bGQv4n2vKWBqR1ciWYMg2bSpSSGo68CeiVx8RBBzm++94ODw4i5mQdjcAJJ97kgJ52N7ox8lK1GNhD1WPnt6B9uoTa9zVe8ixL9XIR8pTE2e7Yffemokaw7DsltWEXdNXxUpvnSXcXMQjZTG8Vms9N76TkvLSHGygP21mUsczMhSgd/WSk9Gf1yhwmVc4lp0UOFF1ryhuIDMmKVXPeVayljySIrOPrbhUTxEBU0aYU5eMNT8gzlEDQdVme7a8dyc7WHMWpJC8pPO3z/unV/a/L+/oKOsGqgPdyiN6i/bc1YzHtL4wIfm47Li3Qyah7SHUg4CDZjVXzT4v3zLz1XLClDPjl4Zlq2YG5RZEkx1jZqi8WpUwQlRHzyLUlfojai60H4x/a7b/eBrgK/5IwSs6yEO6AdO6OKXt+T+AfFVXnwcZFbg+TmaaXY6/fx/nm4RE1Th8cp2Xnh56cTSETbmrKz2Mo9euitL3H62LrpVDH3RRF90jKbdgbEDXykBdnlktjguYRA7PuTUePRPsVUVY4IUQge5bAG5KznzIJN+fVhoztz+NHAtwjoW1wTr0Qn79R6vXcI7c1giE+TVa8fftkcxGPz3iIzKcaeCs29Fu4H7ZBpG0yRVPekv5PrDjL+p6LXqzozqI70/dZn2kPAMXP7Nz0n5g255GVyZZoaDB74U0dnE3HVKS6/Nn5gB7B6vlwRczr9R44O5eDtrlLcGUQMg8pPpJ4mfSIptdYWT2zeC72MP9kbFSUhkdOICn8QXkhPELEvUF9N8cwN1svI6ohuPRNQ7qgzvOHlxWOM+DasVwdMgU7+3Sob1eyiDF9wOfKAamUrctZnDbQRf2JeUaLyC9DzT0CjI+Kw/uL9Cah23bSMmYAP61TItjiOIh/Ymxlzl8HoJ1H6ZRthzfPPxM+w2fEW2KyD+eRMxZTDO8krmDDARe/w7NwiknwRS2f0B1E/Q66SryB08kQy8TvZU9YAgIiUm0HSf0ezKCVpDs8yCKDnzwx/8bNwwyVjiJYZpTUeNg3HXB8qYrBmUmhvkr5Gr0PB7nDjHc+K5E5tyr/wdfXEndJjAy7I2uD0DHX90lupvGZKfMCER/7btaMBIu2ABuL8U9x0UdJX2752DwYf5tyP5sSeFh+TWkXCWEr5vmz9wqcIwkqw/Vgx2Ola42KiOliwlxJ+1FqdzMdPMZT2YXbbZKBzy91ww8tozmRoLsj4m9tBrMhEkWODcZCWTxPFNZdqvd8BLAGDa6je1tss8JkI
WXtJULSnWZv18J++4myKHSUMiegc/1o0BRY9acpJBKeIIx0ksfBgjmm1ShoGkMYPclw+uwjGo/qm1t8Ru80T9lMP8uS4hpL12jSXGazoTe/4e+VWXsFnI95V5NBjlh2sohpDT62VN0GlcWzflPO3Kwevanlfix69BMMay4mRjlO0nNhzEcRbidooLQ3fBs8vvLnfrcIimRjU+NS1XhGtz9I9Edr8xDq19+9XxyihQxVSD8RPeQ0I86D68HoZhkisqmeU64Anj0BHK59DZdAqOUxnREE1lnD9UkAmfhc0S6ihkd3DRFJpTOc/DmvXdLLholNTWRqQ97VxPhgywjaZCMd3HXMgNaMck0fQDBuKEWLZT/M693tQDQKx9gL0u2KSMNNiTCs12Wd1aNK4/+KORCLyjIJlxUuj9bkLTKXJ7CnudIrkkD9ANZ0XnFvmxj26Argx67aGV1BS9LWpS/kuVfQ1ITZxFomAWLK8lheNd7klPpsd/twkl3I62UftFOB7PSHqrx1KgiPd5so8Hc5hnMK4O4b+MT1FeM2CTNDCP4JiZr3REEq2l8/JJEINaBlT22Qizjr1HDNm7Q9p53bvDmqfeytNr8AzxIN44uzNUGLb8+4vt/DwSnWUTgXcH8Eh50hMLuyiSz3SL6UQI0iuYDnvvYWwIKhZL2CzESY8imRJ7+znAzwp6xNcshAXhMkAwUvbBbbw6ViwzmDN7qpIpNYWG+7qeLBcSvG1k5AwRQXrSVS2jLwtR6cbkoCMGblaY2jtPE+HFOvgwffXOAVaq1+b1FQStMQLLw8LtD/6yOmSlpBWvBvctfxknlgvmzOq15cuq6lg96zm4O7cx0G/amjMA2zo5jBSqxENoULRbP+DCXzKAy6LMxrpOvstEE6OeC3B8CBVhv6EwvlWb2ZX9W4iD+HnNC6C66I+tD/kiV6/6unqyB+fnNo+tUjzYqporofnZSOi2a0oEazXP9GrEcyN4kFtm7N9Irf6isEgYexGu6Tn0Qi+xcUqweZeYFUzo5JJOfgoNBahRCYLKJKqDcw8+58l9DCFaGIG223NaY/4MOJE1AMDrlJcVV0FSfWu0yUWDDuHI+kQ0mMyuHOAFrBbfsEN9/9KoqRWmc7nSQ0/7gN9feWZk2nNXd0kzRpf7VJtS0w5rcWDNl/kaZ9KyVGTysEojDE9XiQZ92qWF9NtppDmXnPaF2WhR/8wHzchal34oJIxleK3i4eN+gs3tKw5KVt1zhmn+nowikyYZuo2/+AvjdHhOi5tXiKp4z+s4Gtv96yLkj7ixzPCYdUke//sbyiLpnBve55yAZGbWKC2ECurhGvIbqq3g7Z537hiywHq9dj7M+EgMVFdqNaLSVApJMEKYZBkMOwGIse0R8jMPqwsxrut5DGCnvRUxmXmTQz1CrD8PX1bQlnpOcFKewUYZj+PTV9b0t+L7HkYfHVutBYTLoLkEirgMXmy89JAvzeXpAwQybq3y/spSvSgNnVQoFUQQuN+wDW5CCJxbnFCHQlevAY7/uhvNauzQA9XtuHe24p6ZVih5WVNeoCtTnHR9HRTj4g5CHHwIDmCgJwFzV9QMyKjhBL/kAJ01eXbz9roJEVLiZ9cPkG+GYihhuUXUYpU2IPC5UjuTHk88svQezNhT83Q5037alC0j6vuA3lBBlO5Iet+ahQmYl9CbSr8nliCUIf5Zw7Xuf5a/WRWXGXh3xYHNEdNOXLlck96MQayEq5OO6kvRLjmAvpl/p4jjJ9kqyS472heosG12XA0yYXUaeZJQbUz6NSr9ovdWxvY64moJp531Gp4uSh6xxZrtsxCVns1VFLJ9vQ2vd562qRnO2FXbTUnNwPgnNFMqx1E1pIocz/7uZN/coRERnbf6xSKR/B9nWIE1COaOCtS+Bfw6JRLG17gZ+jUZwHrZ0xeoAekrEJpGCMTzY01HhSQUp7pc0/p80VFr+8R+A0r0gIsQY3tclP4KRCpKAAdwuCYtA73WItDWj4U
krG9NxY2M0XgaKNuwwDu551hHFO4rvnIu5/j5wMD9rrUBAA0PzeRhFRsKW81D36Lcw4jj/MJDLosoOOLMsMGHKW7rNY8kYYHbJMEZtuecGzHv72KCLYeoFhvrsW6cs7s4IX6Ufm7T6Wf2U35VUkfm/sQDf9tvDaASzFdNtgvkdLDyExXI0VPIaYlLxea2dM2MYhkas+3vI32HUZ431ZD3g8fbLlmG/ZRUQLZ+4FLw0RlRpSIO8pNobMIlmoLU14o90p6iaubHFDDCzfcNIbTWjqvWg+ghLNcUqZqQzQ4zrlER1dN8b0EjWkocKBINUd10T0LzljoL77JGCptoO0K5Ctyq5DPcLNBHVOv/rDbgqJiuni9pGaB6OlD4P8chRpFYKFR4OjWAxtd9U/CNKARt/BNkG6hLGwWbEenGYWqZh8uZtQr3+ActVkraPNpHapuRg4dXFKVq1W2O2ASIS4KeQK1zs1yrAXuiLizpSSnW/n0yBsx940p4frpBDIbit2ceyfkbsdvCjaSkI1uw8x8K0S0AyVyGpl6hUjEfYBF3sLk6D17bAcyECi72cBWBAtxwo9QnjLEoICIgxna91jB8JhTn+5WTmjPF+24YiQL/e1DnN8YkZCa9agbFaXWy0T8TM9muuB2D5epan082T101WJwkVqEphNBmolhYhzyKTKbjySo8olBWRXcvgn60WnwmzBnhlvOH1Fyim2xYPbDyx7RC5pngEkabCyx2wygPFRjuH8q8HNjfyNo3V0qLbPC68yLYASxb5i+V1vuHMu9l91B79gxQTNtkyY0s1g3vKI0bUJj5kzuIJUBBO3FAI9vtuhI/fY8jZPr2sKeNDt2htNdDBxBoIDFfgZRUp2o6zaI5diJbpEgLRcF31J968K7g2QH8JXWeMVDxaeDOYOAkFT1kIDSSq8IPdQ1hdTybeKfh5SILjMtXZk2fQ4LN/8Jb0wDBfArpdoqhhj1TAKbT7pIRNVeIALzFqZd1PsuMR9hoQiOQMDAmhCISgj/7Zwmcf0vzRmjNVIu4NH25hh9gqY+GU8xXVl7DHGHI3mzQdYyEYYx5D2ePva1MvlbYletNeCIuhBgeaLqIEYIHLpnJyw7T6AKzuS8cHxwvU4MaLk44HhMtMp2Ciw1g5LM2w/7BlsOFNZWPzAuDyIkyEvMyAEGT0XjOTqI5kXWXzT72X/yDamQX7z4Zgs++J5YFVy0A+9IhXnzdjxy8tDE2oKpNNr6GX7ExEUo32e4ZiVDRRKiArcOeaHm8V33S0ZwrnxVDy+5XkLv9gyvW25npkvhzZzR6trQ7LqgK7C0T+peg3dWdKiSPudwmw6zEpWJeCyHgW5PyGFPgY3GK0H51V2UjuszFUmI0K+X3Oe6JIpwci6v/7GTM5zfoCVa+tFr1XfHhW3bNezyXRTzk1efcN8IaNWpVHRjXl+IAeJYciAyOPhDH54flYEYKVCQJL1MswHJPuF0JJSIHuU353XiDX1bIacL2PRPZv8HF4tf8yPFiZ0ArOilfUV7G+G7bop1vWS3+ww4i32Tj9LcaS1XnLhPJJvGn2JLfWSx8qIzV9VwR+gHr2yxt+cdD8c8sqZzvucWk/CIc45MZ6blJZes2juV+GnzacpEFX+DAhzdkuIrvCL07frCQ0giKKWCPsMFYg9efDwG46ps9Gnzsko9m1m7iZeGnwUzgtqS2M9rLac4EE5hk+97Or7rAs8sRee8eculxaUNT17n2gNBkgRTWsOtjcPxbmmAYeFjW6QG+H3LzYXuo97OWbplpvoYQr+SP8Xa0/MoDS6EOCkjJg69EX8OqWMDiZ5Ty1y8wPcQ9Ns8oNOSJFljxrlXIU0nekN3jPG0xxtWpXZp1COZK6QKOKLFrEUN0vFxkIid0a+R3ZnxsCKVlECysFVTVsJ8Bk1o1/QZGfixUyA1RriUlXiyOfS4z1who+q/HME3ZeS8uZRGz0aEBcREWmWv7o5XOJOgvAgcO5ldCeWVRNmVC6r+
koOUc7rz6S4BAcTTpIdYMfxCQruQOtW+Q906Ic4WUG9LaxuR4RgItPrRvX29bgkq9uOScPO1zhxdRLbQ/kBtevEg655SkFbPPJiTmsLPH7lEJeBr28hclZrVT70jsjtHQf1EqMlzCkLPyBX70y1424G+G/a2guxo+xFsjrigq6a+WSiyuHjQs7JyddV+UJ3a8Sz58zvlVvTBQA9zcFlwPUWz6n2bu7g0Dw+SCo7pFI5TX+n6s4g6XYTkaf6FS6Lw1Tb38aIg54vZFhP076a66ZGT1Uq85JizUSyn/Vo+dABvXFVM16+Xn9K5P7kugGdAc87Y742qiCWmcWsBjaklGTdoGNNzcRvvQZeoB3oN6p003fWGjQvWKZcfjDLBGNZ7lvBhnVjzhJoX2iPNka2uMCjLX9s7eAtEDbn8AX/p6sbr2278PoyX8k803VQgVtc1l4VDvLVvEowQfCtF2fl8irYbqiWAlaYN1+Fub0zpr8vwaGHZQIlVeTXZOFmzraGNhsqvbS6/MuiUszywSxDUNj5xc7PlVD3iGMjPIwI7LLWVEezY4chKHitGYP9jiFL7Oaqt9qwVD0mRcKjDix6HO6qcCtr7AeN7TNF+YTjCzLOGq1c09ASUzw2Y01SobFr/whpKBt54856ahPBiQ/lsNgwKzIUBat6oAPNKvdB8t/Xm9yofwWkC3LErLJJgefMl3kPfQYILs8q294hzyTjMwfXc/3TIUyi4LLsMZCQEdUyoQTw8p0ADDmYlG2v9zLNNUVyIBptMiCw9ByZJyKCgPS3oJtUE+SUEsBCqtC6wXN8unlC/GtYZ3nD2xLAV7UyWRyXss6Bf3cxQLVUFP76w1+jozMCubVVhyJEO0Hw918LlubH5oPAzI8K9Lxo2ZPzoqc5f4rjqH2Y/C3SKgKrBKTnYDHsrHJqCTvkLOWZ7y8aBMkeXff+ZK5oi3VdY2c+ksqzNK0hk3a6sDMeqH8fO7wQS3TzIhoh8fiCfgJMrdWUdd+bFSK9dqGDj8o+gz5VRyBpU3NKUpJXatBwCLr4n8Xq76PCdZaZY/V94ZvIOlD8LdgZAHGOGlbC+pu++YLPJSSDNrYgsLbOQ4tDapOWw95nA0eKpY3XDqxzI2bWDEISeSBp2UfuKlZQNzG9FQRSa1sdoGBHdKcTP5WyRJmf8GPJVfgpct88SBT3CdDOwtODJBj7kuKfhNUHft9W39LRA29ARjGDGYfwHL7iFEZ9GmqLu6aCdMQtqKmt/EgpzhRRjMQ9sbcy+CRqydC7LFpi7UT4jt3W+i0Q4v3JOLu5zTHIgMP7AZhE+DfhB61GgeToK5cIYQDmW8O5nhc110Frqz2Dj8fus3rE+fAJbeJubVYYO25hcS6iIFepeHQ+Xayp5Lr5oSV4AjzUiegp9xwHioYGcGG+QaHoFbdwgOcaMuz1VbMn/utUtaiBaffU3/pWeysOlL7eR0/BYyTvzmeezgCeJKL3revxWuSyINSSst083N9syjH/eB73Bscy2t+T7eXYDX/ak7Gb/bOujc66++WWNWFRHMvydeUO8xpeuHnMNg5vsdO9OnQtxTozzqN51Y3PX/lJz63DqBBuM8NFg3eP2lsRlxWelA3fMetms+avn/WtgKd5aLmc6BzWxKztOHQgqk98RnGF7IyqRYlXnn6UphV6dg1AlDZaES2F7ZSFezaxepGFmRFsPqz3bW7jxwwcwcNzvV6wB96hhuEKCAXrirk4pP7R8Mh2xAaQBBrrI7FCzpwcE3EENEl4nvEwqGm6TuisGblO0liaV4RXoxXBZVngTb0yzCthfDQUWtgjjFQT62Y+u5SPnQjkMS97YSzx3TZRXmUUP0vPXUndFGOqOW5wXjWDnhfjU/MvHF2b0Dja3v4mfNAK/Yqs2PnRXw25mlg1jsPju7KhOBeEVRwsZ36fzKLWimMSE5eEu4kch7dkGIIbK2n1IOGyWHLks8gqfkr36gmuEeO8ZeDCM1YiN8oulF0TU5k0i
rVrCpNvoe/Om3yPklqVeAWhhPrBV5teF4iwwioydnaiPJdQn5NgebBdKBrhoavy0NlHo350GLk3HHd0m+tDj44SFsba9UdFDpnP+4tA2+RdUMGCUl4B/Ms7rW29iErlfKaVsZlk+Uyjz+brq+66u6QTtCCYyxAuoM4meizEgcfmuAvTrrY4BNEjF/C4Nie8eBqGrGBekhYSmLZ3EDihf6dCnwfFgHzCxi9STWGcDNbVwYuELCsc3qDr6gchvMZ6Z1p/Qmz7rCzEq7EZqnjxD0R3ZdS3c9ihnxlPPOXEwDR/w6DV2lW/V4nXzcGjW9oVyGtZ4InBMi09G5I9Q2dYDESCawNPrLvkKKOLpwq43Deh4wHlO8+lasW/76T/zGon/zCupOzjOgbk760bS5FgJ9qyHhyEKoHnFsgLORJCyCX4ValYK2lRNbnyCvQZ8F1aOMGAYLPtp89QwEqaAF+IxAU4f7ahe9r22Z/jKcsWcIRFTEGE5mCWdxsAy9TcGXh5OSvT1iX8tUjSyMMxqC8QE/fkM+IYgPU/bfIIMRKbfN1aEgGgmuEj1sBsWHbNfIAdidl3qFtY92Xdrzu0NJ7oOOyuEtgZ/A4U6rCdwYeulCmiSmP3U1JQENOwRQI4dkUkt7eU+xYJVQ80KDJc8MPGi1nWUTtE/5H7t/C46Q7ZqJyhTqvnvW+jdNbg0T5cXcW6gw/FE85C1yPHjJ3R2hOyfPhW09O7yI6np83jbWhW4QqDWsO64Jrvb/x110x1HlIclAF9mL3eAlqU0Rn3eouTVUiTnyXWAK6DZ4kzl8Mmld8cIRcNbwZriYapbpDaUjoMVBze9ytj6lBr8UNb4Eg4OhM6sHlmVPDLKOB+lK/9j1civ/hf9zKNZdvAGInkKvUxmn84q+JfEVukLXHwZz/0RFXKFkZ6yzRMFG3sPrnO9F51IfX0Itt6Z+cH1LuXHvV+3R5HSerFKInsM6B3ZFCu9NuWcsnBK70PSpA0Tz/L9wwh/LavMmqVmDmAPP54aYgAbervhX5sCBB+S/MM3RfnR7NJtl3+QSzV2Qczo9avEZc/wA3vF6Gh/yM8DhXZbx+a8Mi32Q1pCqdBTmCvvdnhBgpyvJhZY6Aabsi3UpTo6Br3hK5KGOfiHemdp8OlcEmnsXASofXxbKzLwqFtUjiNYTXoAZs8X2EfK0TBqPTQOLQTgaS7Xil1knIoRRghpyaxXc9T/kLp8VLKWS54BanHiQYkYIBH8t0ALGH/s2WuSCyhqPhSqPrk4GwwFjJwKyb+7L1+wSgdNAJxMmNESttsCNsAGcRFmUH+mha5zy9eK2pzmHUIqI+UODJu5zqBhGP83+NFxrtBDApmUdnWX769mtcXM8tSyEof0PZO1JSPCZJyu0mlbgf5nqKhHZp0oFh+rvAq5K20LAjeWg2g2/fnSVtFyJaeEaKwJ6sBQaWQ67TQ6W7ma4HWYxF9Qn+K3/EwkneqaCpdgN5r9jtS0DrMr5Y1Yo8xUWWNsNMxW1fMgqzSZdywFV/EgEUAKTXTJn4LgbQ4eShRDwNEmHIr2dHrYLnEkfW1ZYPhZj5gNjcKBKpSmPfF7U6b0RNmuVOvxZAsSGZSBeFGBwn5u9uiXVZO2l5ULG82JxSKx7oCo608FaUTp7QgjMlMDkdlvr9vVYOdNJ+CpTpnQGY/FbnYlEMaSFgq7btisiL0bQ845VdT7kNFmsDxCk3h7l8sZHYLRWly8CiTZmtaHRmYlPlit7JIcOTGKg8P87OhhKwrFJcoCeCE3uOHE4G7sot7f2Pde7BQmaKShw7nb9fUQI4PWbOKbA40f79LI1w0QFqWqQF7zP3EYSnZZIQVtMIJasMABjEngo9Z0JWr+OeDX/en8EKS3iJaJabVi/Z9BiKXsTx0MbOm1+DhvCrWeldcSqemuXttO59kTfISjSWvaoy1d6DW7pn8TOt6Zg6Xr6oir7/NUOLv5th1of/DiyrspdTj1RBSyTe+13q94dTagzNcT
HYp7vaZDVgoTnLyd8oyRtqxcQx6CrxMLbywAPI92QShxfxZmUtr2+D3kIAdctWY+daq5pTcBCILT45crBb6OjKJR65PWCNV9tTTDME4SlJFV8uUpGqPzL5wGTqP1lblczJ0ATWaSjVxZBq77KgZAiOJY0nduEfneb6h52HAAb9Xd3ZVAX5EYcrkCLwhZcdphGSeAxLyTHWpSYs5rz1L0IPosQsZ/NnJd89ymv7zKW9TDuPOU1jpM8Q/mn1hZon3RIuLDrhnihhc4QAhfssj+jIbVlYgPmSmaNHbV01o0RJ5D8nbGxQLb0al4/sJLxs+dSG+H7Bcop0NcULfU7hrVYhzcmQ4YhfjSSuofVob7xmbOY0ULlkvWe+tMNDxSRU6I/DZdFmoBiVGH55zFg3NAc9ujpTsqcknfZtZoV1jHMEjrMt2nHiLaxDuH0wr0opg5AWyJRZXFUVhyF2Iqa2WDwbSoFOcax+7Wl13w77316vVpScxDc2Oqv3x+f4+g5r2VlYmr3AbXLwyTrmX75tJL+yjM4zYp74qBPZg9oOQTB+l8wwDkqGUSUEa0NVGbO67McuB0+8MT1D6zox50C+MuX+xHMerNufcrhwurGjAVIpxAN1B3q6hPnI1RLoX4nqJH7ZX8ftKIeaQYkAdhrS4+IURIkRXh0/gUrI0G6/Kh1O/95UGJgmD7j6vIFula0KRbh3czl+b7BHfgVjCJRWvEkKgnWKMPPUZ2xU5tQQS1xXfgfCD0DepIgIYT53oBB0oU1YIiGzKSlu8/F14tEGvzsBbd7C2RH8jaB2bYZmxJybniq7zOtAnSEbbOKVacR26couyswBnqhQoqs6tVK5ciS6w6dCJ/pBWurFHDxOr857BMGQNYQOO71mGOJxRiuPDDoEt+zzOnjpyyWG1QEmwoCPZKtA9NLnTtRLycDwBGz8JmH3Cd/oPDMnmqB1hZxEIi1hoTZme8pU8cJoUsBgT1MtmrmUz1APz7bDlZlL4jwGXxOtbzH5CYWI9e0mRZRKgd5hGs/rxyO3PIVeCVEq+hwW7yA7AcfiFHZpvWVQy401q0qAkI+DqZkPdPSi438zxVcH08NbwosNM3hOHt2UWFql2GOh+CZTIJsAbhZAv3ElPswsm4PE4c4KELN1s3rbF23Y6ZRoUa3iKYivbVlbWstATyHp28gef34125HBgEKuPYwtQaKHh+2dqu23fuJeZycXywfhEIO9Ccv+42T8CCK1X2d4/znpc7FIrjfZsk+THO4/Weg7LD7ahRyRyddNd3YFPtsNZ6T10Q5Z0SyCcgu2KFKIM5PQujXADwaS/7cezeoFLQ85x2slHEwtpUjbB/V70xgLt9PevTGXK8jDNbkMv8R5LPGztvdO3HSta0E/qbV5/GCSNMY/4gVw0Yk7Og5eZcYSfRqnkYldAhUPnsFkM7wjF/bpkUK3v5EsQC66MiDCbBhTWbfv41DHgpJwMqsg1ePjvKAF88ek9+gvbrvcN2kFodfN0OMnaby/9OvOheoCvi9TCtv5IVVa7EE88S9xQTWHCFuPbtHtBu5toLzWQOOJ6/TJfUBo+0u3w+v6JxCJiewJ4+FIXu08JbVxeJ/cA1a43I9JvPLaaO5oi6/+WdYC9chVAssLpfWMTQ8Zb+f0Q1CKcXFOhNoi28ZCjojXzBIQ1nhf24qYFNgWcHBkAhe7+OgXx9AG3sdKfY0UHuuFf95ravQL9Fsu+LyhTUrhJlgDJI9lYLXa7FS1wmuI3ZOph+dfTq/Fw9WayjCv8GqH/YsQA5OFqO1fZDoRYijA/S4HBPnA8CNkZl3oOyx8hU8+WFkjN0++7jsHfFh0UFOJNYCXwMkCcVpwHtO8gEWj2v/cjsjXJcWuBfVwDZguAJnKAqnfFdiFjtfM93A2j3MpPIg9LOArHaWm9Itdp29E2haxcAly+2uINwS+nm3UuUAdVQgYZDIaNV6N+Gl/WFK0a2Vc5IJa4mEZY5ImmEdc/hoh4U7PUG0hp5
dDlLkgWZYoc+1BGJztTZ7dwDneSF2fJfI1AE2hL8GLTNIuDl5luldXrOgXHC2lNuLEYBhMz0wbljeQz1qaqQWR13G0hBp9Kc0qBP8lQwa1XT/CQztYQ/+EIe9Rz1qGYQPQ4RPIs6Y+SyiX7RCXd2RS9ByAPmCzUQoe7mbrx1gS/lRVLDw+vuem1lVBFZdYEpcKJp4xLeKEc5cdCJVwDsdY3TR7EXvePiASpbs/PesCn+GTf3aQCXU6+Nrq+y5q5gyhrQQbcKLW67SyBg0hGF37ICm4XF7PXTQKd6uxFsoCuw33ANeZFmhT+rIu/RA7sOPh+OBTPYzCKOSvgf5B9IIt8jDrMwOTaBXHawCwkmHlJzqfOxjU0HtJtgK51YQq6Afaqams0tgvKK/uS0cXAWprd6zeYJYL9RtICCq0ScsXzu65eaMAlZnLWlFfIAywhO5S9MW1jRV5koWiUMSCTQm1uOCJfLhnlqnurFGfuHrYuED6sLLQeDBBUhIhV916s9FtBGJGcFt0QLtJzoNo91TWAZaBfzVPnujeAImRFMFwPEccXC6NeZu7Bd6HlLHECdn0xojNK6j4JjOgb/qUXX20nXI2BsQs95SDJAfjbVMRnSqsVYIawqbHh+uTKNgzFm16XageM7wFr+ZdcYKMhKCbBkFVTLHHHl8bv2CQwar16OtmbcJ29P3uA/qRj5zZmQzkjMHrYE6LzikDT3aPtWeWtoDPu+ZX4Mdf0umGeXO6zgYr0r08uMhAgaeAdF4hqzVXOdFwRKibS7AJuMX9hID/dnkE/gOd+uoxxqvBqgPKyU4D7mNmoI8qosqdNsyyU/nMWyRyjt3EvyFjJnQL+pQV7eAO54/uzOxl0se2SsjqPM4yKPy7knaXluS/HreZxVgfZC1anJwaSZbffHEn3kqYfqOJ7blnvJKhfVtGhXu/PbSUG0+igt49a3uq3LWNtcGi/A4rERrtnEJRW4h4OloFZst4S3kTHDUnuP2IoGEfiJeMBiichPfkb3tWiooiPeEigxbobUZpbiY11JueZXDi+A4U2H9ggJEcIudy62WMsCXFjcKvgCOSyzCnK+y6CsTUvGut9vnXUXsFSt/xqa2lNWuivg7aC0r5BT9o6pPap5dQRso1eH/mDXMGNtrHzLpukpYGbB78H3k55pzDNqBIhZEi+TYwix/PraPzss2mSF2mqRDRPKHvYODPUaRl3VvyTM5kfSMBBxfb/bV5GeVGA6VhfAnFOw8RvMzmTFBSndJLYcFHIJxMPM81uFnh/YutqaCwP1tetuRkQiTEp6lj9y2z9OmvMRd1sbgQUTo+jp52cp93DUOXYCLCEawVOonLvJuymYulPV6sXTI7wPNRttrFaJ3cNDGeltNXa7gQqAD2xnHrdLa8pO3IW0poVMI0fJHR5ltvzOUKfujgpr3cInaOLJmxjooSjBkZAcbYj6b0CXiMpdWhDvvzuKxNo9s70dXqudoiwxDAgb6xiFekliiEc4ey8RDXCNDGUCghWhY607wlxZB+khyoOy0JhtjNtz+dpLffQGtgyRg2fhDLuhku4ItytNonH0oPNL5e9iw/r0ecLRV6w7+17pJwYwhdPLwuoVai1gEi+pQvYzISkZ8/6ImXWuWHKcxYD3DRTEq/miUr9wdaXFbJJf7o0E3+9DnxBBaoeVPFClNT655+XaztD0Qz4Dtt4l0ned3Xo/30i+7hdSquMFJIuOAoWFSxsSBffLVrZ93IvVFDqGtFiQkpgArpWG/CkMe808JS5ccFPS+g39wwDu21NGbVzNIhXHA18fQQVlnUVn/RelqCp0212HDRN0M6PatVgoMnDftMMq3J1jA+/02qYW45PYXr9qhE8p/eb83AmMqrLyxl1JyUmh8BTq3NM83BHvHVbfsKxDZhSt8NptoSAsY5VpQadS9dxvZMjY8XaeeysIiEZEj47DY41IA6/+Qys06eyx42eyomcCTCmseEuvbsuDymDLWcCWUHPwLG7m
SU3HpDAIrnsGU3gJOjEAhf6Wtgqb2OIKRmUbVkvpjXTZRAz7PlqQl8nBNSN13v1x0RNiyVD9jyRX5VPzAiRJ59f1M3b5+tXjPljidYJX7U01buZLaYi+LLJCt1MNXA5Xi349f4AYwMUtEuCgrxHokCg1H/AKxzbctlRU7g16ooTGbHTDxk3kOlDgCiOmah2Hi8nwKTQXrtvcx6YZ6Fte403eYuVjnBT+nvHSz7pscr8+xiKlRIy0WppLDY+YzzNTTBxh8AS3xLJQNgER751Ud7iREFJEn0qekFfLlQLqiwFi5RGQrF+dZvRDNxZivIUkpWkRBauwp2IVTyrfzT/vSy3irOsxnzNCwAi1m+EyoQZKH+PqGbSwIkhSQB7ljNLGaPrjZGcGQlukPPsBwhcQ4IKdnaeEsw+/JLe+NpCVBEYa8isW2mZH0NgSTIKproqHk6VM6ALV3zQsLzmtuZpMH9gfRVKauizFxtjJ50nJ5/iLEZmHYjsXy63dv0mzYM+2wjT1ZTd13Y2CgqnwDP/4aE0mUgsbj0D6sKEpFYTIyXXAddldIPuDZW1IRe8yLgRScgUFnVkVtEJRjTPu2NV64xopBO7blcFhBr5cgFU+y/dnrn+WPrhEIpc5RPnugGwDJTqXHMAR05YP1cC5S4ylNZ0zm+n9uRD219A9klI3zQV0vtLVrOIgvWmBi3eRe1E2JFxfrs2OXERLZMLualxG7yV1K0MD0ZQXM8z3z8wOMbWa/n8kuBr1ap+uMtJnu28NaS8x77MYQm4J2Fx6FJAx97MemlV5ED7bX4TJpKnWZZ8+h7e+F+/q3xEVS+7BwoL16EUSL3xI1/ohw96Ok28NUiryELPfGgNWKVU2pST4KEwGfu/lJgoIPHbPNTFy7I3yhIEQcB79xxjjOzXTrBaTmmG/cYojnT3fJGlNmIQ+RltqQ4C5x/6PyQKus0IQcHqGf+kwWsLyMLdQjbZ6FFd9VfI+CW+htiWg5S6CthsfkRulgmnjwyrYqBeIoHH56GiRdFMaakKw6QCRyYxA71KwQ9t0l6c64ZPpw1JhrZq1jwzZwOThrTk1XAelLOQqu+TPebk5dtEVtZ3oBpPdGTnLw5b7ddvsxyLvOt9MBDOsCJpgkOYMhvZ4nJcO6IzGO49ksmg+r4isOIv9mQIaMppQh5QapI+oj8HOwVIRrP9hm0viKnOxNcNqZfp2TqALIDRuETWB8NpE6maCB5yaO8gX7oZIuR/nTXgjckkOo5i0IO3opprBMKlpk4VI2J79Uy1JfB49BaU1hHtHlgAwr6csXEGHG3wW3k40dcjfAGO+IJZb0ykTOtuFGkdzvvw3xvFVjmcRtMMOO4cM3s97VH3n5FRg7DZ38akKZvhVEaPYfmDRKq/d5T1aLKcN+WlnCD+wrsSdlbImGopHEFVBoAaUL8r2dd6X2i0fc+qmaK9xnZLvlHBoB5q5nw3C08koB8rFQSGoccquHDe+P0vKYDddI4dmew36w71CLKZu6fpCRjz3ZF51ZMIGnDwgWlLkz+y8Y8wQEp5c1iuuIQv5IVbDcA0bmicmx7T5JNAm0+hxk09mVEP3r9A4b1yTHYVLGjpt42Unu2I+nnGi9jfs4MwLbB2ixBT15QSBt2fR1j0kxPCpJUqACtJ3T8M4GmibUAK+flN0QMKaT7lV7vdOaaC9Sm3n+fVAbI8pYsnsx59a/cGwH5YKAqatuqe0cUZh5VzEXkuoKwnz2LnpqWsNzQR99b5EBh5jFfh81CGzPBxHLp9J+oLrupHFEufQZjDUJvQXJKnSRKsv4w5dKvtHtMjaU412c8oPfWjtNFt2/HwC865ySMNAAKVSALY++dM3Cls+y2BqXm1K0qMO7yZE3ADG1vhuDQzAK99IrC1qJs2SHo8tZL28NyioZ6Awm7wMcp6NhKJCNf2NFXcQNB0sH1plJNb16zK6dHbXzttoxlwP5TvRGZo5rx4zXh+28PfN63dlidLGMLji3pFGziyH
1E63bzKg+T1QM/qySHwz1yojato4CE48gvHC1EE8T6HKxA9cpLIn4JJWfZWFdO5At8b+6QVcKxBCoxZ2hNkdUnOpW92euM+3i8/yqjKtUKvU+Fu3POSzLOU62frrCXQy2GByo9ktGtOFRUS47T522ESAT8jNvGE/pIyNnDA77ii6Dj6NxxZRCD5flefLRsvTKTZwPlQ57yAED1jy97kXdC5juBhcsquot3tj3/ve040hzrOCOXFRYEMvTtUToICaJS5j7WlBS0izfegLl8ZGLu9i/9GIluQp2ZOo5/fAv8QP59Gz12qQe07TjB/68JkIv0ZTKtPzhtekRqttnwpRQ/tMJNMepRvgmyY+A1O/b11ZxMdafn7KWi+Cv4YZH0gH2O8zAR1SpZij7F73E74VstA5M32ekKzKjbC3lg7kDGWxNxb4cHv+r74XUeXybDloFcPv5gxuQNSvfvjLbtTC0yYB8mMwjgfsGLJ3kQ+1aqakpaPrgY8cSvPQvYd4lHAS89bczkgLTrB34pFMmmd+P4+5ATquj3ZcvzKWSq4Z8rJzQ07KEn9mxBeSteP8x23KV/vy+FTsYPJMNgzR1UcWhjlRPn5aXaVnhItOSg4SC67IOZ5kr4fU+PVEqepNpFwSeOtkuNje9Tbx+SyeYUgGfWuFXc0ReRGJtmcreYyE+BMWyG20Ooq5u6uZlm2y9mzcXeCoZwx4HtOSC8LaP2jWvld5zq/3OOPw26w0zdX0xm/Hg79CJZhTO+ODz4oZmqKyRms3eJyKZrOgRzi9B0GYL91l9JEmJhuB8HX3vzW2tCMVDsj3Ql6OFBvozA493HFTGUPxUNZsJefYWLsj8RSKyGbsoYBgsgDRoIYhb06BR1uKEiiquiu5tgdCu7jYkGMA8zN9dKOeafAOjK4ixufQDJlapw0+LC8jX+2p2/h/v29CGpzL7k1Jt26N0WkOuPAU9+S45l0tC7gFebtdbH8OjoR56WwZ0uq4kLup4LCWUYqATxTONFkKwRCljTisnCNUIapgFLH4VK/L6hUAwdKQXECRjGHWhk6QfwpsFuPp8lrPp+/akVKBnzW8rhR2VLY8pZug7MY5Jr8wq0p1aPZKETCI8Lz53bAcpZxUVuTowZnfdOktRxoMHeMPLa/yYIAOOGmT/Fdh2D5defQlXbkcdX1T1205hQanoz9gP5RVsHjzyvWGbaeO1+0STsgU1fCuErElhaSdHj5M8OWAogICLrHZcwQhRDF8OR1pvyE5wrF+v7FCCe2TfefaBNRFhprCOkAPWj2XnAsaqGpZY2p4wJ7N2h4EhRvPi2Zk+Z9BT0weQyM1kkPsGdORDY5ZT3FsE/25xTR+25ma0lSD2CzKlJZxLuArMVlT8hvvjzsESfKhefwcdsDqgy/dEfLriNUKH8AZ1nz/G0OXatbKH+BfJv6iF34DoiMzYU2hX9miVHIuvaOUP+iLpyFlGtnsKmfbZNProNuPsk4uJkY1BhJejHkO9ekKeWyaYMRCHhYamMJS7/K+DQ9jm4eUUnRxYInT+jrlY7Q5na56p7kB2Kl9NMTBzl381Vo+yPtocTIblsL+Gp6L6S3QWLI7W69x+DXCyB4/AMFgQIF0qYeRqRPm1A6DCXmk/FDodse87U8r1rUZBgWO+8hwD/K96RtVahiXMOlwG3W9ZSx5JXWKnOE8ddj/kWCxZoGV2wjb/9IT8/0WpwqAGnBVCFd+Wu9Yhl6HrH+eUYYUHdGdd0gJrUbwOiPfBC4gHX5FQyFQ381gOjyS130jG7mbu9HGglDfK+3kvzCnaqOuFj1HSwL4vB5vV2rYkG6Euy7RT6AwZM09Q9CO1MzB/IGcjSH3rZMVOmMW+m1vkRvGSci5f4RwBHGdq44KR0Q6SfkaH8QhZkHOAqRdgvbeW/xycMA/qhoZQPXTh/TqCfNMaHOkI4BvRvhjRGHjo7vBMl2PETNTq1Vc6MZYCl6Tx8pTeIlF4GlVykjtLfCrRaSsMjOe7krB
O1kd1910+gYDL3kFbab4NUkHraT8OpVc62hkdrcfQiS6AXRKabwxwUTyqqOGz8XA95uyG3pskhdL8umTSPZCJHwuAMaEZLvkQaOZPqEu3Flb4uD/GESMImiACHYV2VfwIrmxCE3y7qoH2+YHj9eCkWZx+3miwR/QcAG7+ghFQzVUXzWs0g02AUnve35Oqg6HjAddGdv902mQoO3bXjDwwwzSRYNG10ZDmdx9JybK4BUJjbE8Lq9hKaNPWu7fLYjTPg9c4z3UApzkwr97kUeOZWBcVNWu9gSVyqbkjsWCV88Zlf6oyDunrP4iQqwaaSszc7FA0LBD3GuRlU/k/Q/FfhNzglQd0/V1JvyIQHbBrcnWr7PvD11sWkoyty3s9n1CKbq7bzI4mfXNOENgjwz9UBwGLSC1IJ66BlEAs49KINI/PE/+5iW4KZJS+BFR0dXsz/tUkPlSQ6DkbFx/F4tWmAk4IWh2cq+d3qF+9nWxZcxv/nA6d9M4NjFbNnmO+APETa8ImyNtSqVm/ZDGSunfHdjA6wiLHydlZZRxCX305leBbS9mxr5Ucup+nMVly7Hsw2okshrpZ4Bhvw9zw865G1LBb5sJ6yqiC4NfrwojcfZh1WHltH2eJ5HKR/Qr5zwJAC1Ubympiii9ALNIovAY10/yFux/+Fvp6+u1VQgAXPQAeZ4UIoQCVtUpWp+to7X7at86/FhiBhvbzVojxwwCRtc+sf1f/0/G0/8slg9THbGbDVoKxndvpIwjq+KCv2volVNXNCfKD/IoVPNtxMRYQqXsFLaExujSEd5PARm7pQhALcf123OhTR2uulNpm+Wyj+n0QkuoD5owy9Aur2WnEUHKu26xBGBgbfEJYLeSHi/7jalWdXT7FHFy7Vj3oYax+Gnn9G4fQJhRn6x+JG/xs7gYv5++WRBuw/N1stJkH9SEUhoryc646n1A4KnSqbvvxlBfqqCv2ullpRtuZV08owbwrVbR8RNxRAcbp1BDdSKmfTc4P4tskzMN91YFA+s++OP0b2VTwv8F69Iv3fdHu19QatJBk2HBIBnarYLd/EN1Eoy943KHblEvzGh1uXeSIZCKx2EEDlVXj5iUGG3UHsX4jr22MTlP/T3F5NItAjofnxAkNmozP1vExJfCjjXb8XIIxDBN7UnHPDnanpkNW2JkvsrARyL5ojkrg3YS1K8LQepzPcXlR1+607xYc24WhbIRonSsYB2HbI6z+aMscOdAmz8gY7w8jGvK8CzVDF0OxanYqAMNjO6J/h62s8uO84QfeBY9seo0kJZSVzDtG5OIJVOvBIjsv1MFLeRNb97dFpvlCmQweUgowKL0f0tnLK8WPSq43a2sOFQIoLAHdFLW5oe/EucKb1BpNT0pfM81xSCA/pi51gP0eXZb2wVQrMpnzaiqZ5HW3/BKghpRswxLwFJgKSNmx2EgxpTRPC2dZuus1i9iGCPH5CZLCiExhyfLluRsU9LEz7x3FbaAlISNcp55btGb8W9PN8mHl9CIxX6EPERnM9ABiY3uZyNp4lJ7q3/zFtXd+6sz4lHUQkX4xk4JhUXM+zp2xcQEHQlZDaZ5ofEAzGr943HdpNhw2KykfN9ACVfsb2RjqlE/7YfGtmANVgkjjwSgRXNi5j7rtb+bfDTiFY7QOfxB4NFcgpIJdW8szEm5T0dF91k3g+l3C/oWOErRMYtlW38Ce5i3bBh33YbrO5Wcc1BtFXA5Yh+y94BWoOkQz/atkX7BZ7pFpsWvUzZrzvDXBO8+1QiRDMTxZbHMkLBnzr/QHO13PKmJ053WhkZOw4wj3Yw9bFH2l7Ecw758CPIaQ8FWEj2XrL1W7CMPAFm5urhSNcyeHimbxXyKxRzXLpoe4ERJBfJrp5Hxj3O4ZnimF2G1Ol+xhsmyiO1McCcXd2Gi9uwcrqdmW7BCSmqVzP/w5bXDzIO53Wlp4SS3/hRsDyFsSI3SP6jwebt50gwWgNb0U7ccHdLlLhI+E0DwgThh
pbrmkS7LHpAIDH0iJLSQ9N5GuyNNhRuaRMxChc6Lgso0JrBtnjLVNn7eYeRrIGyM1+CaG76WgvgrN8E+ThiUzYwvFVtursf1Vv+YUP7CRrMkjhjNF4qzzF1Cmw1zA4ejVxstYhv58F1vfRSBqlGruFGJULN20+pTdFpkYmiggU3Qff2gAvVfzMKjwShoS3uj7R16qf7sfVBBjBgvc+IL5CGgqvGpzCAeC7M3/e71dylAH2RiYwoZqgj0c3+M3Blm3tw2Zy1WuICUkR9TMM/O8nJzk5+ro2lmKeA449YOIBgHtGlgH4Gqo4wGj/L2O4O3xCIjg3vz087aZ74fPI+VaU6NXQ4kUjgFPMQs5HYvFWPwcq2MbeXuQgnWCRO4/DjyWk4w8j5CaSCsHeARdGZKZyp7xt5VFWv4jHdjZlS0V0nz1R0eaKZE+yzrUTCz1nWOr5XvupkZ/UEhErNh9hicO++NifhUt8BwEEVscjETcLmMZRfiPCYnbSU5EboiXpfF0yLfKP3H1os8ZXbJKff/UWJfTtN8WsvkAS/r2nwJr+UUCIVU0AZeAY3sM9/dxVDKevAeqZ25c9gFHTvD4KvOJ2IwqC4CtCwSTF228V6PXs1B4FGM12IHrqAHpsGingL+vf6GaHXFprW4ntd4w1MVSr6TkRf+fvvlLuOwvQrBSpjqAiXAfkNY6L/YJ7jZ5RXsZaDKQk8JCzkV58yX5qrme36zU4J0BZZZjhtf4+Stsas+qpL8WjPQTDy8Lzdq7eRvo91Y+VeX1hiK7chT4+gd8bUfgwPTkE/D6HfR9c43Ah2Sizr6+opQcftR+RDlQENEMDkSpYEQikP1G3mLKWcSNwzWCH5PXq9iMzPQo+ayEivnbN+OSvw5rNExpAlhuvW7qaXGTXZviEy9cOdsJcMRrUGLtaHGX59U9JBMOQXJmZyX9h+6bJvzBZdJr9mo+uBBl5sNOADsy1zRhyg4GQTtFNVRqrQgMeySD9dAiLUurU7lxLsbTH3MuOkfRFABd3CHLE+F9nIJFlYHpTFu6Q77CKrR7D+d5xqkGNa0BGHFW3uy9hBg2S9gh9OZnt44eyEpxh4z9lfdz4HBAicNZitKRRy/I9nGn/ZkQqIxHg6VL0bdbMmieFQORm2cPoiMdxGxtuinT4vyFsm2PtB7uI/0u0BoRYFAQBotm3btm3b7mXbtm3b/Nl2N9u2XbOIWcgx8g6s0BaTmXRL4i1vxz76PR6s4JXwd78YJS+DY/NR8eJicoORqLxSz5408OvrvFVuv4CgPNHNSSZz+TFkaOfZGqjC8AHZiK2IbstcVq3SMsc6Kc64fO93UnJZlCmCSQgwQ+scL+rzMAAEDCSG3FFBZsty65HqQ9tr/A7JiIF22KIoyuLgDCzYoF2e9EkiNH2uXD1uebS5zrlUMNwB7Fdv4PSOD1iSnGIPfP0n+IyRnBrMboV9s7y7BSmcJNWP+0hwCsZ7SMV7+/nrAnT8ywOlN7jJ3fk3m0w/Pu0LfZ4Z1dokIGpnQZ3JcvyqGeq6OuWq8X//02FAkX/tbC62Hm12ZbABx9nf8F95K/TfubRHKg/6b9v6eNiUcAf5RRXNKbwMSTj0NsuazYmk1GiWoUsHeUGmutSPYZf8EvVT+Bb4GVYGXg4K1SE/TAaOLWp2hlGJk1YItLYhqpgHpJe1xpf1b1Iou43H02lB0pZ5kJPSdVuFeCHo0OsihoBIt5c6emFssjks6uH0EP/eQoxX2o/+w1dp0v3GSEnsonGlIM01WrKJ7uqw1KBGIpPuYoqZk90GYPnv8ZA+ErEaKohiM9dzElzR+GzGuGrE/APRhxHrXEbT9ZFFoYMSwc4G7oKcOxD/J4Kw5gZvKqIRgiOUiT685vNlCLokhczmuI8fR12rTLywFGVEGnH942fe8ZPFP/783sUqJu04+Dq7WMTL4EDQiQ6rEObc3z8izDbmz17QLsv6QDMDwU2xuxL2VoRw9ux53UMc22RaHK0YJM/32
4poxH1ao/qoIOEFc2+IlefBwrfQkb6JVRbDQtRCIQ4pqNNmZo/0QM7j36aExMw30CiXbxtNa/dWBQkk/KTo0ZIcGUxNTLRzeZdLUMLR46Iusa1E43AUsZK9j5X4ONqZbMzAB2ZwAA1YzTECL2Jb3BXubi24ONbw+BSbazsy819ZJKW+HiIM3Dv0DQHvKAlwqfKORuhVCTb9Xonp73Swo/VHgkvvX1DM83FaVRDG9MPiJNfEZE3lBR14DJak9vhVOYvYmHo8GhvVt+TMZ57VAr+icehL1eRscdKAU3VB3FqDTszjLfzmVxL3ze9+AiLox9wQ0fj1+kVdKAFlur2G6JpVGh+3oH+jIDGkNw5DPozRf2veyngbNGiqoH/ABB1oj+DNv5z1olCni/dg4n+woR8buY5d3/uanpKi6VV1E2vHgkxMuuqfSOl2bpZfHS0IqFc0kNFKpPcf4fzQ++7MomcwTovT1h7styNpwagKiRi4dEu3NdbJcF26wVMTFN42Mb+6VR2aN0Y83mludh++RevFhPXHjTv6dad4EC5ps3adEYcdxMrq8yyYXN+M9KPig66m4CWfbQ8BWHZggc8bueEe16yRuNdQ4CWNV8u8c2cUqLR40rwqgjpilib/SGC0yEyojKKN+F9vmiZxEhuNHJuMrJznTrrKIDb6MYDfEFf9ySq/5ZdhHWmwEucEooW28Wt1UYBuk/0djRGnEboru5Y3/pDTO3Yupt4y+Ej4mSRS55pvnXe68EE1eysyHdPjkB3DS8jpNSeXZ/iiutBZiWsNrBype72T/9gZYa37Vge03uL5xSxJ1emZDguLy4JftFB2Gm8/N3/OWwB+q2oyRxAhtZKb5yKizeN8znhjNjj01JTDsuG06k2ky1wPOtVlG8F0mC03XBKpl+jwIMlxdpXGv2ra7iI58Bs9+JyAYt5qcY1PRsNTDvKNDTraxRTp09oDzaxS05kgn0+KnlHRu/192Kk8UiLsqHM1E8TJpr9Bmd+cunEGl/QZR1Nba/AiKA5F6OVgjqeWVGOeLs56a2mxYDhzk5wDdGbQjqKWv33/AaYR6PkrQVTJHOSxnFBqnrrVuwjxxESXJSuQiHoGyQ+HC4rVwrb5DbwI5uAmnDPUthS1Be1ilSL6KE5+GA+d+c3/iFpLs2NUQ59JvsK2LSPHCtpyjRkTofgFTnMrHRJhqtuVGFEDpGcXMhMU+pu7+0or2xV+5LeFs7n4zKsK97e3c0OP2j8wUDjUZjFfyP7zmTSqNv7PrvQxovZWT+zgpZtk9BeWGue0PYfc1s/GZRgCMVzlUJAWGyxOtMlZI+2o99v7+QrQr60IzNsBbwd3jWTKuOmWN5n5+uwW63EwFpP+AYkuluXbytcxeJSoKVWTs6eqtLjPp9npxz/NGonQ5p26GoWlDp0CXHy7WU2hugtWUVojNjOZfzcoCUm/kx4XYl2E8Jx69kxW2a4FN30wEL5YylzaOHpslFe6DBucrix603T0dT/c3xNz2ckH0W93z9lvGyWnbxnU/2vvtE51j2YdOU7OggDJCL4uKyH7WK4QPpWKt73XGAZ/55pgBXcXKnYXdx8hrixe6faQiBot3QyIX2A77Igjy20pVrsLi/EDAJIlutg2ZFRx7L+OVRAbDoKaWpon1Ra/7DO/P7jvBiNBGa24o4IeclleTnSZCLWvMHpHM2tOVsV+rVWALwWIjkz8x5qslhnsUlgjjEqQIZAgAEkttcZ1n/HJwFmCa9xbm6TYI/Cv3tYlcc6F5GZTvXUcdDf9Dgf26qUnmfgUWNJoyvo2kaxagn6eY74W3qvPNB9Lz5ValUXeT//kXvMWxCxV/0An3P0JcIdz0BflgrIuyie7rmT7cSvDFEz9tiiNYC4Py8gOSa61spQppUW1pFwnJr4IzQv5L/dhGnzM4F73zJxi/xSX1disJ9RzdF1GEn9OZ4eVWiuB7S7Pi97xxdALZQc9eYgCU/bMI2LEysjVU6Lx/5DdhCZeb
/1e6fnonphbwlJQ0SmHM8HGNiIkVb46fRnbEa1Ed38ISfc0C57dDHdnaBiVnVPkkzqtURIXw93lvKZEAt8G5muEmsf331x/LQeeFfUwE0posjvm/q6Gp8ZLEbj9xe5NXIaPg+Y8dRJ+WEJlhVWTP9ybibf2JkY+rxtRR+ROU1ua9ARDHj7PT1jUNOzBVeaW208nIOcmamag2y4Q8qc1vxlAxq4Zm/hzWZrDOwwYGKKC9jZq0JUcM4p4oEkNrEalzyjfLJ+xH04rVPlkKPHn83qBehKlSwoEW707XVyFpqhqTmB6Dn5TpT5+ghPFWdGN/dvaGV5xr6jFGgKHIBch56K+Ei/tqmnG9FCNlPg4Ruu8QlgTRemOu2WG1kEy5+G/R1xWETbzJ59yI/u9CH+1CkT3ESClt9APNVBWu3t/TpDJo6QmD/2VsSTiPJsVRKEVYyMIKmhEp7d3/MQ6/j2Y1lbhjaTuf2TiZsM6Vsvnp79S5dzdrvW2iVTrS4lBg1GJ8v0lMrkqSuTAsONo0YA09lw/I8DEdPDCukcwCkyQJfrpMKlQU4TE+hbkNL9MtKNj85lhB9PrLmx+yBAlf+4wMAUXBq3h+l7F0uVKKxLGzz68daGZH7Ql8qpMP3buYpnodp6QA8WXAzbZo5AhrIimg18mh9e8mCoe7qajwz+kM8xbEsrit2/+eXB2XEhvgz9iw7Ln5QMGqsLlOWN2iHdTHbripPSMZh/va/HsnClxZgz0nW1AaC0Xf678lBIKjXh1Yqqx96Y4PrAEUFGkwfooXDYxEoT8JIjkTqOMRqLupuXuGmBRibTZA/50cXZ57LTIzeO8KTEfoiEh4mnFL1UKGdK5lfDagCs30kw67/cC7kSJWgVXSSade9SJejq8KCXozGCorvCP4M97lfcGV2EIlXN67X+i1GVPdP/xOFXBiMSY6QGeVsm+6jzuR98ggWgM3XoccNIppnhP9JSaexoFkAUsj/BXKee4XnAIbmw8fETRFZsyZkSosdJmUD/S2IqwvPnNF6UgrO4XBvpbaWkKcEyT1/3x6eylHMrABdHMsuqJp3yXwMy5lJ23qw9mmstnHUzQUfofSSsiuE6PpOoW/ThWSunhGp2fZQ/rhCGBY6XVC6xCkdNVmW7aFZ8e2lQYRK/NpnO2OX/YCZAvtGZtTyy+I7/CZfgP6q4ECjI4xMqT8sRVp71Az9+Bn0MKbabSikn6J/oH7fJOwckIL6WjjQLbTNGcA7pziT0o7uMFo4W0lXB5GkHG5NnO4ZmWpuJefOMEnZhiyCGBz5CzQwJLruLh1UWC7pfowmzU/nuXN/1Rx55YJMZlEwoL1nzod8yokSj3vQ+2m7Zbr6qqc0of5OOQkNHr/BKuXm+cEeHN+iwOGl0pn7OvC9e5SvfMVGFCWQvTtW9C4fH1U4qv8eZXfxqN5EHCV2bmHqOaGBLdyssWeVyVtVaNZ2VOKVetmp+5kqJ4bYGZcv9ZI59dCx8lN5K1rOGCAXqS5YQ2uMglXOGKAr2jTF9qyp2/mbo81csCyhOGXp93M9IU7+uS1tC1lp7TCOmVsl/0ZZR34rdmmW9HIQsigmVDJiO21LbSpilWtNO+cbvQj6viYYOaCNkVw2ff4/Kfl6B43niyLpB1NxFRchT0jY6W7pR/jvW26zsA6gVFr33a6nJY95gQxMk2rfbLXpUOFSQITJP1BIsgCU84ikyid9/rWnVvPfMCPtHe0Q45voB64E3EyWExtMoaI9wOXO3EAKy2Id80ss2Q1/5r8vBVVIUBAqxQSkoL/omZOz1ZhJvMt0hfBcavjRbrwOrl+nXtYkw87b78FLP7qg87RIzGDO06Yw9Xx6RTJdbaIpq2B7LmMsRhbLsaz5h0dVwZ5Ws3Katq4+d9/ytCf6K9SoKX8FK6nbrKKEJWFUG2I15pxGk9KMfWI5OscHyN2i8nxxOh+zmssAjeIBNPXuo6Hi2/GGodnsWVyCZoH7sc+GYBnC9GgaanNnNJm
osMv0WWgOyG7h3afSjfOWCx154s13aCQUx7EohhCwSTtvkljIULJ8vtrvOArUPM5w3GeN8bISmYYFx1oY+Sg8QlCi8fJjJ4u7ACKuUnxERXs9nRWI5zavmhFXA9ISf3wYg8nSc9L8nj7mqFMPbPvvZtPxebBTPM1Y4m8Ma0pzJPMSTPRrFlYHzpRumdf5N7Y8ico5ZNbJzNKJiPm7h7qfs/fqjMvbxm/q6s4RRUr3lSYEdEEeoI3+QoE4BmVgjNVidFqQLJce/tDYsihr6kRtL8Mj5Y7XTSx7X5Q3ru3WmgiQavmzsgTBNhDzGdBzdZwtUME4t5O/zTA3/2CK8Qp3vq/wFSSBwSSmAQE01e1kaw8y4Za7vMeFJBDdwyN2ywCiRte5qy6LO56ubzcG0vbCPA48m0xNEK9yQdA9dcCSQBhkt6gdml9JM94kJDJUAjKesbxswxLygRgtqTNLx+kw8ZPsDyDV0lu8LUjsAEWNFb0H6kGTQsE6nV1zFuPuAmrrSsCBWZ/O6XTwzSkzquhkVS3vtsnfAmdoHDFlZa+oID5XxWRO8PNoU+yYs5qFEvluAH2bE/6ouBGPKZI0FjoSSI9GcIXAbNtEOW+ZEvUR2omqjlawM/hpXwtyBWOS5mHFZu+/8tnkFrRjc3Lp+5MDFbeck47iqp+gNcBYgrJpeRbg8Wt7ktLSJ5ruKnOGEHMlG9oQJt/Bhv5/QfoqDLyrbgq68PacZR6sFuVaUkUxzW2Q0LXv5mamwr4nI/EJ2vmZefpJ9H3BRFCjvlkFZ8g0hx9pdkmHvDYTOvOoVW4/WusFu08gQObnzzjasRQv0FNKlLmsRuDvK7a3OA5lhsyYPHrP3oEO2dbFDew4T1TJzxCZ/kE+l/FO5ZtZ/pkGie/e0lZYrNOrBKFPx3GE269cZIzbY3+z+agn6GNJV4Wm1/A209Pxf0OhwOaeTQleizaeYpgu6BUkrD1n6okPdt8sPTUuB1pq2zGkuMG2D7wXlsw7PAbJMTT0DZ2pW1JFwQD4YOIMcjBrqvUJF0OHvjLLm11Tik1YiRvhlsHh75T/s9cr/XzbbGW42gZwFMNviz8IfD5k9A9hr6+cQ+MIa3MUBOIRwajnUDqwForb+T1tDbDhemPAZ0LyzeudZDLTTrqT28oWzC6mr5x9uORwvnqNQCuiqH5rZ6JGtyCKIlA0zl7dStDzbCciEhEFIXbOmNB853WbpMzvBDItcmIYETms0aNgMgXihuN3Ku3r21RIKR0hxuuKxJtWfju/HOqL6qPwsz8chyGRwVNzXGemWaamyPzIbE9jDYMk3bIvVn4FvzwA92eIz5Pgt3S5WM2kKFOIzDr9cXLkWD7BLYQHPnzp8wsflRHxyf5C+B/GCsfjMSWXCMQqDUR9BR2U5fbGaNRnBQAWfQEJTJ5e6UIQhKhf2tLteJaUxMXsnBwN+ZG2x2bm5uQCjz4/9iI1d+k0IPRTDhkiXvKFl6ez4qEdTRz6FqoZZ3SD3OxnF1DAXlHCZdem41dBeNyvQqKNAB1ZVsIf6VGG03ZDq6ykf1qNaYpt0OQCEUC6dvc2YHdprGcSjKRE610iuo/6mV0I/tqP23MTZi0GYjKAwzA425Nd5zMmfSYKod7AxuuihqJrWoghAkcLTphQ8sG1RfmhU+rLZ+p2JFSoaIPuWbnUcKq/tllxUga4WXP4Cyuut3fNkO50i9jYzgCpt/7tPnWeUvRKLSkupEIIKZ0dn4hpDkuZvQ3GT4lC3lxF5sarN9qdnNg3Y3TP1SaDP3GfDbAK1TYmxZtF9nJh7Cljxa8se8xnYFh/ELKDTKomQu6rp8RArKT5R7bC0MK7zcb7yktnsg71SHr9waD4rmXS+SGM2P5AugXTSy5Zo5o6/BQ+9aNKHECYYuWCgmkgbpKnAxh+n5fL5Imnq+G/o0k8GsDmGarpDYAEQ8KKAnjW8Fo/EvSZI8PyZ/CybxwwXAEVwDDCuZhEfHUmvjwA2N7XUBW84M8
/pu/7kjJsEh7SWsVQ24LC2OWUwFkOb1YmnSGF/7xgRdHR4WYs5XZwtM4poenNoIu1hkOOCWrvMoYGDgEDrQuw9lEV8J8qPoIXdC8OMMuReDB8q2nEnJOteDhRy3jGL+R6mpA5ztdNIcm8rg+C6jssYmFU0x7hTSiEo5zolmfiATlrXEB0lmeJxXYiAeoUQJDbzE1zDh947y7wyetnE3mIqyZHDO4T+i5JzxaxCJx5+NbuhanRdKcX14lpuDf0sis9myEabCwqLslT9HfKV72Avj4u5d9DL/FUM0VeguXE7di2t5FZ0/CgSpRdanCBTGAFO2xN84mCraOzS01fzJzDilepepUSBbGc1XEbqrvdQTx1+zsSfYBuL9wHJCchpCd4uLI1TUlG7d2ua4WH89dDb8h8oSq9+9ig4Bbxz05l0rnellULEOzXlcc4tqi6alK9S81S24NIE4MYYrPouvfe7iH6SGfzfmWi53UfCTKWghDm/c4t70SqHzZiODJmzGPbuWkcb+sfeiS5UfFLUucKIVHb6jWmR25s2vjz9U7EhGIMEg2/eUvvivzMz7PGtMRkwrc3/ogikfx9/6nlbwArpJP+XRIl/ln6bNr/A11w/bPqm+LMjz0QiSfThwuzXFFEZYACHvGMIyqmB8E7w0ziazIfeA6g+wfAe+/cR6PVSvvglkRLdybyDzjL64Xy9XGOA4fQmsWwUo1doFJoYDljonms7x1YTnKWD2mzX9NNn4NzsC8iPtF8ykYs6vm9ZZNdTWWgrBAzdR5uB8Y2nz4W9xWi6ye3E3U1x2AjNNQu2+rL1Dx+1Fwuab5cUh5XD/aUBqaKVdQpBBQ2RKzqIjX2s7eE2cJoHPyt09YPJevzjy+FsmRKHYXE4/uQbfpjMs9Lt5z0zQtqIj+HvXX4rUHK5Z9ljZcuyS0P3vQLogtN4ACAy8EE53rpI2UEJVUmwZ5qDGehjM7b5fC8Aq9eyiEY8VuAgGNE+7H/+rSDTeSwtCbQSk8egVxX2LpPh2raldQakpLqmhkrx+d2Dm/Cg5HDB0xh6byJAMWAaotAZmYAEXW2peisHPjtkGZZsXsuSfoIQleQhcAU8XOZ1d1oow+fxum9+vkrjyqX4aO5/GZfLR5ST0aKUKStVBVKpMhoIesBczrZ0s7DEUonyQuRPRO7xx0K08wd5j+wd57QVe2IWfN0DXgSxdpGzrHmnv005xVVROAWW1kJV0wlT5fWJKA9Z6jH1JT0OhaM0+r0F8/C33vVCDXtdg93sUQwwMaWQ43NodtzeTCojb+HotFaqo/wZf3gOtzvA7gIfXdDjUHd3ajOneXqu9Pa4tfTmLiwY0caJaipRRd6M6UHPopXQam8Y83yZKXULhP62J3bCfYRHpc62G0MuXbwIjRcK/X+T8MJk3DnKSnOaMgNnmYn4bDqpz8VtzVWaRRvt9OUUz5cpiH0R91v0DGtQRp8+DGl3x6mGlTvdbhjdycLHJaJJ1KM5lfzv5rhV4RojqCJHQyLsCKDN97V+twFwBL4xm9JURTVvpEUokbDL0IqNnKjXcHhn6G7hlu+Y9vmqUF/fH2cuwdSpc5qegu5I2iLI+QeQqhbKWRbPIh+dibZdtRuR+ptDd0aj0U4DHCRccWM7pqZ+W5p1gRdJZAchXhfrsxaa3VN3m5obbo872jx0qxXCBMES1SqFnN3pVM1QIyuEgvEDmlJRoY0shbKMxvbVnVZOYNbod6UcD0PCHRpcndNnGgI+Wv9QTKwpl4dQZ1PCmIXZzt+OZPJ5ap+AwqZXnjeO5LWGaWPSny+Fpc93lkFQxmnZldLrQgtzdBeHb1zxM+SBhjKjMaqE9gh48xjXqe0WVYFgA+MfJby9Msz48hVEGCi/yHKyhaPMiuUvabiMMYeSlMlbkJBEWsuWGM5Kqgj1yIhJkItvZ/ke19qdo2OsxFye0L8Y2yJ1GP+ZQRjrdbDNOuxAHkm0CAxnLkIC1U+4qp7Ncdo6UzaTJxNtcm
SO4pwkHBl5tOtY9M/niW0FugBDA32VnYek2mZI6dpvD78oCv9liG68j/9FMb5nUwBARlIUIedd3kARm73fQWHTbRvIZ5TSxua8GlgczAwIhfMaxzyL0bH9X1SIcQ61L/KEIcyVOQAwQMDdlcwAiXCJ7hO6zncnuh74nDIfnn4YpoaJpxzEGybCVCGUorBOUqgOMx+VFb2XGW/Qy/Pv7aU5wA3hk1ykPIJI5ZbnHZetL57n1/qbp5ptMxtjttREJkVQ9G+LKIGe/Clm50RFyjYRAXT/ynVpmNphxrA8+YuDle9Oj+snJ+QQa9bavZzbohkXEOB22kVb3DQ3pMX86KR8za19f7xPYQsCsBhO6DEbyJpIysV9VTdjj/K2S2o3y78NSZZNocihrUen2CP+8e58ZsPyw3SBX1gpaqmd55UkNAOXp3ONt53/8SyB3GExjPldqaLdj695FHKFwx+E4kvyMAjQMUM6swW+MkxjWLfhqSZQBULerbgDPhNIb1Kn9ESyiL5x1ULap9FlYfa1NzW4SNr+KU3TDinQ3qgtqPMOpXyiWP/kTZZZtK6Qkg6aLu4OiXBnIkai7k6c/WsWPQSHNtvNBQAJhKxb4541fR1Xce0Qh6PfUv+I1iyynyvHm0UwM5M6x1XKMXVNn/6L7KhL7STyJ/oPzzb8Z8o5KffVWYagbEVxbQrBUIzB9H/k5XmgHHEfIJ1C/BDjj51MSKyN+zwJchjQ571xdQ4EPgxuM9oj5BnS+xQ0r5god+Uwz/1uYUgub9aLMpYijA8WzxL+S/wixKBhcfD7EWVvtcYoZw/pFB77Mccnuz/v7TzBfeDr5mRZEvdhlOgggkv1VDI2RVWhrpRjkBh5fRoot2QBkRH3aXQD7XxEYM2c7NSHSJQgrw1825RFCLtE3BuFoEyXFgC8z6D0Hd3l1b4UeRvWAVbxIKG6+Rp06DwTsQg8g8znxzGlM2nFEYJOhPkzEJuk/tPw66EjGyJucy0hFeoaUVaZUadC1YoFm9+lHaNWS8R7NMWfRgViRrzLa4qDwcerNPkMfZnxO9sTDpSmWgZt6GZgOXp2CzcL47UbaVLVLdp5MJ6Pg4UHR7tqlXsrBbgZBpp3bJeXSUrM+4MQKWJZtqNPoB6TLZRLqEVqUexf2bd0gMs4z4GWfoeHq+/5grIa0OHS9Q2j4XqRgCJdnP+oytXzUaNQwI8SIQZCiWGtFKx/UvjDlrfjLeYenLMSsveb2679ZFr/MTRcc1buRnRsPh8jpDfb7X6ggWy4eqdacD4y/J9aHasr82OKyz2rhvJCoB4Tg26EbbzCdGKzSBh+jHlFJ3LO66EOFQJNuF8MKSuP3deuCye2UA0nF3CkNjU+w7xBi1EiO7KWcpd/GVfiv24e1cXDNM9LNiJYEUw6oGX+SAa+DCukFT//HNCNaxjYL+4JTEN5Eg2+kwZ7XnNt3uUQqMu7AWhEGR+GE5tc48PkdSl4SUDYqxA0FXNI4AfFqhvP54eJ/4hKkwgmYH8xClJrtEhJUfwHDRyx1OluVsznYoVmvnqPV8WLEk7VbOoXtcj7Ih7uS8TYKW4YB9pa4vBfRyEaS8MXqE7sHk0T+UgLaFbhZT2+xW+EaySyn2MBS3M/h3b8iJbhcZVpDSJDoCrtT/fWam0AHPcvX00/rGuCPlGLq1xQD/dA2k3hFORkEwiYDDIw98lcxvxS0ke/v+ZrC/7mgFBwrnrFJMPUI2fBvyUtLh/x3jsY+Cwp9WfRWxUiBeIZ57pMKVpDgZYENKpCcah7+S2egMMooZ/U9LKTmv89fGvylHuWnjmEtR0k7GllNcfLpd+zq7OJYq+WzVok3qTdYtuOrNJqmFcU3TXFxIEcEGabdH7UOwTUHH8GgXcsjcxqu33qMZ/jsXp995ros1XPvUDnhGaIjX9QQ66JOmYRn7V2Xl9IMfqfa3Iowu5yKSSE9vUx19O+OjOHwYkf2lseIBQyRTYH/z5sbhYPTHJhwThNCuY1iN
t0XSFQrMC6yVk/9lkhotdKLv5dGcG52dZN8KQPGFVeeAZUp9GOq/RRBF2n9CNTXuDznBzrYnAYZopCidP9+J0/eY5iwDwMqFieLxI/IITlrIxbl5fydcwsmGn6639cFxsXUvHGVfD1dyTV9vr7wzcj4jvL8SSCVuVaA2zouS6YgcfHM2MyhTdW2fj1rI0D88ebETeH3j0giYrqFRPzBG3bnqjY3loqQNF0Jf8aNqhX0PE0p6zviOy9A12X8WXDnFAfG4+3LimbFDVbxv6kNHChcDra8YrK1fa3BU0NWS+673mb11bVmY2WgZn2IsythY/p9KqcWtk53E7IxXCJH9AOfVTSWo1XRgkOjwhko2vLuU6ubNA5rNFLOilJ402Ws0nz6GjPFZL9xzltLscbNPMt7l2l+Z91noZuegVYJug76UkOOZ4n6J3247CndJ9R2tnLWrru2c4+n8i4/sPVbngHog12e79JdPw+MCDhQ/lifU14JID62roPr2OA1Es8rFS6GjPsJl9gwV3XTNoZ0GvCe8LlAkd3kFNIEjeH0SLqK6TZtcTXuKhzCtwHKh9aygm5c7lY5UMfE7d+JlpdWhT8gMBWt+HgLXVVrWfDepXgd2xChjWRcmQWU6IXkdaO8FvlU3HgZTvx2vAQEUPQV/KY7OEQLMISL/NhlXhVPIOsJ4RdYQxA3KxT+oNszv+IHUUcMJ2VTL1sMGB0T2ZH7i8sHkKeZdcWiybDJEBsbwDuu8ZYr20wNuY3nj3Vc/Z48oomLAKF7IF1xWcJtlhWno23fUa0J8JiGQhj3ZgxTssrX4xyLcAwpiNyzc4pna9E7fZsag3ye35SoggLOZMorfYTtSi7VQyfGD5HhFzj+jqmQf5cHU1mylCIb1wT0OT0NtMGpg9kzUgxuB1fGZYd5557KPgnJaJNOV3dM02xILzlNKLf5j7rTwGTMObGbx9bIdO/rITUc4eXi2Nf2WJMbIq+luo0ssJcn7BuWnqP/HJCNw3SdjRn+Lezkcnh9hUd+D5tX5C2lG4x6RX36cQdjvzGarbu/rDu/BCz3TCgpXlnThc8o9Uay0+XLaqX0Lam1jHRhB66X/saGKp5R/Yhc1i3+H64Lf/IONuhCLDJCyXOeb/5j1xiGSHXWUMMGB2UCMu/uKho/Mu4/onolqsTqO93oK31KNpuun8rEibE3zOlDbkIMSywcYPkbOi8YWgplaQ8QdjeLzPsuWVYFbWKIPrVtRAnYKG+dqXVRwdjuAzWTIfnEsxMPglp/zFiss/y53QZkmJPKc1QohzOrlt4qOBaobVuApjheRJhhBXlBlYuw0FweVwRZwmBtMDT4dCWfUjVdzGW/Gj3ImDp2z03GmkIRdDNCrHeOHDoktdTbDD1X5OOxPJJZ4WCqKg0PoriekBjWVK3oJBqCfUthM5xink9xuqllBXEBaZxkLWVaYL5R1DDVg7rj0VgFBUTW15qHTstGEC3OWsRssnyTgme6SK3eSzJxxWRObZE4sgSrczArcuf90NWCNrMdcGsN0IvZoTwryBqOZYbTxyQjvYWkI5BfzTsjH4EOyGwyiFPQVfzL00qgEmhUDSBUoZHDnnAhTtNXNX9lYl5XlajHTlqKg8ucRydylXaqklC9LNpJA8nnbQKJif9X1pVcgXuxjWInZ0koNe8tNyKY4ueSObpvnwkpqzNeFuEsC2Zm/eWdmsTIzIJotukr2ndjkRrmiq+UkKCuGFD1Cdh/gajibEDEMrkMgXOuKTMF8W9jRr2mF7gByUAfczYNP8bkrp8tqFCkuazb/IykxqN+ydzAGmRqPOM7KsyDZDNTrAMn+UQU/URk3bJPVBxlRiuRAxnxdzeFxu3fUZM8Rk4fAN3HZfM8wUmREgsNpfuGwzupl3QQUPG5g92dX/pI4SdJe78QnJj4/POs4+X4G557JM6/IBR1T+I9xUR//nwAQmb9U2KN7PGaQLwNefV/sWWtNpC/FpZoJEymaRHRuINvv7Lah
nerx2wmIT0PTreS7V40ZlNlMuhkKUzr+W9LIG0+Sx3QlHbMA7fLQq3veoCwht52TOperPJxiUWlgyISO4p4uoDudPwOpSqqiUEW2SlaePWxaX7b47u49iHQ2/zZ5ao7ixs6u2XnSqp/yhAZAA40oJqpclIx3bCemSj2HyfPBXk+2WauQDzhopQ7ifzUV2XMjWpjIT+MmpFYQW4vJeM44WnJ1om8FIfEZoQWaBza9Pf1NxQPpyhAfOV9fHQ8nVzrvWwTV/s80OFnlMt5wkpXixCDJ28HcMBn8Wj6wwfReM5pUUE0k+JNT3xYA2ieabii9D62/SYslibbizUu2cExpE+/nxSszLsRx6bfs4c49DIVtdozAfxquryho4sNx8pq2QluxTQLA3qevTjkAYWnaFaTcAVbOdSGgsqQzq/136a+sjFWmgcFfRmrJL+vMNzROVYtYf4HKSiA1hoJJ2FA/Engg1dV3b0SiZMHb/Wuc6+h1n+RZK1wZtmSrnpIDrkwwQfKbdNIIvJ97l96DwWoyj88vk9JUolXVySlnmxvQ+Hi7XArk0J8xwI0plMaZbdhvILk52SMvssJ/hf4T3Wl2NctnU5p6XqMFj5581HZryuljQVzLcTBLiNkIKSNDU+x9Z1R24qAAmVVVMrq0xDhrJoHt0qqNxfuyZcJhG9/Wx3Ey55rEX/aTDVR3KoxnHGg6hWWUvZmjK72s2BHIQnLENt8aJ0o5XdnHxmC/6qvLeyTE9kgfP9kQZSAY8o+e+weC+XPksUvMTObyZaP9PsH8eURBWd79DaUfrpKMOPtwxxzjkxLFsg1fK1el9iMUmBVP87ASUg5Moy2loItv6Ui5ZYtmhAK30zuF0jYPEd8Uf8RdNRmLbiKP+e45YtPIZB53cRzf+EpyveHl8sKhwRIGiL5NKSSAKvp+RiHBkMKzKhi890aEEHDB0Xp0OPsjCHYyZ9V13R1u0kvKotwkYfz/ISM/2Iseo1n2jy8l9V5zgye7izXqpuFUPCKb5od277/w7XeXHVBwzDW3d7C2G76FFFrhZila4t6tWj1S78fQmerwl3I4MYQCP7QdollmZ+3JhtXkZ3S8RS69f25luZ0ODQ/SI/lslj2u93xWyUOtWbLsPaQMoGCthYq0CDwho8FgEoXxpD/iNX/Qh0xZUhPMRnFS91hY5YwImKEPIuK1lCxelaAQQkaora/ycTiI3Lxe3zKpnrWiN9XTtuZV237n6nNMw5dI1p4Ul/vSOXTsIBNPEgV9EZuX6zl2umyWZVv1w16W2rH18cEeWeUHWNpSL7/F9JWXfkU51pZNZOr1AfnQjzZGIS3ko4CwOz/Bnehw6EJ1Yzk6Wz8W64cnklH4UDsujSq9aCOJbWxtri6zXTC34eTuH7IfKIs5xZ3VIPG0C3v1BPhOHzUNBIIJ7ZcCYOdTwa3ZjIXYBe7+KNb6sLjOECoQkmN49Lg/SUkksMbU20gkkik+69NT1F7J5r0Lhy4xGIgKcZnzfAYNNHrBeTQ1mm2Vhy8HPG+q69FL4ZB+8hnEzdUH4E59cqA86ixCJYTjahKGiOc6qxYNmPLwQ0O03QagRBbjBA8pyPlZKX3zwt14DE/NTMRR/3pt88xxsGVdM8YUFxizCYrg2Mex3Oc7w0eP2Fw9gai2Mm1XeY7nko0gonMxctyzYlyIqHLQAp+Uq6ZmSYrhdvxM5dP/EAM6eioXLe4PaBsHPqHL4JTLsxn1cA4xj+v/bhoDQszdQlSwQlqUdEo7f7vpj3uRCDG+xmyelFKb9pmbG2n4HSRWLiLKz3iUcHkuXJNVi0PR893S9Zbe9AoPXaw3gyhlPxg3xLa2PAQ7njq/jkmvsGjYxfabfc0gt13L+B94XrDrb2CA4i1DPxUK8cAOaFBwRoW43QQXRhl6yg5j2ntNmxcEl99JXXA1wVt2Ni0m8HjyIG9JoQCPnwdry+0yVm5PgQ252FPWG3ARvY7KDvn0t943BNl87r9RmkWcFAsd
AE6H2QXMeWSrFh05JGQkEIPdmTXLIy4n7KnN+fo76HEvDdUokVunZsywN6sQ63MCJTNXU6msW51M9KSHQ/q6YcPmlm6m7rCFHSSVIX5tstwd15QQWXU6M36e1d57R+THglyidYGb766mc0jm5SACsr2D0PzY/QDZ0zmsXIKF6yuBHGrDsogriL+B06IPm/MGAguGEjzaH1VezWxOCvq7oc6XDPiKd+QM/PhV7uCT5rpsfFPZuioy+oQkt7mJAuRMhgWRgtfNze8ki/mH8baFigrYra0g6nRbDeAdPpb2i9BqQTf+FaopNi4EGXWS7PXEINCeQOylQLCBI/OFkUCVNnHq+hJDj/5bH4H+Fg/XHoeufJw+Uwuh8PEHu9qWi/VXwazgakGdfGl1yqi79R4K4/EEzUp/soXM6rccd85zfh2vqutA/y27tGkTjRPj1CWoEfJa1c1z/YuaZa4FkJQPRSkiQJEUK3sp2uWE+x5kYzHHIKGl8dRrzv+2rlT/m9izdD/+KXp2cnqZVL5X5h+e/T3HMmFJwGGsZ5Dxz+sla4v54yxLBFQoQZTHCqxnAE6itWbBDj7SGrD9UbcZ57/bAhIK2afY8G3R0Msa7fyhH+afZqsWjhXZFdtEKYQcK8T1sFN5lK4tJjRL0JCvXo92Uec2VlMm5xSUlV+bdG4qq8Qu9aUwdaMqvb9fhz/nXDoCCuTxwReP0gPj2Q1DixkerIDHlBzs0sASej7xk9UxTS992HmYl+oZKRKqh7DTopnQ32pXZzR9TxrqVaZk8MKbVg4FQ51YamfUzFEV6as6nWF+XcmkwN+YuaL830OMvAG6f9gC7UuihYDMrg+wDIl4Hb5Ad10jZbCZHL0ojo2A3GMMSYOjrAkuxKb5OEDcioHovR/kr7K2qSHybDuvLQkJoqsJxmXSvCtvVLhaOTc7YXsPX5ZcXfkaradu6DPhU5n+ghQUt4C/CjXMnwqCH1SmNcsbBZiGaNwBX91KB6GRJoC/sHWEwcLB19bLa9pX11d0HoCcoSGeT2QLzYIAcb4ZEmqRfwcA1SlPcnnWBSt72+jybiHU/2JYdUSKisx0pXHnG2zm9ylae0aye2Q9FE1SC7D+dxqYFj1gPO7/6LifLK+0Fb36M9S6GPosC04UPpaMjDMy0Wze9jEB1wIQfksFeICv/fxoqRtPeJT07WR5G/fmKFHlJaia+5Db2RAqcpjA3/2VWPd8gr+o74Zr45NxDpL31JDb9knTOsPky+1UNA/ZMCLlwJ+JfAaZapKHxkyW3DE2C408+l7PgwnowzZrslNxqKfNVtJNt2TOI/WP5epJ7ULqrYcLQQK8rPqUpoHliP4oiN5uizkBUcSTZYThUsq6RVYRSrhzz+81iJv6sjJPlSJuQVZfXgSsGfB8bqiGIf3rjTas8Y3GKJtznMzzG5GYuzK9BIF8ecAfJvp6DjtkU3csUci3Tw9FqCDczNYBJVrb7Pdn05wUKe1Ozk4SBO63qmehaNV/u7YSrDpdouX/Z6yhRUgTSo0ko6Xrlo8nasYgwIQJKgv/vkGb4TAgVIFOnjQtP83zmub0p9ZgGzO4xe2DozksG1VvhYToKU1L5oUD/6yZiTZ5bwdFq6RDJ2FTaxPnIWfv2NszL3klaAgnDDZ1XM8/jCPXD0colB/KikyD6PiarQvX4UrNVYxT/if2piG58CZvi++s/mF+fyXIBgw1MU/BqOv+D7+YgWilkNgkWdEKknI1hrRQ2KWuiu83KNUvRnnTio/6Fpktm5MFnd0Wq4Oa8qM4LnM+wE9EIUFYqyBdclhIvQ/D4KzM+trUr3qnXOdkQKB6dMEJAAdSMeHkNYXPXi6lR3oz2vNO/82yse4+I/k1a0IS1lPSl2UIDjWKgsHUn2AwuQFPOtBEs92KmbtWM0fxVz5jqoveSurhaMlLAkuejNK9+npE2vK/sA4Uo4kMSbDi+x9nB+ScKYK5RJSYiTLltPXHY/CD9a8Hkzu1tFAYT03T
ImuSBzGV6TlIb2rIhXYQ1SHTTS1Fz7SJVk/RZnSqvZ/GMad198/6IL2iSQr5Vg8H7lr+NvyF7uO63cVwW4BfYyo2uvahZZhkyxKfhSflwewxkJOwqKqCtaTOivJKLQzmyD7AWa4xWZFwx5DsUeVcSSWnmzzCBoZP39a4hBcrmVLl359hmtR3hnek3sxkLejWqxGCQD9Io/yh4kzkGy/78a/nNn0USHl1cVdsAiYVLD6klxdyqsg355DeMYR+lTRsIOjp/LrubsVbmx6gq9VTTrwv0zXqXwMLozjUPs5AAa7U/IZLn5dH08uABzG2Fdw2BBpzN/vgTOYYkW7LhAmu5d6xSgzfAifyvf6DgnD3nttVyNGoJ/Sf6Ys45lLSQuMg8BaYh5yoOm2nvp7WGRON/894vt3lsOu7XjgRkHuPuOucoPhXSTQ7xfXXUc2m1o6mHzRVqQLD+9nI4oAq0iHz6JY1u6WvwTPmAg1ftStBtGWn+yc5uKZt9ar62BFqfafMtOehJXQW0vfKhJrWLKBzWzFXpbif3SHYbgVTR5SUsCJde1eqX1LlV96I85OpoG9jGV7pZDSuonF+3idKNnFYs9KNG8sKaudJd0VaKhy6sahiVLo1KkyKq3WmLWiODXm+iKaWS3yArw/MsgzrdDfP9FxxN6M/HFkm9Wlk2pAuS5VmrpET6/lB6RFpWxZHYvA5EfGWYgKKMV+TIJr5MUsNy8okjt0VIlQlDWdQgx51CYrUptZ810y7DIWRrNWh3ufsGQJNPvuTx4kc/LjLTzq/whRfLCTez/o65T+QekIPERovG6dI1Mt75bhutMCLyVFEBR0GR8cqVd0ek+p/qr4vS+ijrIRmMaFmjjd5maUxWqPgwmsrL4+HqNTRHDF+vTFGv7zL+TbfCnuB1JhvUHcn5ZurgyWNgAJZ/ELeZoR5DrYuT8Ltft68AhpOju7e+Q1j3N4fWHpNGtRr1mD3y2p4K8+LCw1RSATIjDBLZUG4GpGChNCaXKYsgqfCYfjn6WjCQf0SHUGowqaU0o42sNY4oYslxhMeUggIDYPc0lfrQ2thyh4RpfGW/Ga8j6mK4uZ4/wtjtLNpUCXHWAAgO6ObL0laiakIPvediBnBwt1d7hV6CCVXktGLCgqfgBYXDTkt6UDF6p2HgpwXJGHfBNgLHxrDjiZDfLMFf5KhuPnSfyX9X7NJjpDEeXm29BBVlAAsdP/ZAQLSMgIJo0yNXUhmhK4xgMpVadfv5Tb92HcFKdBhJkIYCvm1QokTq0LZYPnjrGF7ngkihITOcMrunaY7kPqYUvs/hKGTFPSuPJfBYMhZpoF8mQlYNAPIKg3ZzReiWFIpvWpqQ0Nbr2m/N1ZnGDzy+0n4a7/ZWtjcuK1cEr8E73GNTRV7sO7kwPaUfa2mLpZuy0e/Udgr49sa1354maEPUtC6c8+9cJt1ZsFoo3jXMws11cBnIpZAHK5kOEd0cHywAb3uLgv853lCxRkDfnbY42OHbA5QV+bPZ40RIAqydNa40prG39ew5eOlIGvPCxz+H6nyn2Ivb3fE0LP3yile34Y8NN2+IO/RWyLRC98FyGb+NWvLbEcUHEpZMVBJOdMcuBcFtfi5B5mWJsZBz4Qr1c6B5eIT0GaZ+SCEt6i4gaIRSzrKQ1HumPFKzTJsbluouKLhNAI++cuM1zr3S6ktVXGt7F9qzY7RLAdZnQfZDbzV4wRGCKktIX11k/mkr29N+8Z7SD+Tc1Vg2rxAFGxKyHyZ6ekbhURP1WFb620b3SBiK4mwYJzZyqkvflRznUQ3W2HSQp8mXkKOnhLZgC7KpC/Fz4W0yNyhTOq+AfeQWhBMWJMyx0hwMsTE8iiQb4V144hqm5W6XdhL+JkQ8VveckZLNhbmDv0KJhMMy7e6sikHHzDwuqg3ftLjB6PwUvZmiIgcKiUc76xU4OTY9aZtgsLM1n2w+9DokhReciZkXPdRV+frn8zgFu9Li9c3y+IxAdQYq2TMAgq4
XhDC1AobktdO5p6KqKDgRyGZNY+vyoJSwENmHID7NmrQSarZ2yt39e70+7N/qXcuQbEJ7I+x+b2NtQaZCNcgeN71CR0hc6NuOVy92Jnj5boIHZlcongU/eXxbczq21J9Yc17D1Gam5JkGc3yzpiLu1I/Ydu53nMs1o3YJPU3xR9x5i9DOd+wQF1HCS4O9ZovC3ccy1dNZw53bg/hmMbtqyGTIbpgjawhpjH6ImkV/70BlYlyK3d1Vws3mxTJCi6D+BDiFP3DkIJy9Hgix6IVT1xkpZHC6wha94UG5Y7b0vDoSSDyk6b1Q+pupRFY1hZwGclBMFsCRu23Oi0kjNRciYLDv1SNxji9F7a0C5l0PFmYLmhFBMSmlfSVmuH07mQ8ESsKs9+J2Iso0Uaj5nRfeJa4oh0esScf2VMSTYXSwK7E/SkCiaZcIK2s9Ep4dyIapTY4uWTzWlF/fPUNROZUE9NmrW9xyl0VdMnne/4Hf2eaHCEYFAFRMu32lNRHH8KcfpJV6+6yH9hsX+89vUTEGg272c0c4Z7t5f9v/4/OJiCBWUMeuOSxl4Ak/6Qa304t2alFq8IQuJQcOUFHQr5vRI6THPv2CBY45ypHWbOZBW1ltEHs28J177h9dC2QULVsTmiouApUSfMO/cDawVma/LfxoH2QrI9HdAdW7E8+wlzS8i/geDShpjNn9uhQa146Gb0JW+k4vcvohauiOdgk3nGe69cOfhspvfaqthC6MCkpJLxg9Dvb7jOE/+4z5CftENopHwFi99DhtEWRTbN9htWWrQRT/b59/WvWk5rMsKrjkO+Xo9Vs6IEizoToepr+9kJ1MbIlU7g5sMwVEJlVs+Nv9T3pdURol7UTPz3WaUfxF64toaallMHSiUQO+xgvRj8wp0VjCDwZrmL3AjfUtxlHxfZ1ESV4xgQmy0Hk1RmRHi5iAQWEOOZ1JcC26uL25nY16Xytj3H0JHc7xwM1crSODpSuzH4JRxaS8xA6cyRvpqhyI1F38FHX0cTOFAuamj2hgDvy88sSYVPRzxgmjepvaVtFdvic3xmXZZZB5R1W2QjbDTR0Rf7saGs/L2yeDYDdLZTpRtoFf7gYDk9xbdnmZb+wgjt1KcfE04rbrxkYSi1Ht0wr2EpiCu2BX3cLYUi50+v7+MZ6pD1941v5piwZ9Jd+JtpU4icZDAnKtFgSy/OfAYv2c5UrmIKDa8rgqz0UXsfR1YIi7cOZlAinliHbJhqfTQKSoxinr5X8fntbUTtU1BIu22QLXEOal5EWNVd/Y1Ojhn3RUNhCJR/dlfko1QGy9pO42GC7wWhkjKSLEOpTQfYpyWmZObKoOrMWcTeHdPinWLmmk2ST6upJgakKBD3Cjd0w3DDpdElyJ8BmMAIqEDbt2FUVCYxpfY0U1f8oMJBEwRvrFIWB9d5PDeCJ91yF57MTDvHtv7T+callhl3t2h5gSOb919mPJ/jEJ24lsp0odJEBLc9EUTL+cdfNqbPfEaZ9SxGbZyCaku/0lEn04qSy7TlZ/g1s+DMR8nQZdLwaxP+FeW4TxoouUopKH+OzKfWifPAFqBDKxVgyRcX6DYNxk7+0dIwWpSSOzFvbo/AV45YT/elaP55bRQvWzVRY9/S0InuBuAszyMboV6WLwHxsnwdrF/mf7aDSPmge1BZ0t02KttXz0Mo1XwDS6J3vaabCf9auYTHGjRwfnOhZd0Rf/htYKj/rtdyRAXoE21fOl14JDrVLuS5RpGzaw5iKsFJ78aY6DbHiiDwhpZMFcUVdlpt1m/mxk6ld0jCNlxoLGKbNykNqRipimyDadWZysXtYb+wWbegMA3QC5xjwnWcP3LHxr5D7fY7uVh/SEhuXL5P2pRP/uNeUrKHz+fzuiq9yygjfCM0qG1L09khY9Oz/3T4vfhdTbubLFFx08LGGkbZILEs6MNNEMklNWfIPbFRizntZUDosuYObNVSOd98VuJmt7K+nfQHxbnGWuUV0UrH/KFNE
BhfyoDjD9ncWD8UJ9WSkxCjVkUarlW6UTObvMcD2YBr86viaEbgj0A2o9qqM9df70VwAh8f3ACxHKl5uL6qi29QAfKLGSKnJshgkWUBdo016pKkF2dNfsoZs2B6pMJrT2zv2AmWZt6RWkwqWh0IYwk+XX2+5zh9qASpzYZZr1Tpo07hoboLZatUfJKt8POCnM+SVfkHhc4SM1Ojd60ZjIdc0m49Ydzq3293HXrxMebd1vzIcCecgLtNAFTdAu1T/PrWWVGw0yBdL/Ngix1puc4+Fuu8Tm/sf8DPtVwSsr4ck3iw3M0AqDSGSnmvbzYearaogN9W3/6CCxnyMgq8KG+FLYbZeLrXBdqA8wsJFkPblXAHRKBPHvPBYP9HnFGtqQSjO6ASjkE+3GSJMYAEYkjq7h2FmUYlVB31xF9vJIVldkLS2v2RIIOFgRP1kTr4tpQJS6MRitwuCshOcMfviYfHIWvVI3zr16czYuWVmXv58lY0dg9dbaIgerjskYAtFVs1TA3DK0XLd0MQK1zHPMdzQ2VoFpm0PiZUfYLphIsNjqboGO5oKPo9P7n38osMfjKItdhzUxyLAeu/ghHkZJMPpJzYrILOCYpgwHlXIN4Ui5nPEil5V9RokV6im710ha3R6qe25sQRpITYzRlyFJHHNZ5HnTrpXYzjo4IyAitZdahzGNlh2dk1LE7tQ3Ae+fMmEaITUParWZObbQcNDosjgH0TXoBrg0BW+CmkR03r10EbZrrVUG/lh7OwXleEWv25T03cf6IhyPkgszyAjguaELhuN0FmrZwf31Z+wjxEU4+YKPudzLTwldkASjpK5IYbkl+MlaMqE1Y6Dg9L1SlGlqJl/Mhj133DPkmWqGSTwYLXaCKlVW331KCXirvHZW8dsvnpwAFL5j8d+dO1pNabfrTTEc1XrxvvmDt9Tph3krureCoCcfEVwjzdqfpTUtYUYh0GHjem8yNL5j30GLqZ/NzRmZCpWCs8e7x37IDAuZPklr1op8s3npXrt4HDeeV8wH+RLEzx7vxvnlwso8lRIBOmLIOfkYnimYxOOHyT1u5/pNuDYSAGAADA2LZt27Zt2zY+tm3btm3btm11iA5yWYrSchvJo7vIgW+3J5yDjCMdC5VBFWYHiQJkVI2QTKUAJ4BYvZak9CDGF1UGVdFfuZMoHvfnDbwmJiIXRQK3vqBOZhqo6xHQ2BwkhiF6NstPNyYQj51plliIAiEaUthBQlyA/MB/TGslf93jB5yjhAw/vy2jm0jYsx4guWLd4qsdWPvwBwcbDgXoed6dr01jTrCxQ0ryALkLxHa2USqRLIKAeJoejTpfPpeQhHwBWP8C/jVoFYKqR0c/KbsfQZZew7W5qDWea37jrMOcPT8CGoamSsMImDvAZPJtu7cWoaUcbho7HjeIuhEt097u4tZTt+GgRvBsOdjZV9M3nsyZX9PeV6XFNEVXNBZGvysdf9gBWoRH7oXLU0ufJIczMxZqsStPsMZtAApA0/IhJzPShjPQM8N6L+jfqIU8E+rX9V1oVa355Le1xX+x0KvM6kC1ZAAlaS5nvnZznm1LGgAYTozm68bek4vss4Glit7qatN2H2sSozoWQknBYETxgeivJNIV7ZM4UPhznM1b6nraRLt31+xt+7kfzVoZ4TjVxUBlPx0rRK5+EH6WZih7MZeFGgHpk5mejb41P/l0sJl1kEKgam3IBCrajt9E995xYr7xKAgrgf3dspvwIDTByHnvKS3DDLh+OWX28BoTJPNkoKLBYXq/aIGWUvH8mTjkRjNe2A+ZKHs5xE3dTxUTk0LiikWDV4RhLZcXR5ClfdD+dYDrWgBmePcEUoBSTBSxqQlUdxSYJ24LITCvkqQ3e9XwM9XGMGv91mRFxmouZpJPBEHi0D1YTwN5cc5axVbYjdn86v00Mznq33x0NdjDmu68ylx0IaMUJwxx/+52TomD3GERT0DlK6dGoYKRY2+v7PC4KnOK3WyqBS2kl
vYih+bDDXrzBLA4lpCENxgqw1GoOJqjDynXbgPIuWcOwDfHSo7vITAikN+EHT5n188mYNqbFtiTVxtJb3c60vFZPvj1EmPtjq0/3mTcAthYgu0J8a/KleItPAIPU2Cw/H5Shti8lizCzbxBy5N7FmIlf6qfaIrLstIqFZsVaKE8k9HtnrdOU/RqJfMWwa55OMaRASj7lrLRbpdEfYTv79vtHp25SnIIFDhvET+qhXomeKLNEuBwaElaNFPSgz69/rsa22jGjj8CsavwYuGjGJJqP/cmg8pCK/Lp4vihV8SpE+18X4jl8ypdC4NEFVVVPaepgKWfhhPsQffcvMbXVVcRrNbZKWYmLLVaerh1CPSDMwNm3GaoC9/i2j78ABhiDg2nnAXocZLkO353XqSyOVVN5BY0edOjC873a97i7RwKV7Ly917UF630mKQdTwP+4GJdUwtA9Rc5+Yz3EMS0OxBy31bVp75IrhWOl50YD6KPKrpMMtQHAxfvE68sgxYLTchvVZNrFqx2wYaa4ApmgU2VDiy2PUUjC7Ro4NznXNARB6Oc2k7sEZlnDpte0Mapg54sADvbcqtrP0LIGToEYmGSN839i990ZfQmfKt+RorSvNumv1Hid2Z9DaYOsjJte38YAUFCc37PF+tzWrnfwzzEZqH1G72o7Pc685rd3LxhSrdzCHEiPRGu41KaOCltAZi7NxNFoYks35E6QqOVdLN3u6ogFsI92WLyd/7Ia8//9M5rVjLz1oLNb1g/9K3cav5SjfaogXkdt7NShla87hlDKxjpaGqNrQy6VmJGJviBoDdLvLavI5Wyu8XBu2d0+LJwfQusQtBMtDFsJKAvYwMpsIl2u+iegQVrilIQeZmTKobap2uIx9HAsbud/rxXXNWS5Ode8vvwjp+33aMacaDXn1WaoTCf2h7J1OQLkLGIxHBxEhmCMH3iVCYu4tVJXgetpMCIaLrPdOpd6ZSRZOc9pawnDEDjQp/2owH4alRVw286936DnDlIPWGcm6+VJztxHVWyfdvwFR8XzTO/0wEjjGSSbOkTjOyR5ewaFjhsBzgKGKNDlIKU971taiKD5VkdexHTPYe2FJ6yRBAOGBALb173B/Lqsxq1g7sRQpMm2u2Njpy3ztckvf7ZBD9zY/ockh+lvhjovwX415JxnHAnERYNmRDjN+rqRrMx7orspNFwPseTXwa/RknDH94Lic407TL4mclBYG8kTEHGN1nxp/CQNwO2tYi+0gG2X45EytipQo+GZJzerwNkwdB9nJHoPhcuCNXUuNtf/shVttSxGB4GaUkneifOXpZYsqr1QX0SZyyu3NDCdixSznw1jnmDTc04Plj/41n6Kblb4ZPo61Yknr+25k+OczIvfvsk0nSE2ufGDofz5x/SxyepEX5daF9q8bk/rCjQh/SxNlXL0OCFY840ZrQ0rnt42H6vRHcwsZwxDFlmAo2duTAhX/Rg6H9I/96uASJxZMm5DWt8woWgY/9ceqrl0XhX475Jvy0oo3TmBkt+vCG02q07zc4dJl/vGDWWssQfDJgEL1EHKfw90h7Nu6bMYTtwuoQ4DssgHaBP31TPT59WvGkMhcwppT7zJDWxsspDbmogxPp9xzyV2Umhhh9cvO1heCqhRtyKfL4Xowi5TEFzVR4DuCsQJivQJw9rM5FmiUBkK2xVtFU9ZiXCymU3AEEpUl05AQvbmG9aN5nZ12mjlO92UrPLk9sP7hES2IWpyUPr6moRp7U0Pi2T5gXr0z9ppzifZe7tNdfvRx4LI6yodprOdNEOSY8E2kgJ1Suu25svwTFNV545lC2GlR5wnCBcMIMnCdcWA9zq7e6yR9HxrsidGd1FbDSSHDOnG5BOBBkBMtZhBIsDzy3TEqQvG4vNXYJv2KCAFx9Fl5kWdKjc+o+lkhki3Vm07QKVxpfwTnE6A88DP6huuPLIm4HZcDOVD/md6EN3X38TEddKtBq/AxQi3surezXMTXLPjJ6bj
1yQgfb1LRgtUAosNQFHPLbuqwNWYusPTFPp3gEgInVzUqaiAHGZEOJiHnFmqSdArc5Gr9T+cOchg+7eLGuZ5/G43glFbts4GcHhhmmPLo+IhS/5h3lqgOhPIBquOzWXM2M1STd1+tkM/9lACofk+8YeT2U4dH6diop3XhrcteP2GiAuFklE7JUdMlY6kn3Tfz9U3kF+5tWNLMA+wluHI/slyQiPOzSFUCg6CfVjO0tr/jJQM/Ha3BWbzN6O8qZJptczX/qIffT2FLk/ssjvYOKg/Ywq+TiVCxBlMb+g1DucWktF77X0eM/GM2D4hktlg7ELxMfmsFF6cCCuPFq/CVNAm0cie+MYcoiskCJgWxjsaVtWrq4rZ/oRDHMPxFFd9Wsm7JDBnc1vEqpPrXjAD/vZ5Vo0tPH2WyAEte3hYeZJsHEkI+lhUdw7NxAz4gi2BlKAvrft4RqOsxlqAtmAXSVufQmxXKU8YINbs5ih1Pww9l9v7J+vwMxxTsS0uyuFdJ01j3sALiSf+EfPoFAohwaUzR+lvFa+anW5fF/T8frYzZTOS5dEmV13goSZCQDQ4eVC8alFaOF7omAT7poTMCuwaBhzFSau/4xq1xh67eRrwGnyM7IEAJdbP1S1+xknFhZfE78ElyxfU4tTGS9q9ttbVgqkYxuO2ZORygJ70MidebtsKe6MzDNTZTjFLRZf6NAEqoCJp8BI6uJ2/Q8JNpVXfSEi2LqYrz3AzB3x6bV3FDadUiyT38n1BKphpxSIvfiG9UOIshOzJnVBPbC9zJyRzHYI3r+5TFkdlgrchSH2JR+li2GjcuHPmJBlXg+vax51XhrLmUCg0TNWS5h4kh3WtZ74SSKkULOVlkEj4q3BTygfgy/ChsJs5a8ayYdhhPDEP9jfaMGw91UZsMH5+JpiC6y6kyYwV1g29CQCBsAmeaseKKx8kDHGaoy+aTQPFyFQlTR/e0ECzLHGYrBvK0DlagWZBS7CCOeD7iTRJKC5jF99OdRYBFTcHBX5Ol413cdF3149O0BEgX5cMwCSLmc3n9gtNzs5pXmivrktfJyl3Z+NAsU8SYuUirB8EM4CgPtb+G+BUelN2sd4he3RgUbkFyJQiRkRDErERqamtgV2PiSKf4RqVxvU8a2UK/k0EZIY3yvDbFoJFOZbqCbQ67Po/0BcNvPZ7XFwBdTwOnGIVq8NSfxSaJEtTkpC36mxn8fgQSFQPpHO0nmOp0JphQFrycE/AVWmCNtGgzXwFO/P9LXGk33cblOFOKffvWP+NT3BmOUWVAgpIrNXZVOeCgUqS9HZf8UIVlGhYyudSBJmAdHyrPcPp6ai+OI4jjTHpkmugTqPREsFXuGRhj0MWCMr/dSj8Dkem1YwSCUMsZy/fMKQIz88Bkq3Rnuy6PG3ocuK44bx2wyJcbQU2x89iu0WaItzU8k65SyiouwlzwRVWG8a65GRe3FG8EKBnK/8CwyX/OmTQDWCcZ6zL6PB9ywFJH+nSZjqd+hl+Iq00+cJGRlhEuWdZKdElCx9WohEalral0Cf1nDIgETU4IXmH9zcOIJ9KeCocu+jphPgreFwjvK+m6TWaL0oW6BSGLqSi6ImxGG/HiGSKAZRNip9gtIM2Ld6ZToevx7cK0w+tCHrFBItYDmnhIDNlP/pxfmghgdz6grej5htC6F4L7oAF5lAoJiWQH3NMX/9gaL27qEYCLpU34foYy16yysnSRF8/VwU5qzrSLN9SkNyabGh+Kh2lwizyBt/tykFoUbsFYACNZg5NHR2Yyw2rqfPTnInwIn3Y1vpaPIjV7CLkI5ju8ok91SyMuIudvROjmJDihHd4SgYJeuAYU2sjWalQtOekySeakcKYiu5Jo91PdRI0+z3XboAu9y7Kj9b6Y2dRwfdVjqRab/tPZhP0dqrfc/DChJIVTa7cFK/BDBpAxrYk48w+BSBEGvBUICwsGGEJtrBY1G6K/YM0SoKOcRFjCFDAtxZ2ib9gqyO5F1Cd
6sbtwSbeJbH2T52nmRYA0ehcLLXHBxvSm+kp2oZmjwHLpKp0oAUz/KWDcKPjYYd/6l9mGl6vZW5bMrFzxLdTkOAAoz/nByZMn/gfQsRCpS9VyQrFjLZciebm7zryk1OnFK5EjHDHH9+2tJSL6Jy9rhRKCPNLGpw/BW2RL2pCs8mNj2xrO90tKV/+LebLVg6Ni/Sd+havSYve23oogpZ0xtJPG4hJRV6ZhZqE+ckkmaeRJaywJ0ACCWKeonnOhcNTE5KK1NQXamBUKeMOArpSQKatPk4flGz1Z3Ah1ZDuHeMIRomKik/fuE6XxX3AR6dh0UXCKDtpJAZeBGnEvA133IR8NGUeIlfOc9ZE5u4cxouEuKBKUkXQ0Oj6kb2rS8kTRyykEDtgNYyx/VC9i4jcGrWlqqhSodaA/HcAAgksjkLXN+7/6o3UeYQ5Kv5IYnCeMYtNpKT86ok0RV8OwxyfbZ41FYTKdrE87qRFKB4YSvI0PTae0SQkDRNUMV3LUV9Qrvl1jfNVck9B8hlm9SeW7vjfiIITwVl2qckEmcrdwTvPNyfN+e5zJ5Cd63WpEf/kXsUkQBbL2+J7vozGyWmE5lTXdnVcMc8WyjektTqRG5nHXOKbGERv+OBkrfIJ6HMP8uhoCR4O0iX8sOLv6Zz81cI1t+QCmg3vvubKFn7YHca/O5NBpJM2DNLJYuRihacwnD/AcQvlo1s0rY4dN4Gh4kKu6d4XwWWAsokz2PU2R/Hw6AVl0d4yLJD76fZxLx/qM0UQo+/bSEe2VPhhnEEeQsDfbsAQ1tDGGsQoJFDTSSZpCos4HSBmM/ews4uvBmpBirUmkKpRs9/L0tf4Du5spWCiI7gsX09UXvm9hS1A+0emXPPzI7PfvH0jihj2iJH0IURl5YzsP0cWjebYks2N+DdI3RMpcvagEnUleCmxeI9y9hQBJc08Ym1XSskLbAWow9Qi+2Ep/IhYNZaULieq/2HQfrALgX/E4yWZjWFD4GKMwFiEuKdwO/VcIkpjXICL8mkMKudr9vhc8aZpnFwBHQ4eYhKrc45nDOujkJ3A2O5xugW8lnHXFd5BALaUVUuyuLwanzK4SQiiXv2sliP6C1bk00+d/jcVjK1pKWZiOYNw4OsIVDem+DO14ArwaAPc+AdoJVnHcdsPiKg2ruExq1fTFgToRpo36Qv5OvRM7tnOilksKuuspLgETnDD5zQMPKi02jOT9IxyRoqBhtcvdoicDQDtyoRLj3I1jb2puHh1d7gwzNCGso1AiJjGFVE6Odsb+EKSxCROSFBzFrRnrKMYTart2oQ+GsDbcS9ubG/VnyJG5bIy+JOL9rjlaKu8IMUVLRUbbuJbOimqyTEIlQj3AykGEMsZT0Ps7u6YbOK5tZbgLtQ4okm89UoEauiquujSrwzXgKiMpwrFbp+eRYpRZUoViecJ4cofq7WAllyCmlP7aosj/1PqTrXLE8tOo5H3+voQHdXbrv4CuShp8Yi+/sHjquYV54XtbUYBtkUZWMe+KK4lZOkP/YoGTI4t8UikDoWxYFat7D4K/1cs/Wzbb5ItkW4/+3ebllUeYWfkZmuJ3XtA7MjE4oJuYbFwJqsfVCIwIGtPttTTpXbU6JaUakEpUFqXlrvNVbeQleafoJviGtjR3sYzEW+hqlc9x/am+56iuAJOvggQ39h4gl6PiAsrhDoFbuIzKUNFi/nQq576L4aQrvGhAEAKWSw262o0B100Nk/jYX9RTw6VsYl80W15pKhY2zrzZ2aZCxsjAmb8MKKg24a53pcJ4wndjKvbMddIq53l4919goUG2ybT94d1nRQoLSHUg61LkfB+TrEmVE5ohed+uIio+AV+k+YlJFKEFQJRTpTjcfBHvOtGeFslG9+HmXWhYSPN8nAW0cu7MJZ7eRrAqZc8t8UTeeM4Z7AEhp0xByX8giLQS2wTqunG8ymmhh/Ui8aeSZBwHp2qkHcIOIpnsvuGejvhdMQU2cY/HUuPoAOD
z57MXMJZ6RB6FknZ/9XL8n9z3oDlaasfBM+ePN2X+McbCSO7Ct/QqBahBaZPF+HniCdl3U5qW98xhKzJLsgr/gsmWwPhR1hcpF89yOMzZPOLbuzE1L/zH/enrNIRTEaL6a7sj/r1WlSRaeSgStqp8t0vYsRzPWPydrArJqOnva7rQEeDpCyFr0o2vqDNe42q2iLS3Kb/xRmeZbo+IAxbjsd43V0IVbRaGqi9txG9u1pWFytNJWBX+NBk0104E4ozTe048+GDIE+rfD9e6Kal8IQqYZnaBZsx/8p2VHqHAsVmpmzehcGNz9vcogiyj22qRMG/Sa3L7uaFhctS0GW69AbOckLEU8hZIPnHC+1sEFbCXKgwePe8sc6QUPQwggvomIdPn7guRY/xsVNYZd6gYauZBggThakLVIlzImxqRjVrvgLh53U0j2FXVNo16vIAMD2AyYD1COjSu/UMpTgHMhorx232ssasM64gCNRV7OlfKmRoeel8ZyjQ0oAkLMdr+z+unSKzbcNK32MSnIjaTbSkY/yCay9i/4dVSTeZBz406FY/DypyfjZ9CJnFMCbfH1L/IsBB/p+f4joqdPtyHVt/aK6Aa237S20Ek/UOdUyrya/d4oS96rdQZ6kGj4nCToGu+adDZ7CxmkanTirt7ee7c3ygtkR9Z0QnHAcj8xYxK38S5D8DFxy38koLhNx/jeNnXJ+jERBOf5yPIPLbpfXd6/nxSZmBDOt6MyGtvf9+J7xPeWB3FBG5EUyrdysgRmiSVUB7ohM8UUFiivpVMhIXoG85xKTDQ5+OhxBAnKSVG+bRXQD2Y9eo+rwLxLoFxF8rMb4Mlpi5GZSc9qc6Ur9Da0VJh1J4SxqhTb1zcdwRDSySvv1CopOI1bxlg4ySzGo4OYY61Ru1KCn8Q5bSBg2GMsYaul0hqJGjyIxBWQsDpKmh8O6DkI6CKRotMYl9DcjEoCneZ4gbnvCEgrxKzPQZrblZP42+6S6x65P+wYrLrNwkMU0SGJ6mpsbEUEKpA3LzNDo/hF7feUl7HInLHzVBXb2gwrxhr7/IHBXA3FqHAKuZ/I6/2vXxI6h30RtFTXoTm5nHnBF/0UkGEhxfXUOm7/gFyWyHDZfhE5EKhw5P/pQu6j8i1HISQv44DiMCUKzWgMGWl85T6o3Q8jFO2CWaIVLuxqA0r5TD2fF+sVF/mMxFKDzp9oB8dde0eelEV/rQJ3NB7RpKPZCrG5gwmgCKlr5m38OnNivnu8RDuYKuikhPeSHbe7mLJEsiti9IXVrHz+gu3b+hepdl+M+cRTewocJAnvfdAa8+DwralLtWmGeiwvejbcTPG2em9a1CFEX1KBsHrkzp8k8zKubxlQmkFzZ1UZ5My7XR9CRG3LBkyoDjAk1RUcKlx8HvKFYQPPOnYHI9aXChwMuvouB83NsFFtTSsp2wmtsvL3GU7e3u2P7zym+NVLvTU6tSm9nH3t2zVUpA7EezRjPKZnv442z/1kd9wJmS0hcqLqxL86iLGbVdrESmc0ki7rbKtJNkYIxFoyXR7499NuA8WIajRM5JD0niz44G8l1FrnDOEYddD46LgIddGukd/l3tLFwIeqMG0EO/N1Or5EL1ImO/h++CcFkwqVAV4px4L/Xh0vkJW3Vp2FZKcxHD3u3oey/s37Wt3vim8m29SZ2VYzOVaFHYgM2GuLdVeMZ8y1Ewj1mvjNLVWsw8QBxxzaVN1JO3h3E/GNRAuHdLKM/lLA2sS9cZvd65zUMhmSVEHVMkMH3QkZl4ULYSyuzAZK6Nos6psGUiFZkfULdpzprXqEbMcbl/f3rVurLACYax1PVsVevlbzR4s7IzbeMkZDD94ETQ1b/e/9getowgQCQuvAD/Sji2GURVx5yJEY6LBuromNxKninWAX+gcJgv65Moq5krsw/22UXEsa8fmdoQ6MalMw/17XgP9PxF2KPgMoll8vZQbVVep5flSeoybN511OY95ZBm6H+bh4tecVEV
u5sUTbfy9PgsFRz7o2faNn1rYmbkGNgllCbtKVUAA3HkqwffNbwyYCfbrKb7rYBraGe8268bVy8oiGt2dT42t6ejC+mXDbRvLu91cZ4GwliGd4WgfKEXCOZSQadzldDvejtSI2nihO6tFuCpzsI8KRZBC3ggoelxewA9SarsJljKWLeUf+cDyEVAoCVG6eJznbqKChXV/TsDeNoKGQsO/5J56PJovmqK9b9OqCkFLzrQG5XcJMw4scpxd3cjvaZWseYlLzhuFlh3THSSXCPM52gchaM8yP6tdWaFKEYnWyNw+NZON33AO0y7hDrtUg/nEglHTwhyIGGBaRj7u3uGktYaH+FJYnuinFc7LUdAOplfKPjzY9VAEtnm2rT61CD9CjZP0FmWHxuDPWQs2ld60Webxa38N2XjH32Cyt5khn/K1etcMhXs4TW4oVBQRcg+hF3FC+S54tcFohR4yPKmlfMfkfyEElGeKYktK0K0sJ5ycmzGNMPw2WQP+DWkv17AvJDpuZ96V7+K6vkN6PFlPWSKwcPs38ax33+I5zsAEP2YPmA6S4Y0VVZJmZDBmDLl4bPK5+rK80SZtLcuqoHMf3w1aQAkKCqI9mblApSBFC1jTloHDWf3P5EQIutUbIr6Gz/1m4hJjTkKR60p3Dbq74blhXOExlLlEFm2Uy91wE5O0FLrIQAO5ITMqv47ERBbvVWOzQKoGFthzoMbJw60fOJ2i7Lo/eDCeO3441u7T30/B8qduD6WSyartmdfS+klL7E0Fyr3Vr80mLz0sQce4mM9oKeANRmNJ+WRaR3zQJkBRupE0P+7JZeofaeaqszFjnlcueKc20RoIRJ9krv0Xilq1KqMSOltb+xNhVmc2yMoYCj+5XFN/kpW6AbIkb8auDEZC6r31UYDrzkYiwXDsnTZWIrDsgkGWJxxvBOhhQ9BsZTclZ3jc+DVkVQOCvoTNKQZfdVXfFYjUjHfnKO7S3hsCcJMmI4hX2lfjtrfTzOmOKsc1jPirRv1nQBMoRiEK1qBi/tV4HjmSrQOUYgX8UbHPaYMYRjZ403z0ZjD9j/HLXXvI9c2F3UEqxzMJhwiFsQJ6Ojh7csfr3M68NE2xWRJdL8TR1aPTEALWKQiT5sWU7ZbG3I2a+DylJkH4dv6WeWrfqGVErhD0hILVfmTBnyREpAsmFpvgeBcUYp44Svrt1aJ89dIZ2Xm+533JBtIv/Rd4jPsYcR/9vM86Hlq4dY75zfcIevHXVRwyUL+qgNO1SHpY1DErnTctzwvV93GAeU6QR55zd5LOBvzJTVAplUMnuMeLUKJBA647u8D2Hhs8wN9qhjn0lENaZqteX1q1wd1oVG+YqYGjuw9+3M1x2MDeqB4Nv3+lyJokZ9ovTWYy2mAM3r4axCtcWXHJX9+4iPK9DrURpuZ8DfQcEjE0addecXZwZ5exqSrB9pZtJRpiF97Mtyc9gUBJd8mT7CkXijwtvMNNH8mExL6vG02h8AWPN4MhHtiqpUWNeUPozbv8SjRWNIIwV+n2uBbqq4S4D/6JrgN+k9tk1qqmDyKfcH5z8RfYVgt+xDFMUn8ozLPxrpw0crwJpBWvhqC9r7iRwit5yh8UA6MjN09FGO2W/eUE1Ii2DcBcFW2Icj0mkPXVUiRo+jw4jMC2SzCt9CmHGF1KVybjSOa2tX572Sqg2WnmPX2fgIoUKgZlnPR4rKaNPEtGHkCKJdtpWTN5NEZ9mqsJuYeJ8BjDvHfpNu4l14N/Jz3Z+P3fJcyHPywmgkO29s2IfpX2Hu3Q5uaWj7hKlbKZleSN2aIm+7ueak5Rymd8dblNCrCX1fxJeKdXeYfjrCPdjyjQ05iXnBhF7bCxthDLXoi8Fw42fNYH5wbl4qhea0C9KiETRW3Q72heFhLDVIaQGcx7hjuWHVe5ffC3T5PPGBlF8ji4GqhHvXIAJEO/H6ahss1ZLPGzcUW73EBmk5FtAfJ0MKtFBC8QUD558lX4Lgq7HKGEPeR
9r8BNRRmiXVhHdqMOKXCKJZt9LMBqIreyYNIRNpv1kJSrQvf7xMud5EVXc6kjqVkT3Sr9DgvtuMiPYT1W5ac3Hx9xMNqWAWE17UBuK9eSzET3Yt11lzXyO07LMdZh5MRd/rSV55RXZr3zISFSB8t0MYwN3Kp80rUhelcbu6Bxumx42Vd8V9p/vdz1CiYbWkXPgxU0F8ucB2LxFvbnoNUFkuNVOC0hEPjpST90FznWC+Xe31Omi5E8krtyqTFpIScQL8Qbv82jI/AoRaChu/JbZO5iQAvLCBZvjT7+PedYwzav5GYH88lKrErkXvwmSeIQ7wIX3ia4DH3GT3xGgSWLCV6qdvY23CPItEAK9utd9sveu6x2pEd2qN71HTm2fDrYfnSEzxWtk8h88mdWum7w8k02YLNzY76LW79eh8kMv+ZvXEpDD1xkBQC0tq9QGn7G8V6DM1XHttGAGuGlD6dCkc4voinMwFgz1wcXrAAXvPhZfl7Munq11rPIy4G0rbUSSXe0MmXvuju7RUUhM/79MN66snP/o1jx04hg49RoDnFcngBbkOeHa6+6f8DqGUTc/AMoIs3CXqbaHG7tptrgRKOeyyoUDtU0Vl2HtkJtLkmOjPwmOWOCCHtxCRaeCveepfMScMFd3mVPxME9JoS4Sxo5xRkWLAafzEyHPyir+0FlkU4taEWXXgX033AfN1Lr33Bvzeard2OWmLyLG9cm5Gy8HJ9SLkq+hqoWUDWjqun/qP2+o0CziDC374o10PSVrMIgiy60G68bybnK9x7m5Xlxdpb1t9pzXGCZyHaNU+Q+1rXlS8nvPnHDPwjkxO3/9s8u1vhzo8s3eNO9G/0rgJfweW9s6Augp2PRLVsWpeiRwyvNi46tDyWyJvyvSq8D6R4YFE22PGQxd2l7gZ85oA4j688n7NwyQh0Kjtnp1mnLDvIT4uZ4ebzdRchEObAGr53mvI5sWAfIrYCRR0qyubvst7J521Bcf24lxX4cfTFZwWQ75DabFpw5vsFYkEI5/5ZHlAX8AwyxgfKoJcfP7C/I2hEIBttd6LDMKdPbOFIgdBZERNeDHqEsk7PQf3NMeJa85jEO0i1FQXieKPZ3vEZNkC5oA7R9db+PB2f6PK5Gpm75g4Xt4tn/WBRxcA9Q9G2sfyZJHAx8NxZIk7ETOIc+kq/nEM9c0xMBc1+hwdKcFXIanmfDZTKAiHIJAtJAThY9CWwTBBzfooKCY1+RWukYpENm0hBCsM3eZDzqyTeOrh41L9yvHpIEyILboybH906CURNiMyaUDmTr5O1MNQLo+E9zsQixHTOhiNpVIEg4Zo/c0VaRn73juUl6b/miDmgTihxX3Ueaxiv6hBFV9F5h+nU7XZdgEfBjQhikzz/Y6ShhX7KNxvY8eVWqjALoN+nRHhTC0drNTa7uofbomRMg936WGqJ2H49tbj7jw5QSzdEkWS/4uYr5IOspKmv88K8IQXe05Liw/XoCteYJIbpHJkyFDdm+Y3kyA/R9Wj0wbl1t0xHjjG9B3BbS2DKU1ONm7EB4QqwUzPM4ic0y1EJWd3lcQUC9jFEhe4TcfGUIzxr/rd9NkSb/vKHE42+8ve2oWOSxgMvEF+Y6llnHYsLpyUgR62nrQU+RCf6FhJDxXUR23+vWmaJI/9tA1GJRsAyAhWGuvKwa5kmRGGWCOWMa/3jxieqoackEUc/qdPs3Tj96ANv6D6wigZAFuFOJzZZRdw2bsZBSF3fDneLJDTR4bHBQv6PN9DjbvOuCiXPYXQ1ZgopxuD4+hCmZXe0/tUucd41WCUAWSEmuhBJMC+1XFMUtxYHLHdXx6+N46AATi0asdnUrLWG0G0uKmQq3Ck2OpGsQNNuJuQv4xcwDd1Xu0ZY41koLmZdfnr60Zk6/v8NIoEvPbid9697R0WA0yDLfbWgDy6Nvtr7w7Mlwf5SoTz3YpImDOWqDjdx59K6S9BNcX5CuQ7NuM7mWXkbGN1cJjT52s/gz3eP
J75nDbOD3WweUcXPKtYl9QBnz4uRyXMuNWA1TnchrFxAb/v3279i4RCilRcpXS5FBWanAGnyZNSG4de8BaIATqQwQvH7USexU6YNJg1WzEp+TxhrlzQatL1C5nPbQtJaGau3M0HDK8uXA54vxf+YmI/bRSAU2rVOwtIQJOpoIbGjkkmQnyGy2JCuxldfAW7dcEXPjf59RUOoPh3VKM+SlSWn0a8OWib42y1qwhSRbqwkjyVlHXJ7OS5IlhXwNvxxo5G/QJiWYclp/tlSD/w+ltRVicHCk42J1LIful3NuE29OJlXxiMIt6XyrFK3EQityNvTzfT5oGzgAqCfiYVSZ3kta+a3SWZ05g0faRngMz8Z6wCZuf3gvFzYZZoY9w1836P8SET2RW+P7XrQqSfm/pWrCBpXrxRevVxbStBgDA3mf7SmKxvJHqdQzHjK7BCBlFrzm+u40e55u5K4DlwLnhKoCIvp3aVDXegKwRiQVcfyc8yX44cB7IIUUjmVxKIAZwg4PRivmqGki43CRbd3TTb+qqAB4zDZcdDSh9VwFrw+b5nLdsO+cV5C5m3kJZuKCh5WfIOWXWCgcaUJdJW/0JWdmzLodsgqWCKo6oIFJH5dIe/f1+JPXdfS7hCkT0u0XzojEwNVloIh+ttS7vPob1CTjGKC+Uc1O5xjQhFju+nLAshk5ijlc83n3HvSNbbNiI1imEWOobT3/Adn+xzyam+qOVf743/NnGAqna+YCDy9RYiPTmElRd8MXnrai7Hxv1H3PyFaifh+ZUdddrn4u8PG8MvjrmKDs1HSTUbkCafL1/jvRlBCTjUapMY2QydNpTN1gwH49Nx7ZiR7vCyHlgVtC4vAk2F/2pazyy6qd61hVs0h/m48oR0ydBXKow2rrp5L9B2r1D6jz3kn3BlEVP9Y4D9LZ+weSOYg3DINk92nFeWahmf7F1w4YRoKwQlWtJMBvcrdPhgryqHZLomm9IG3JAj+j5S0A822WRreb9fyIeBSdS28MC+iuIdINng0RsdtzgSNMenfuXAFySb78OrHLY96OWrsYnO8pK5rYhUU+Knt2eU3YuVeM78BSbLgNCXutxMDMUgGc/pY1/JOVtPrON0KjEgODR3JiGr+9AAlHAJcgGwNI6GvAs+42m7Qh82Myz11LcIgYLmZJsPko9QtG9SoDrUv+UzX4YS46LdYqrA9kYBP/Zyj/kVC0TH9DjwX9TQIxBuQmiGMxpq8KE7Bm3jeZzuluNCDHj5TZUkHYszBqaECRU2hSAJfkZ4pM/rvmezTjYWx7Rq8DNbxdgVfWPSFS4wIt2CFY+N3MMG3teM54zp64xK6Szxt6fPZQrJLCTTqNOiSjTustRAVpKMuFpP3axZn/RoFIbDILPGjyeu3IwlsQ8A+VVgLffnOEM8B7GxsE+xmiUQvKD4AuqqTpwXJulJoa5bfZUl5GcRcYywS9R2fAu8yE73VlGNOvsSOsxYv7pSW9ZwKEG/I/RIXQU/0Qpi+dANCMYbmJrR7yaSYgZeZQVJkrr15kwtlV+ZqH/1ynu+uuJUlk5r7ccKqwlpKsWh6RXiqS11L3ZMhwZyFxRyBeMV+WXab71cOkAQL7SOLV/GAsoCeDEQL0ur3cIXCmWRsoWOW+syo9eQ+ELqnho1OuI34HB29BCCaMcZhYaO8D0IhP6+LRnuSoyA3Zwsj03ujVDB6VmJQ2HR+8bEcGSgTbg9/yp2VBKw/vwKPOvPjgkuD215dmYS4VZb+DgVHvjuz8JIDVLVQ1ElpgVC/X1/HNWIGiKGWet+3UHMy7gv6JsipIKFjjtzoNEdop4P9XSN+MQkRQHaEsniIMeFcdL9jYl3OlQAaM5EXF4yIG72p3HkGs1/smXkAaPetVe/oNMVPxdnUcEfW9pwj1rucUkcDh5qibrIO4MOi+ZTnU4XV9W7CmC0tyUpiXU82R1yU1DyjJMCXyZ0peKv3k/eLKxW3t85y5dvGofertU6YjqQCd+k9U0Tf
4nOV0PxLNLlU7ee1KynsyPBKz4G++4E1dgxMFKkmB7WV8U9KP3YFQrZjdYnnR92yGgk5OgDo0pcw+dlvMq5H36NZtaFRigH9MXQ48lJ0jsGDn6MM1pIRC/jGp9lTdcHHX2k6cZxFaj6jNCGLEK2C7MQq7cFwcvGa3/gHy+9Q8jfcCa+vUd6dVDuJUwqfPk0aYDOoZ4DkBHOPr8bNCtEFEuQ+KeuutO5gtjozT6oRFwVCTOS9L9b2TWRozRjonFPrImMdtKX1rexqbRXDsZRD3xdgziiMzEsx7qmAwM2XVNVOmAoJiejr6E2yGfHVqJtY4oJSHySfRh1aa4DKF5nWt0uiZuuvdTsOh2EPLFFL8H5FFN9wHdEaKJOtajTa08swy9QXj6TCXNJuQUzMA9eep3+xInL+O47VsfMUyVUm5QorNP09Uh2yii5dUDE0xMPUxnXfXrjaTEvLKje97yzfpDf5AEnAgJOql6Krr9lZQCTEzSAREi6Y/NTY5kLyGvuR1M4dMhk1awBNLkPt6te1ZbpVckLSpveo79Z1MHlTfcXZ1VsdoGcomN5QFh08nGI5oSREI/2QTgZHhbCDb25aGLJ++Eu7yCK62MjFJj4p0scmhPybbVTiqjUfmGxA59FzaMyxJ5KvYSPrpzjFnqXB/x0XglIQ6rryXVJdnXYnL72UitdJeXuNynh9ZEaNU9RyhQYX6RiQJI6vYK4iVPDeRjosKGQaTo/VvBtE8Q7p2VPbGat8ScQTioE8Yb8+2jmftbMvm7dUv/2bQ02EB1ixeNz5EiqWfTGds1K8BEMvYRxKpL6o8Y9r5dJ2+5Tv3lDbm8lInQJLy4m8yx/2hSTpnAwMPgo249KACb22cwcm0gVNUIOlBTJvFRrhr9rJHb9Poqh4Fwwg44l9mG7Bn7HLpfg58PLCNYdfUufW4eI5eazx+9cb+/Pu+EzYdjCuiyNfA72HL9YCPdJqY54UTcZOAsBhK+ZXS4JDQ1XONpGay8QIm7S5x7DeLH+2XpiSk4kuSZX/Y0YCDheD/+oDfYDhomi9j8R4rVegt/5rMKITLYG6vppaLKEivwagQRB/HplrZfKioFetQsxdwk5Kbtvl6ZrIx6lrCjBJS4VGcCoEZvk8MWTWaELJOqnqGiC3ucjOgopoZ98UrDjDnsEbFIJF3S7dFZtSPd1W+pUw2/kR4JQ2e/wIBANeJG+u9AVEJNYa716he5GND+uxYdRyZKIDN9m0IBjUtHZc16CvcpHogL/JeU8JcaiRjjpa8Drz4OVVeGHz5LeAH8LQEC99j/1gLrW1vA+T0tQb9spXN47oKP1cc1lM3HelOMVQK+K0bqn2HTZcrmO1f1col6Ex5j6QY4+ikdmFM97cdVNCL1+tLw6kBk8CYyqhgpUpas36kZaGtKqL88xbSw85KG4zzIVKFgfbHZ0vHQSi1t6TGD0jhvwxrtuPEjcgG0Suk0LfFwhD80bMN9GQYkZXhP9djPV957JUe4aK50/0lBQsvqk9SYlALZpdP8lJDytH38emYXor+LqTGY/bBszOpDp4l0PxDEP5wkJ9EbXiChh6Cs54Q9mFOwvGjjQ2uEbEVbGKEjCly3KlS+qt1P4h9LvNoBDMRyVJOsnRsJZNokJ8J6HDJTAiLKSgDbsTI9HhSF0bo4MfrgoRgG2QMt9jnyvHSsZ7q5Bta3tsM/uaerzAljwCcYeWXhsmI5cg8cMGsOeFND24Oxcbz0wMp75gUVVn9bxrEYxNLjycE+jtvyyrHvrt2QDnTJclIRSb89ioQhPWE/fgyST027cZAw2z2+fjbyJrwF0RwxTphwqoqC1UvPNLj3xNSY78GZqD4cAo27pWEktAKJEGz3sh9SBX/YL4CcL+Gq4XSh5gXVDGS6lgqIT/c6dmR0F7JHMNrTenBmcsLgeQG/0aGEH+N6lxPdm9D1Bwb+i09Yp28G+gJ8wuTIxtNbnYQ/32XHRmE7Ea+2gCQ5/C7MVRHvQ/uZIK+Eqa
4SCYfIuK0OC+5Mg942baBHYwdSd6bgayAs0h+2+UiVOE1pUZN+5m8d5hXSnNyVr2N7DqBRRFqXLgTy+Gt9qW1ptz0alopIHi8Bd0Eorj5elbW2y9gUXNLCf+Blc3WK4hr54a7kZQnPu3aGzP/rtO4dUAZRMnVe72bkvjd3ZVH0dY21SpDhoUX7MPqJaEciPHsq8jxjCxiibdq6Btg15wpJuAkWhJA78JGKhBMTX7IY9Do9J/uD133dQfIx6Un7U/CI7NJoRioAkW2+eqz4eqX1IvuUOVcLXCOMq4s8WC9ZZolFDTkGypuGnC41awGLUv1CQy9C1CNGJM6rBlRvVTItgWnTpc2n3zaGxW55OYQklBFI2Nh9CeAYFWzidLbTNlmD34+L/U+sszinPm86reftyZRjOl8H3x6Qu6JwzEZqosJFK/F674rxENi/HeMw4aebPWDrAI30A1VlGoPpjtZyTak17eYIdkqV6oTdVHy7+Tsb9GUVk0g6UfVPWy0wSJDt2wfYnC20m3ef9cAyljeuSjOn+5nexAQ8lWRWl8lyfLZY8TkzPGu7iF5kf6auNx9YowvUlh/6gnDRwarKTHfxlqpk90i3N0NbGNSBSpmyN7ZWZvrw/Udq5NdZs+zFEmJMHJo8zJzEQ5UiYURwA3gDuIdbRRBcmqf0QANkS4aOtJVCr/7c5mYpV5xTW/anzcKtW8ZahrHwwgfPDsfw9iZkgh7WzSNfTczgUnMrkrxYjmVOC/psIEdcr/WoI3sYTWUdGKwDKLbUV/pBF56BLjvlpIKskZePtJQ40xgjrxxXkgKMa9KCrkM/kxSMk5TYrjHxHRxYSsIBJWmwweLL0Pcsi4u2Nwa82peYTyKIV16ObR31XVmZSQFnCki7uMiuYBYnEO6tuPcoR9I5ijv7iCZLSXEn9UOcNY9u15NHASRsRRGdiulNSikkuOOs6T08Ciu/JqNKb69pBH3FZAqnBtyV643/kP7htf+tPLnmYOPIuN72STFo+z/SEW1b7Q/wEDQIUQfhlxj0saK8wTSvHbk49LMCbnAE/ucX3kpvXqndhCOqb3nP4bd0T2wZ7mqBoSLgNMLuEAu7MYE+eUfU7Sf2JqesZuDmlDJ/cXeljpXpPZ0sepzHY4wCbJDjvhYo4zg3TGCXwD3z1KErgwhBeYQQho55TLy0nGdZKU8mMQit4NYDF0O9gSuUe2CsnFVU/Pw2WwuwVL76u7yD4RiM5vfpXi9+nr53cST0EvtcloKlTQod1/pfWfoesjbSwUvfhNoC5U352/xs0Q6DM/aw4vRINfYcj/TcZCaUA4XdPlU3EzLp41atZrgzaPscmnoSb1LkZ5F9erle8QuEu9rP4Nne6Bt4lz3PcE7acXBSKkKgOL4L2lDUzpLYMeJAptwjPlljz0I3uy3Ulto02cpzm4oK9XiPQxct4ReeSsfWGd8x+Jmcs8AxyY//5XsVuy6ioXfSnQhaV/MX69qBcCfHCnEMiUfEfXPUAgNDAQuiReOWxGPsAdhQDSsFrljUzk1/nuH0qFPN7ycUJYhCKvQhpvEawhREUmqXmCwrotzCMR0kBOF1EjzWCLG62BkcdzvICq2+sfZZDMDtqn7O1FjkaP5Fl7EU05x9g+Q6Goxzh3E+NhibyVP1mc4DXjZ2GcxmzLY80FXum6uxg2F5VphPF/QpzkVRKHZciELZqF5MQZQwCltkVG03a5XAUkJ8m3S12Lcrrk6AQlQ5jPcnX8kQmuEUjuN3OrhxwoKh/1zRw6+PgyT3NrKW5BiV3b+hBssKfvuzlys1eLy/Lteh1RwpVo6FH0VtMt9kJHMrNh6kqYYu9w1efDRXyLmrfkPw+ocBwHoLAw8i6HsGuIcSCCfqigT5U6irwPgavFNiIWNWEnwa1VfrDwhPQqLEfTk9YQMCfA9A9OD6HWbfaml+vspEgFd1zu4+TTFjQvd9Ask/9ia9O8M1PXvSqVvpiA9z8sKOqizTQVYUyxp8UzsSyQ
S56VndjQlzXdwgjZg/6ZDcUFAzXK0/gU9PYsRXFs+4txy8HVI0BMrHoeiJWBP2bMPJ1d3hGIhpDhWsXhvAawb2W/jA90R+ww6XLHyXRcCpheFVUXMLYI8d0pVtB9uZayai3b+ALp8R5oNob5rr7RBARJQBa4bhCANZU3TkSS0s3OiP9Xo74Z38OiP4iadN7H3UlirJ44Ju8adnTxrRDA+eQ2AcYO9i2VktZIdO/oLsblITMmDSyvTM+MG/5BlLqGR9zlsRkUJAb+5lkDSBSgKgsgL2Ol6biSMYPxa5Yl6H6sTISokxEvSbiyi5zjFvo7xUnLp0nWmi8g/aUoOTyc41tsMimKlLkTH9hnb7hqf/r/z17zXw/truqVfONUmbDoMGBsr63aGcpNjQfA7TR3tHTJbg23cmKhk11i7++4RAhyn/QmyVKQ1Qws8Xn474QlttHQfySBW98bBlEBexS+v+t67jHSkff8ZRVVUN26DjknmAdWPyW8g5xFo0EY4rZXN6+vTKZMpnNHxCbMFYozJ1QfTBEAiZqDCFIssiHvOcoUbYwjosG2WfUhwI+IHfvs31wxHnDG9Oeo8AfsZ9SsT+pmQJUZLAMEmPbdPy4BrefOh0nbT0MwPo/aeBJgiRBo4BM2yDHw+27Brt8JledD88gK95k8OUvq7apCR1CxpagSGyY1zL39fwKzF14b4s7ZxDRR+3KYoETJKk2l5FC6ShSnEcF92wbZrZNla9YNgrcHa9XE5GxnRytLm9V7JRODJO9hZNjV8FaCGyX+3CVVhut/n14r7jq3RYTiHKhsCqxLgW+itOHWBRE0YN6oRhQuqrVIrl7VX+1Ejz1pgL6iY4Y7PrfvyrGHefzhMGw1bs+pun03JvY6TZfFqU/mT5eesHxCmb5AjMcZV9Q79xMDn90W11NCnSSahoUocfYhmKpbJUjz7cj/5y1lECQUUA4mWzSA54c6Eyy4tY9BfO3wfPQQ2HvgJ7UhUeae8g9HUBRAerIlpcJKmdIUvJCUCHTIjcPHiKvdmq7obds0rTZ9FyAVC8HH96PvSpD1qBH5qlm9uynpabJqJ+hDOkjwn6TT/dOM0cQ1pdt/JhwQDq6cRte1KyHF7Prx8KDTpNRjcpns+cLy+aKP40bsYr/88Y6rdVlEVrOwhQebFMqjaUSPxQfi+DqFFLB7CYvah07TSqUsbT3lKFbdy/Q5Ok94QZ2tlhzlzp716gQxNN7JrF2GNWqVpNMhz4Z8EjoukaP0M2HmNoIzme6stbDBBLeUVtrazJlp2043PtYLWfAdZmYUB7FnkdEXZE4U8NGHYoDkHUWY7b49yyYSgCpPVL/7OBQd65V8ST/M466v1beKsp+4L2O6kkcrbtRbqtkDYqN/AxGTP4Y5jy44UoRNbawCDrkEqpY7/o1aginMbN7sxeHKaBVDlUfTxcTDhXhGoh0ZTpTJ86FBbaphF+BT9kypbQFfBMgmbeXG6HnmP1jcuoHvhTLRFbpbWweIXl6nEWCZXBOfDw5+DrcIheiGH8vclzWtGwiJuQ56MWf0j2qL7AMXMCvvZYoK+8jfJmqJaZ1BKn/jMh4kmzPE/KWK1LT/X7oCwGngUfhVG7jVBoUTDdIMW17dylQR72eulQyArKd91porXEKP5vUkps7woBY2U04yJfWWIRs2e6ldTEsd55LZXsN9Xak4JxxkpKY05HH8uLJjoCUGoEnssgPffBEM2tpaEG4+3Mps2XurlBM8HsOfjlHeJWeXuvZ4RZmdvyZL0UrxSKEWAWWbKxocZFWf7lYwVs7u/CcL5XS/zrXkPGaq3blrgaoDtPsRxxDDRS6JZUBn//USbFQhPlyNqc6WCgiWzY9wDawDdPbwXBxmA7CGsGKG7C+3+1sdVjwgkmnFwWGq0Etl1+WViy8mHSn8yciL+l5bzV3tXZuS+7iamrIwaFtE3Ke1c+erJvUJAzBrYUTiowF+AqjQJZm3OIwrnLSn+Ulc4A5YUdfpE4Ar
ZpF77NSck+rFF/3Jg38dOzY6EyIk4bUSbbdADdgpFuKXwAVLGNuaJQc6LhhaS4KFH6byLzNxUrdJyWzg6Qmn/TJTHPEzICbuQzk+I7gGMACdUfaq7nChNcMrizxwrKSjUGas8mHL4XNTefYAJxt3sMbNReQES2Erz9RiGAqopP604EDg0dPwuiC4+TILZ2xM8Voz3jxTR3/YIu42dOK9rrlS6SLh+LCwNLkTcSIukXg6LUwYx5AX4gga82xgA3EAAiqGZZAckzVvnf38ke5nb25Izc5zQ+0i2BBqbtu2ipqyay9suOPgetw/R1OJiHEXWatTO1j/us0zCrIy7wc2Z+dYNgZBWnBhCqcY9SWDArUU94IdDprobgL5vcoZbFUJb6f45QRALgdfmQtlUOUFrv4Nacx0pcoQZ4F2bwaU0VYUW92sxlExsMbvJ/idGg5CbP2IsqO4gxo7/3IUrmri1WGvVvY8Mffv0sV7on0dyLFNJs2xQJkFjbT0qqwovEM03M3fgyZ1s9/0FjVLhCup1MT4AgjE8m/AtSEiOjRgIVf7hyo4wQA935Cec/86NREF9SoE1RjsnkjT31U3kce64BMX9/bpkSvDoci/Yra75thK8J7G0ZZmjMr5WOTfr5cUF3u+74GwB80yPBUV0ZWJXBCFN+Lo6p6Zb7WRhzpUPZpb7KyzRNozahsW3rFIjuUafYB8uFp4ZNgJd7YEiAAjecY9QxCGiKQxj2LkwJKO2+RRy/JfTQ+LIs0BqYbQBc5R7E21ZAXj3i9cfHzGFB/WtRy7kN0tkWIWWwwy2Sz7TasSTYR+FMNh2AXirdnOJLPCcYPB2cSaEsNoZfLQbcAbfnNiAX8boIz90eoLruf6IpcnJNpEQlQ3U7XwQxZgzPGufPJbCAV5G7EUQm98kcS0znshf4U/mVFov26bRkCMp5JMLe7CqGMSm2pzDbob9yKYzq1aHn6HwAhON7HtOAiz0LCvUpwiz9RJvFqJK2LM7pI0EI7bv1eY0gQDYqC0wGZ10+1oi1HtuBU4vN4ztrP/LiZBKYdddDb1AcUPmWVA42dCuPVw7PW3snpg4BlH3YMw2TtgZYFs/8fJBBquRaX/6b9neYnwHfj0v5UEITOEzALC+wRRvla5X8NC9alucOY8YaAb10nVhUG5FV0M6LPFQCeSYrnre6dY9AbI5Yb5opaAo8mAdDoDUaMNEujMdYRuWw1i2Ay6LTaBUf8Eb68R1Ea3nMIs8piZflAB3nAdjKcdRRrxv8YIvXR8w55YFvbAZmBcYuYbQ8H6DTLDZ3rg2D/t/Yd+YI0XvJ6dHkCly0t5PjSBoGnjZPslv4jwmv66E+xTbQ7AeHpBWuMSeA5LPoziq8vC1bFY+Pl5npdX+/jOI+F2QyJnaIPAZj5dtRYZCosk63uejR6XkaB+btbEPS9iqUe39bj9Q59o6xi0U2rQVC/SXQC/cRaIV4Q3LAcNUlZsQz+Mcfp2H/zPYD8Br2MxbAQmAiKGVDu0NsBqhMpZ6tHIjX1ZCSDsl6vtpmjDm4YTN+oisFsfYC//Tji4fxwWCHqac1f3xaR//fVBXAMUdhgzloJbuY36+072dcmgSbHaha3RbwxilGrBIDGCtAEDyfa3ltLBsQI5WssUOn1a5uLQl5VXC+GMItvHDgwPxzFww8m5mutf+cvcKZe7Uy+cCTrGqAfVkML1XqR3i9rtnsRE58OISAwQb8IDqz1mjPZnNnxkpFBuwtNC+ue9sAx7tMd4056EAn4clFVsDFYICqdPpqaLoMSSniZepN1rWFnrVSqdMY0jMVNNEzF3JSH2EcEnV6rsPoDyPsNDj+p7KhyaQlVng15JpBGvcWeoQxwzisC1JnsIk3H9/6NHrG8s0MrHbWDP/XyIm2cteZc7kMFWMGO06PIbOGvB7FY8kTNDeITSsfB03HP+/rk20CAGiVy5THFc82ptbTVKOzZtpa1TJB6ntBFRHp5u0COw5to31BLtR9Fm8P/y
e7PQSUxNuX5mWbyGIH9ZW68Q0VAXFINvAY0ZbUhQJkzXQRYVzMtW55UFRRzFiKvjWDBgBZdlxph4GtwqbODR2XbK6lygC1F/S3surjQmKjCodHzR2HGzyYYeafKvwb7aASTcKS19DQS6iX1FOjs2tBSZhFeSn7LE4cjGAj4tFljdY8/Qa/W7UKLMIGwUS/WKlTD86+7W2dktlMFKhKUnJr4sLA9RBEiWfkDs9EA45UNmA/d6cfoVwHBa67ckCNXF72OncMB2a3Ja+GIOzYvEjvQqYaSiseZclF6tcJGJCIDd/Iwk27ojLvXPfkw1kcC74gdgR4OFJji8cE+sfw0XXSVjvQqeIa2QAoIJm6bobN4lslTMNlwd8u+jrPHZxxg1m7KM12WRTB9tEFlbvTmW0v8V5snRSvrwZvYcKoRGOXD1QEjtXp6BxBZL1eQdLh1oDpJajo7+Ow0SYoveAWghmjy2smRzEzkuepEiH5cfAvOWMgAdKudTbrrKFMbdzn1MODe3Js7L42tTaHzLewlMd+etfvrBZjk/RoRfbpwOjw3KXV3dMxvO+HV8XdK76jwLlC8ZEBCMOemfo/eeb1VmprDnQczfdySaKDrpFp6QYdtFRtCPqxAM7VQvSjBfEcBNEY7PZ9mJiH7G0vo+74lIBJaIWzfydrj3ry6SFDGkNRbXtQw14wpbDVDEUMG3YYi18Yi4nyXCh3jHLN3sBr3MH4rIu/aMkkangKUM5yj2nYffTbPyIFMGynKcxkhAl/2lX7VIDJwgKn6TBZmL8E0+tzHBIhG1GreADC4P9Ga3IfN8W+AQkTkbioJIy1K/ztZl2uY+sFDKKLRoySWtJmY8kfO2r2bgiA/fhPSBvzfTjPZKmxTwm5czibWV5lixOMd9k/cuPpjnO5EHIMx/Uei5f80R74kUAynpSSZcQZ8HVBrJaUgi2dL/UyuTZHrVVAQqkIsyCQJ1QeWkPN0PQD9rvvK91QnLchFY7A1K3oUMRr1XSRTFazifGU2nhX3FR1B0ctHRmDvNfXypBtkP6XkYFdfT6m3liIypcMvkIMYoER2vOdN5tX5jcAndAwrv/Bbaq/l/Y0QP4J+SIlKK9NGARdb6h+hekGxFVJaPn2/DY1aT3zlxvxzpvTyFlEkF8n2tCYAyksoLLmG+F0PFwmXnYtEoJwHy4bXwSRJe8hvCCEgQRm4h1PBW9amk7A/zdu0XVrGvnSrXw70SwvpR1p+S0BFVi8DNpj6yhVAy8m/liz0dkwe9AlO5LZ0K3YcSbRtO5QD0RorQZ7arPkxv72cMqJkVYUINivF/JkCXsYrA04CdjXKctfoQywzjsU80SIgvAHEhoXiT33IxgKndGmY5DUJG2SWCUidYT0dXlkPeObXTs/AYdVwSLrZ528wLFucYQElfpOnilt9j+DPKP8diCI1DHeKyKMOV9Ks/E1fuMGSESTsEcd9NW4ELa0VThEW7afMaSRPKVFmtU49NBkEoad1TjI58yVXDNu/Sofe7opmt/KzNHnUaPvBpQdDv1bMGAZz+4blvaMPs80O0wTqSaAj7jPQGXppXQxdmAk8U2aDEIRDuUjRVTWEonEzP3+9BDxNZqkXBu3svwf2iUwE8plOe12ro0+8+fBVUg1NuMSf1gKC0yrfylVbBEA9uVgan+PmvRBxoelq/1eFnILVcPAzCABmHRyVL5/iaUaR2bnoWcngu6OU529AwKpYN5o8tqWiKDmT6QxBZwAq9JION/W/I8FULRbqtwsQUer4MoI8/ExvHc26/YHjeRjm+HTvi1yzhm/DMANzetkooLpTkuTFIvxUgR4veE3Dfv5mgx2fGGtIPQVqAzgmrnPYj3MphcTMsfMro19LTMR/3PBKpus7lS0yxnz3fi4XRG4gQYCrIfuAkIlROLSkp5f4boCIiweSZMS0v81/46e3uStsQ586Wzq80pGJv3tU14H/A9cib28OB4Tq2p+SmZ7pxDjUfqosgGjxhFGX4wccd
LBjxeHfnXCmuYds5UTJhWH0vk9yVELm0rc32ckagyFyTOCWP1XrLa/YTWzNX4YreF3DnUqxVyInIO+dohe4xFzyuiQXcOa0SirGCDq2c7rLXLfKVUt5W0FucCwcmAu3uHx4uOlJujIDBI4G2EFzhnq1MkvkCg0OZDbaFnVP+YsqrhcM+F7dXMI8Ueh6SXyoFdXc4PirUlVPhJ7YmV3mTF80znFD766cjbwUEwZh6M7AMMia0ME8ZsDReikFzccGq3OZqGjAC+iDCZXR5E8UIN07kNNEH7ZvaGHISjX5WcSVo75UQOz5Zl5uukVyDeZSo0BgDHNW7+JNIov+bqIEPneRxRu3KQeYwrZHaDDaEv4CKH6ed3qj+6HDVbF6ci02L4RhT7rjjadtzUQGIZiNALz3mvJdWynAlNsqIxRtADr9QZQ+eqjyyU4Z2dgp7LSxf4T0LYwemI1pPfSh+2WQMoZf9RVMPx1y0lOgHfxGLiHzNqiXHfrT2hXWke+k+d3BToa+Hr+vPEI6ldn1swVHVO+hUKw5zSlVL+NgJXNi397O9AU7RfmKP9gx6yt0mpOw5vNc+0DIzVqmZ6FVgbwv7e1Jj9nGJ3OED89OnkWcBSzrv7t+nf1rByxPu41Hbje5PFxHiEDMJkeYMOEDcHTqqHaqQWz1mv1Iu8Kiz7SdOpUyTlQ8SXZtuyTRNOdFzMqijJm/J0kGu2aybc+Y340v7VDYhCSadEsRUurL5eONzIh5gNo4uJdYSmX6Bs7dLJJPl8usWZaWUWqPZXdCdSeNDXQgqI3wrES9ZMe64Hw/45fFszALI72s2aBfuftzEIwKmdVn+6my91qzYgfhVX9r8lXxM3/wWdv0b4yUdZuiEnfDN4T0FkX3tE9sWWpYzpLmZhkavj7FNK1EF1+8MA9Ops5kgwmgQs5zmnkZeYzYXiFsSjB/bi3fe7z+EU6Afv41Tbn5X+cOflVb2RFJULs9PZhwWMiu7JPknIj8hWUudUYDtCa8DZDzQISRxHC06xPxmTmfTsnxOtqVEDxFNzBeaR506aSo3WR+cRqV5zy2LcYBjP2HuOE4F7RO4U3mY79LyAg0HzJpSeIaanXCVwd4QZva9XM96WI7vvG/kOwNfHRO00ZZ8BDm5jhjcWEnDs0x4U+H9i44qXQ7S8KZ6eD+lTqO2qkp/mBDyd1ohE5TAT6vpE7nN3dUtPoOis7QbUFnosWcykzaGGnAbrrftkiFUAFaiFIsMOk+hk5mnYIiS5Z8MCC4+o0eTjULbLzFN6SLV4QRY6LjW8a7GwWyEwo3wo53HGy0vomzl7RUyLWeWU/swTtZ3yGGiHTMLLWnnE0H1WoZjew1jrTmajMbdsEot1fN2SrLlI3K31mQ4EiqOHTUar751wGFj5XegEHSpoKMmeImJRnkkaQlN/bll9XSijIksbwl35FU770u8dLLuiyuU0fVfofAHifYTaLb0zMKfwWlhhSh7Nur3PzNOeVia6r2gSN5kOmZGnMEIGpk72hkSCO+ikIOdoWeWeVODl7JXuKh7O55YkFEUMdsFonHtI+CAuw3+yJM9awL+hVwMoqKPXx3qxALYQ7E/7R2DVwfwTPvb2sSUHoFHwSsV2CvpbKduDQ3SXyDw5x25W0HD7kGvO1ECZ3i0OIiV18i5HSWgt5G7qyYB9o0Gil+6EZTpVdEn/iyeuaUk/2oZvin7XlbkjuXIjUUCHg0lktUsVsWBkTZFUhaFpdyJbiEe7DdCFE37EfK7wr7sgViBUSOFlLEjoEn/GA553wVwjeLfn0VoN4pMxGorzhOeUkw5j2B0+HvZs2rFtHP2hsFTaX3CGNTqAxE/qJNbQft4c7eyzJkjEk6996vmRXwN3kG8eOrgj1lgcwo08wwrWtIPkvHnOLJ3YvFSZPpR+uN94e0QpQ9P3tSfq+LN4oi2CcnjYnF6b6PlQiuPCrUYpA1hjCvs02wTu3gpOtJ/Z9RBGn0/lQOSwZkQQ8hbLvJDZeu1
iry5gYRDMkZbmON96kQW7w02xT5NwaJHV1kBsv8124XTEgTJKbIkSsWJNhHgHMoYM4cePy/7+CLKymCPbkT9kk5dboP9IR2EGhlHPm22pA+CRREjGzZ1MqJaHj1KCvkyMwG4qQEB4V13h01y8YwjMW0NduEohYFp9OmFy1lEcaL9mOdSchwSBjQPOC473qoqiqvQ8a05OvxgjrGYuBS+id11eHwIB8d7XM86A2pVaiiaQLJVjPmKphRkkdLWvIH2m33tP1fxhzU9UNaK9K6331IWk2Ljbn8zj4e+OAgHkV4xUZzvB3F6h2E3y5uXoTN/yQSDKGGSJYawTS819bzWRm/bc1XEOaC8h0dIBx8KVJcxTOcfZeOO3SJNebVfzAAKnmxbVRRZ2JeFdlEElSUUGTPGhwzu9O0n4lmxQkhceaZ7yWN+rgcBFzVbrMLaUY+cjT1E3tzuEF3Q0gAus6/xoX8N7vp7+QHDT8OgOV0RW2Pj6ZOBEsuty6iLrU/4e/jpEOTOsp/jLDjy2bX1ylsvJHpC5wISF0Bs5UBJTqmW1OkywKJ4upfQE+h0Pzy7J3mdbYSE25c5w0nHCFBfJ0riqqsE4WZ0HF5QO3Xf1C8taPKsukHNzCWZ/JkFGKlQHDz1RkhLTnBl6YpR1L4VIYggxE5vxw51l8ZTRkREP0ukSKr1jerY++R5bTIDx2FJlm0GG7BnpziauS9tvNrFPv1StFlTz7lyebBwuIvvhXAr4zMV4DAc1+E2+mnkLre6CRcBsfKNzMcs2mACoVbq61c1gyItDCmn1Ohp6OCiRVJZHmb5D0G130x2gbtO6ziPF4XZS4XTINftyv1PuMGs0BqJokCIbTO91APiOV6b1cSiMz/3Npeinofh7+XvRe+CEiV0PzhetXjNShuMRCR3Zhg6OIO4TRzwRVfX2ljXjlHBOqnQAlFIkbPqbNm22Vvh9HwT//R8EeIwGgwhx7Fpk/u5oW9gPCxKG2DdDBJ9//HHXRL2b7W9C3Gool046G50wZx+ADl1GOJEDx++ue8FhhfwaZ/hnCdAdK2bt/v3v0SYrykxwMvmUcL70Q0wf0E0wG+ibMcBELT5qwkiDkotFqTQHgYZ9QjLsLb3wlBRoXpextZVL1G9zAXQTbVfxzutIdEPcUyGFe6c6vuGIDJrTL7sH4ftSpojZ1v5tsN+Q7NTJSDID9jbuAJJtyiDAls82Nnvwt/HJdQ4or+mawLJOOpDCcObbgVVIRzzPIZ6l1rVHAcHBTeW1V+tCqNE/OrvNCK/8wqXmsEm41jTLlZV+g9QLD3OvRUTMz2sfSwjTwHsn+42MKWKkAHZtEfx1fTg+QlB3IxDbNMHnnziIBDsXvL6RSQx/BPisy3JbTQ5+9SHByBkpf/COdN8L2ahZc5vqmzfi+A/V3BXna917h0D3So+6qx9oPbj0XeJlQ6VJC8picIcjaRJGmgtEj6d5aBKNnJLuEBDzN61HF+/xXbXtPASbNBHgMV6qRZO35mC+2gIOnMIDkc74U3RXaFOw8iuJJdXYHFhFu4NMIeLRFKXtsyDMmAZSfR2vG7uWqP7cb56OrUIervLHII5Aky0xwiL49vDvhvfJjTozNerVvNMqvdlCif5NMJGqgkv18wMvWJIVbjnM09eRW54doubayebybpYRGNFLhGLhZhO7muwfHrnsf6c1/E/NCrfHsA8CSpcoCH90XdqkJYPRGa4CR7I0IYDGr1ud1KZgDXzRTZ3XZNRyYh7huu8rsEzXRh83GRSrMmcQRaFV2YMeqHk9d81ZWSxDQPhsvBg0e1jsC6I4uWvsDL3dyuCqRCVC7uH/UMmG6ms5hHs8wJL/a8gt1z5RYdugiEo28ggNaC/8J0v2L8qPnrCq34Ko1zPOXyjDoESxysVKm0v3GfAt+uk6C65ziXMB54YQT0d5fw7gea2x3xhgB0NWolqjggam1xayw+Z0U9KHaYuteystaNwkEqr+O+XExfN+Dsxfn2pEaYm
vBMTiXrqsR57ByMHrdPDBZbSpQ1k8wzkJbWatWI1rEwsWaV5b2V7FV6k+q1ZG+ltfza0tO8MGybbBVJSpCrRU5l5eyeO5jpYR9dwggW7JPkR+yD8EZ7GDAu5iD35l3jMYdMnccr/YYWup2mtV5GMRUOYvtA0OjCp8pYBKOdWBhqUOSXE5Ymd44lsGdN/GBA/iRWgs34Hu4ZOjrzVe9sw3TOFDhGkwb+f4lX9IkoMSpVdl0ENfZWo1S5ySaE7YvVF0Bzc/1dTgdeMoCHheql+YDCRfbwOJeCkK+nC2vYhViX+YtqpIW5YGXVN72Z9l9dPiksLggZpSg5HJlGJNeEhp1+bqZ/sYBBAk7cB9ynwOpzldh4UlPehIM8z/2zHcgfnDK7hKU+wXddD62uAKJr7MXqGBnTP4dv9FlhqDwZ4WuJzpgocfWFE/j7FSvOB4yUB1RKwTJpLuGRCgJDmViuBIdxUTQuqWxLazWkn1Hz4DGRaeubhdVAR5VTml5o+h+9WDwGTSRitnXxlKPdtN1bpnz7gHgkvT/yXp0WZ/qJ5wIOjgvst5yqAPV0rcgm1rg3EW+on6HQ1uGTqq9aVkJCXYQpBUXylKB1OpFbZExJT0viGaGncBbc2DhDGuoAAc3IrnpvxohyZXjZcChXpUIkKpaSg6XzObp/B142uir5Jc4w12ftblX0sDGIDVYsE5rkahEz8fnSPyXvug9t2Hv33a9Wil+54KO/2nMXYjcAmV5VXIqr0mP9I4gzoSGIuKX/vrSAug4+bsSBqJkUcqWYqAMNpthsa8fy9RROpUnSghHqWPqnDr9MHmZcH+m1IEtdb4SCLDB+S0pxQHN67CP8Y7ZbNfhqEAJtvS2GTmv5C4nVO9xySmhbuMEkGbv1uP/qUOwIQiqaewmxApRx42cr2YfH44O6GqoTLXX3UV8aZSywhu5F1XBIwT/rR4X2xBkWNlq7K10JGS9fLb6v7CCPAg8SVwoYTbrcPlhZWZcE8UNR0Ah++knSRbMomEmJ1cr5RQ80v8DtFulrWPqZE233taehJqYRLh4LoBrRoPifBV3TwcflM2k4rTLElLMZniWZtCCYJTWykM9FMlrwtKge+mtBSlg+6wlSCpgf3zKEHWBjpYb6R1pyRgTaA7xbkqLg0uCjas/kxzpVFHQogQc4Z/WcBlnoGQT5ocdUNpjmJWLWdEwWnOejbeAajWFUNP875AM/egZpbD0J99FnHLz1XNcAU1ZVZHB3RWdpok5dnRFiOiNsVT9mGdiYMgmMVd8Ww+j4toGEkQZ6c8wvGl6VuzmaaXHTdgJpR/ib3i8oKzOBCSi+Te0RKsTXDiDVOfI/NmgJCFvuv8cRgTE3damcmu19i6OLFhOEx0YBKGWd2pRAQzfbFHTmfx1wUvOYoOcO0gXF6U1ucxsGxvUc2ABlpYEo91Wvrm+uOO1tLsQwagoR/dIrDX1fpJIp0xZpNuRkGlOUj9R2w5+Kmf2XTl7AdWS1MV2L+ZRxvKDvL2bSQksbGr5OuBZCJ7CFyaqUVuoTST/VY+Bv1Rz1aUx4/IqTBG0iiXtANhvNFUMB/0t+08FwxF5E1EXSrirjiUfXsif30CEmPG8pyhm5EQwxW0Pd9HL7sWSR6QAtzLGvUM4DNlgep7+7spDBkaL1B6Qc9WQ4QhcOkU+uqyfCxXn3JurRIWhZX+G+mSUhKbblazOR96sUEM0eDvPc3KvS/aIk1fZgIL3Tg6h16LRtxakZJfF9Px9e6vtOKx85vsWblhiPjD7sGQ3z0REmOnXZMooch5uauOm3XUoXxd6ebzqiFoq4mBba39a2ofJjT88A7IQDeBAl7WiQxISXQnBr1D7eb9zvCfL20KBE5lSu9iQTb1A9RZw+iYRYWKR6kSnN0k7yX24nHiMd/V9fjSqvMPxtr+g1/soVdek/Zn9p0NlwGLqyGSt1MkWZ0pRq8qhze7aL4v8X8lLOvTae5po1tK8lHjPxp0bJxXeBMzRNkpn8q
iCwk46Kz/JUS7QChLjF+5QGBmAlfdEbOqnI7WGNJO3/IwbTIPHQMBDjOn0gV/Oztg8d1ia5ciAZlUGoA1NbCdxvAyi8btoze16pN5E8zxCV9Gv7OJ/bfORChq4/EDogdOF1STQKRoMa/rd3Bfwl9RaFt8Hcs87Bt+5/MN7XndIKDtH0zBjLz8+kEDYf2PCPMT2Yqc3QJt1D85BI8lqIR/T8r60YldemM8b+08NemZURk1i/TmJs55qIjApUzkCLSPpKy4nNfZSHuS0vESkon6la8RAycE6D0Fcze4XMrLtis0kFogh+aiVVMKOdLo12OSnfn72NnkZQknX0sSXW1F1/vpo7h4p6+HKigAZhrF9RC0xP6Zx3WmBuLYhiF5E5/9ph3IYcAZ5VzSh26yon2EvvaKLHrTMkHWrcMbTngwA2teVxr5FP4BTJ+Btm/0cq2yAmMuNxBmdm6IQsS/1nxGwqviIaF20OLnhu9V6+KYh+oLHK4GsKhHo/6m5nGq+K39/ljKL3aBBcgIKy5En84lKuQ0EC7d4JWOxLGdkXpqKOa3LaGDSv8OZQ8ElQOils8o49EkjHZXIR0gKRzheY9JB0PxvIUoWOMtfv4jFfvHZxCZpI4Q7Py4H2RIDhk5PpDrk1K6qmPhRKfvHcZBhxfewE6M6vubPpoTbcf/Z+hAKVtJnfs7ITPkuhE/LkwOEh5c8hdBGMi216S0j21TTOXHlCLAhqY0YFArgnzmKjJmg8e3Mq80B38zEIoOZ52e6mqBz15wfI8wUNXfsphCmVIVIXxNE8U1vjYLkv0IVZ7ipxPgAS9q9cAd+PFjkEutHHz9JKROBH4ZtvWvuBx8ovaQgKUHZomgEJGfQnJcK3iaM7b7jVWRtGb4/GPbUPHA8caQE/mWiHuCRNLN7EF6/qj6PsyZsM+J7xQScYGX7xftCHbwLCT7sQrkbfBRt52hS2wwSrzdBSAei9WYpShPx5BnPnxfcCIG3A6G1cQacMHp/b6gFganpsnN0kn9/PrLmVhAAe3Sirurd7UQTpjo47NdQyKvSQjHMSsM+/42n3p7PNt9RJS8KCNwDP1lIYij6pjR9gFFadEgOuDBKKwApBCXYJsWOGvef4V7kI6pXl7rvvgrg/jXnsS4ZadaKnCQj0g6/p+WIx0dHCkUdTSKwNzlGvit2/K2YcimxjKGVeuWZRJSAH2MXqMb5K0ZbejOITH2dJ5nwCm4X1KgNOr9CWg2slIIGGgOJqyPPfvNtxdJcA4Z2NQWso7u+2A7fVFTdzIi8Se3b/JNe4QZ62mX3ujRelSwBW++tHGw/FNNQq7MRZDiJox9Fnl1cmbi0WP8g9mQDjrhl0VmygbMUW1H4PsWWmdKPavPyhXnIvDjRQ9FiJKuq+fkUxUQEWIX8Hv00HMA3pfkh0+B9TNCCnLusL1qSepcIYuYzJkL9yV8O1iOdbSgOpwBB+HWOOewomH5lxkKBJsfTu/Gcy505rDHm1N3Wfq/PpoGC9cL0knusoov6iWR7jVTE9mXIc9pk//tHj4wGZP/PtJYgCB16oztOX4kaw6lWKETwwvqYPxHdUYMeQH/CwU9Jcor3u6Y8aS+XRlqi+Z9QIFZGcXxPHzITBZtVeYD62dQB3rNehfBjxWjaZbbBou/VJkkMYm10Bf4/bdrqx0A2w28A6ZaKzH4gZbEcROW1V/aVMSfGaqXMY2Nph+oWE5HsISUUHfPAyVbUphJmMdKjNRawBKJ1YM6e+tHRP9TawGyV75MV2e+qT6lekkVBQwow56/027L4ZjpD+caU1c4IqbpSzD3NK+wop85BRVZvjJbTW1t6GggkLbsCYh6VY2trgxYDBITFqje1CkzxdKhujnFoXkAAHOMngxAuNzf1UaHVLZAVUAVfgF6p6sihCeZlaNOL51N5zlvwYuA+bi5/dJw9mYfecpMgb0lcwiROielLZIivHdoOPc/V2pNBHEiqbGEsF7DKtUbA/bPSlsJVD/r5fkeNTH
2bAN12oMz/qgr0fEmCrXvzo/kxnzIcEuKYKDe+jqZpymfLpw/vDla+wSsB4Ql74oXk8wDC91GA5/u0XiB0eCGLbNH+M85W3T8S83E6UC3TjrB8bE/uvTrW9+P6Tl6O4vFG/onGD/0rDSpZuuCIQlwB4CnlDNuf/UXrXn6tlcd4u2SvMBIIJ9lBv5MD/UsC7IF1lSp3FM+T36oVVqs2Xd6xcrSm5fdTe/MxT7n6iBBGbKImxl28KJWvF799Cmi+2JC8aQ6LZuSW7Sks6JruOD0XrWWCW9iiFzlX/e+KR3ZZxcVnhBrd77rU5INuCohinvacV57p064MXQW/o/WVt8vlZg8/yUr/mp7Deq2B+uFONHalUBwwU+PLMsn1vEJ9tCNA8yWppuow07sB7vPVm1I4z2mWTw1nWkjuzmPf9RHPhb+HChHW3QN0JgHXmwOhkkRPcOjilgivQbGc+IbQfMMTv18SRCfAMEs3BLFFWg1/lDJ0yBaO81iZFzrplSHcI6+tlLZ8ZtY+kWIy09bNfTtoTbQiopsvDBGvGpFC6wkeJ+hcRWkyXOe11+PIc5EH3LguQ5u8CGuf0aA6Im7EwYZ4sD5WjYM61c8cGAxOYYq/WF/qbnzKLpscMQESXpmNQWRRwSntpUzFWnXKXRbqh8c3L8tNZyOzChFkXt/+lGL3ExOgS5Ch9j4l7KxtHSm7+GnhvzlU0fW2aAsSJJ+v7hsE1mxyvEwo8AmfRHOY0Szn5nUPMOGDqf2GjU1JNHJh/bs7JLwhtVlq2h4cMsSaJ/x4xx0RI+VAtVAyn1asJ6onoOIn6S80LJ/fuxXUvT6B7GxkWDzuhDObbSFnYJcS6L70SgDp7toIvZeCnqTtrcd9bBcga6NplRdIYHbjZg/fcs3XvWGbUfGexvzHIz4Ve90N7cmVqSblnFatoeKeeePB85WgMXWZzBRtJNb2Jf+qCS3fcIIrE+ULqK1KozvBuldBmWkBz2cQZYfZo/baEZKtAcKsresa9PN9QsR+Tmad/f+q+ghPsveJkYWw/YTWS/IZwPa1mxAdHzJ0BLLGhO4FIMJxt5+wl1pRdZXFGudWZS4/rERFcpWB1O+KhL8bebK8stIlWXbk2FWuVFOG7XoMDfg+i1GkxdpTRWxbFSPwg7JhpHtaDgjCoW3YukvpPG8HaQPCC45pmVTrJ+dY/ViRCK7wKvNYafsaAJIB6Te8BYE7TjhROQsjsgsmXmMzzphC+hw54M4x/cTNIvTTIID70A381AnVx107l4PuSp6izOgd4M0GlSSFEzFS8xAqURzaTYGQ3eKiRqiI0pJiJahk+0Iiito/K6CeI9Qag8yh0Mr6zHbJpEqNb61A1y/z18lUnMC32VDebzdgAuC0wadYjufpDQzd1q3CJvGr4I6ui4a9wsjbSUcrbDIZ0ModSKhs7De3qvlLRB7z6Cdi8uXNqgoz6f6yNsXv3p2osQq2WFAkBjPcZLnhwIbHoPq1FcsVbSIYH3LB5NllmxUN2MsXDPky3IWyOXRSlAckm2EwizdYA72OBw21chxC7aL/R6Tv3HiO5zuMcv3407DedlPuBTT13WtlUWV0TFLqIRpWePoL3TiaP1koUwGtYnCa5R64rz2v6AUnvC+FO1NgPRIoBxDscbYJn9fNeePMNz1j3vGWiUU0MjArI0fa5k3SBjuiPXlJPtc3LSO4s3NaOzF416Xb3fsZyBeMecsFwIaspAtoP84G0+erqCbaRdhn26mpOtUZO/dtY/UWx6+/NwaAdCLR5hjnlyZJbWlSwdDOk8Yxhcm+LsTHAEspULrJKisByAGgqoHbKz5LMRnyU7XLaW5lhOV2s8aIksVdVkWnk6isZpgKiE8vRHyyILQQ+ID/juMQiEHcK0pagGtBEnULuN+ZfpNQmPfw9LfkjoBeaSJZtMM6E4lFa/D3Mwbd5B3kuvMYOYt+vB6D3j+sw4w1ilYU84YnhJ3YK1s9/rhZOEiogjVRCZbuvjUQqx
RYqquGDamSquekSdp7zOT85VoGBX1vjnDFAuRVBDBISh6O5fjf9IjCdRVLSCOHn4rP6IQJFfPFRSEV1JIIPplpPjTXraTCFfAAyrMCkot5CIjPu2ol2EsQlp0irNnl52Yczw4ofocLn3GzAfbLWCprnp2qqSb0Pdz6AX6VjdYx7IYfuCnpOJQNJXzaGyTUYIrBU04RPFyTB502cj4Rj6jfsHa8gSlEoiswSkYmDv3TwC07roHGwWUvf7u8YgRAfLLEWRPOir9/CBF4ZkZFSkj82ehdnwRzO2uz7cFbPHeBC+UILqzlnk8DDkL3nS0u9/xaoOuCamnUXKW7vW91IOvHsQBp/oPXbi3WnrbGxWWJAdfPY7ylIRfOU3hLQUeYpQPmlFCGxVJvOKx3lstHTvYzPhdOT0/L4v21+9rF1Q/g3PFc/ROHziYNdiby8YGLO02yC34O4+uhZzUUOwzVDiYbf76EMV/6pvMUhoJWbLYC790sZDcpxLRAywAH1eFaI9iQW8K5O1R7IXY2zotxWuLHmMGWwmLuuLuZiuGmiIiezZXzFbPbkatH6xxRSpjJDH/z8yPx8MGMW51YF1ZECpaJoRQDTuZHy4/PIi5MiVUcDWvJa9m4EVB/1xUdUqPce5UC5tIcoRz4xT3KaxTCLBpODYJkOK/rRJdeg2ljQHcY1gS3fZNmcmurng8eX180gTse3UPczzhdSkSdw4GgAgDL28pKDQOKEZQvgQKjAYrdCu8AnOAdizvTrmGYPTE36oGor5FPVAgvJmgwuYKvjiEV91D/vse1iDGy5qIdtnLhSBgmvOt0mzSCgp82KlBETHOhfRBAAmSI+g47S65j0ZoU7SrUPaxAwzh2HQq86OQSa0PnPgDG9yGODbtRU2UyRmUlbqIXZuLgZ/JrbbFfVTJyu5wDqbGrjLxPoewJ9hRTTWTny5okrGZIhVFlpQYiDNtw1Y+O426lC/vWjxoBZpt7F1iQRS/HpGqwrnaVo1qvrJXDVVka4rfEhx22/eo0HWoi0JiIXlgNZZPCOo6pXXY9fLXyl2G4ZbtZyhrHVcWQB3K6b2XN4vUqiXwy9KilmVryPAYbM9Fxa7hYwJun9b+AXw48Gpb5vwSVCSzcc4nCOe/RJFpHldkoayyUPJV3WHEPZTMxnDQ9Y9Jrj+hW5ZiPlQw2grWxispPmUfLTrCZZ44tZulhXuDz+PzESXzvFvGoMgb2Qh37VkHMIsJgMRc7cNKcjUOpdQL8nWF9LGsHtjPg8VW1zr4SyaPXvmtUeTXjeOnUMLcibHmJtQ1BYZ5mZMuXbM689gF8xDUs8mbnrsZ931Bwd7gGM8NNP1BkWFAA+lFoSFuQjzVo75BcX1laD/nqd6K5bS4/s7WnyaotV7u+8wudLdfv9Y4Hq8pZX/bE3P7uleAAe+pLDSx84yJ+cC2BbqVgypf8H8xGKnfA16frYXrbHv+QrAvAcqZzuo79//bqTxHUNQ4E21gPoWKOlxEDDjsy63cHSlCEU4u3dR0DQmM7R8NhsDR8XhOR/8EEIwdkEnfazZYqmSQ707xrB5Nv9kKwEJ++ai9FSX0hkQTvRbhdJ7hhFGacwoy4iZR9SYveIe3KvjYuYVcfAUqNBRv0g/7ZYvqS59p4Ngl6+wW6MPOK1JqHNtVnu3C15+Sa69E+4TqTUS89Q/uUdfOsw4uf0YwNnWNqbnpq2mcEc74uM29RS98XKbdnaep76GazAqJdYVUts7IWYxF058OB1u3qOcU6IXgH6r1Z1SEGFJcVgJXvpW9tS2wtThN+/Bb+p1HLXribf5LhRzsqRseulQmMYDaMrYww7mX0FDuXUI+uvZqbbn1ezGAyQKHVnnekktp2Ep7N1UlIN5gCcfknwgOpMIaKt4tL1giDDCnywVvxMCYrTA2mdCmsHfXHMO/TIiFIL0zlVTzAD7+GygCw1DVTP5Hv1C+YvlJDT1R2mOV79JOgJGcx9DynD8/pIxv/1Q77L1DNSbYmte
6SHawsiwsooDGgoYvNCyFkfHa9RcOB7/ZzK+n19hngYVdsbF9fON2WBYHbrpxY8ufuggrtlNI93jbDlleK5mt/shvh3LlYuK+1PiDgpN4RODiXVMpmVHjq8cz3ofzDqJHazmbSVdQnIj3fQGRr6L/XLexT9bY59ZaqrEZPKXvfUsjHJTDsVOJpfYxHp2v97tnsQPpiA70v/Y3rK7FmuE0waQc/ugcXCzVGscntOd1lVLXKeB7JQMd8VZhgV4MKCFompJjW9ffMilqn8OIR6QUqEVSGNCc2n8B6L/CtQmP8eHZ8U+0aiEPrabJA/7w+3tKHdHZrGCgNhgAt6AAlzEUHYH8j4CE03xudUIAaz7htA1g3+U2KlsTL9F93A9KDKrZrSCZJRMbE4nv+70oYVQC6zHywILPZTH9aL79U8v1yZKGIi9GT+SGVrEBtYf9WjrouotLEtvXp2sbnIuTK3jH3p7lEqcolb5rnL76bi88TMzkRclrnyyB4neK1Y7hhJ9RPfceMu/DiW7igmDHO7gFR64idbpJcjd5TIdTy0AiajehpYbQs2Ctde5z+rrGeUc2EyCIKo+eDPuXRuzPfOkYQuSenUH41KPiGs5GlSJsDTzyTEr0jlJWIKEwYM4st0/dyOn7zQdu7yp8crDON9UPCCad/AdLWBVQVHpPfTqi7XHJ3SJHpi3wYnNusLrd6p42Ei0r6GQavLAFzEpFS+vJlvbCt8UqwyHqkwr6Zmwvgoypn6zFd+D/pDtPO8FSdtkUW5vK8r6+3kmOAo2AQerp8Rez40KjYZrQ3iROmEOER1ZtTXvtrcKZSWqzV0JgCaNfI1yxQb4Nmc0QrjqMThAgkWmApNpdAaxeatMwc07w15BmMcTyOQQmsu3XkfzqjZeuJMrrsruk7/mcdD3MrHMr4x4bYUNNZnuPrZYgED5rVJHrUhnR2gd1Q84Lfxsl4KeA/eoU7JB3T2uqfoNEFi1Ii4lAIDm1hMb8bDCIcFh9nEb2YLh4q2XILHEdZ+ZxY+kDI2KJ93byLCX7ijr8FzLyuEtOldvaR65aCFJklLJGSayW8mT9Eu+coKNLBo+EtvYkmuPX38nKoZ/zIVHsmJmL5MYTp6KrJxQWkKuFmBxDTTRt9eDGLzuJboNpeHP60/qEyvDzTw+Jv9Lc9U/BdddnrsPlBoTzukpL9faD0M0bx8tsxAe7LwymR8cYpzDPcYcufQ0qU/og783+YYJr3GwbDwYcVjJDBhV13Ci0ilYoR77BuItvYAu95bLHSkynpC2vhpTGK0CoHoUoJ7jmTSiCFVn87ccWQJzIurha5nTyYAowB6AFaMGnzgXQgWwfuEvxNweY8caVMvrrGoaymdIflBPzOz7fQ7JXAEWAtw21faCxgiWeCe7MaeDTilCLyJwvkZ7RWIhcNWAIRNSYkJBcebNXMiYJxFcGzmtRTTE/PN183MFt53b+w7WJLBFy0wVhEndvBvY/TOqMlRV9MvTaVK3nSruBuBWtH7OyY9gtbGknFLz/h9Kb1e6fCP+UKec4uOU7lo6ZAWwFlwd4LhT0U1NZ1YL8QWkQs+6odxRlGpy0rxTHismGYeOXI9I01WU3BT0BQPCGcPGIzivCi214NwBBIPqhGxq29+wPPx+QLHROQlvS3WaMH9YXzGMOTmcENThVVuWTZXE1hF02+7aTH3KwjLHtlU8ePOP/ZGL5vOYZiYY4SZNQETu8mxCoeNNdpwdA3GlaVZ64jbe35iRpwIk/wWYfoESSUM0qV1WLno1GEwwSUUnu2Y/hJJYPgDwrRNQ6AN/K72qCucfPgL6yM9vZ333wJ2CuqeCiy+IGIAPFHMMARoKQQzDk3ufz3NELAeWAvxlxsE1MrSl2IYi8SdYeVChXPUinThxlWxxhCQkFaCoNVVjtctzLm8BjmGPFzwNpaGeWD083hHP6/d/tU+oUUia1EEa/Fug5vBMHlEefK7d8KlvHQd7rca0A4yKL8euZ0fyoZEI9HBv
pMURCOs40Ue0p4Puawq8hHLboSmAd9Zk8QZhZ//XfBKfBDfsWTDQl9ccaP515kgOPEGUiT3K7nR2QKm0TA3tHx9VCYdVpxzMKW5Q6vdSg1/L7HzXqOtxB9CIEmhIlF5YbUpdKN6HF6Ud1Wb3H8QF9Hv3VCaGcY+VmOt2ZnS+VuzsBX4BE9fy9zyHKJtbDT7gZeCkgXHt9Kny2ZBjBV7ESyrjB4IbQToE3RKZVst+NX4Ozzcb6owiMHTrqFFTjodX7NVEdZ+Y/plUtt0C0irptIwwheZ5lpmR6XL/Hie8BhLyO7RXfo2vKKNhZs5gjQ96pGTDNRjIqEZfcdM7GIqWZXV6eHaZJ2X2J5IaBF6Rlo6T4oY3lehQNNrPyrzQaIbUkywRDfzke1zERoTzB1N0NsWk0ZcIkBIDYlV6FkvJ0R65IW+PazKv6sODKe1ltqRjX8BB9U1J0yUloO6qzML/5XEBx/Z8bkCgzDGIeiqtYahfAg0+VRBaPzfceGr3P2PttOuQQR918oGXb+O13uZJDmYMAXNcuFtb5lqUxbTaWa7vRrrIa0i67IqEPAwNBfG2WT4zdffCsphSxDcFNElrEYGWSFKwaF5ACP1PEz1WgpjueIMEVHvqXEhWzlaVYqhUbEmNjqkJD0nWTIKNWIwVppO6jVdnavZTtf0WQcB5TZNRj4nY9iF+Vj8Zs3SKADwpIcZfQqC43e0n4Cvh8iXsGW/76zbDl0P1986MRqU3gYrJrgMYfvJdQi3mZJnHvfKf93FMQ8AWCrISElLncQasIZ2VF+EIF5qKgCP74UWBg38ug0vOvINlpIBkattzI8OkEHbr8ar0Ecw+1+KtueCDKh5hf39EnwFZ8b1PsG7Y+wxWOWXxAv+dHyDzd/JEGQZF2A8Etrz2YYu8rJf4d4omu0k7SkeDsZwhwOlvVZ/Mu0DaoQ7pDwkT4mo+GxwjCXw4U2OLfdW7xOd8+1RbtfFBO9j2tOSLmbN+1hkPD/rLPrG+MuhsWoZuPIxEoRNyrCvAXShrWtfduOdst0klBDqx/medMWcuD0tDtX/FUPadwPpzGFX7xjJxjzWGT2DYxu+oAk+CWiULowz1XN/15Dg2wguNFUqpxau917fah+KEN/4Y2R4y7/xcTpfwNpIE3BKCPyCLV6gxot2jJy6/uUfl6BkeCtaOIWrWE5E3o55WYoBMGgqUHAzdfYC1BZmS6Gbf8m6tCxydHRjpx0IlrIvbaNVPgT2sBi0+myPOefzEdMXYpDQ4PGcpz8XcU2Zd6Iv+AATaTMXS8OIp069Z0bhOv4O711mWAyOI9M0ZMNC3ACAJwFdxH2/54FonyMlQcBBBWCsacggROjlkG5UaAEn/5KeIVjw18DV8mw3ke/vp5WXZU56KyRk9Sse5sE0d2nvVwcS/N0bkFPwkAOjSB+ndi6f/XyO81o4UJ4vvyYpAINzIiE7ePPc+9/rdswXMcG6MYsrDDLwgeIG9QYY+n6vWqPdgbaU7DpgutKiA9ju3IY/5Hat1c9zkXQNhuXIcYowURduJpiaQ8nP66QCnCAlOMKBcuVm48T85zIoV/O+Mn1m7YzJ0Esp64d9JwtIeNxDwZrv0x9O7gnue0eDTB+kscOHWKn9Iy8P6aevyEVqQNV7xG4qWG03ZBwtbHe8rJOPk1zGeKMaO1NsDAqyyUqOiebiLjaTCaw8TnhjrCoGj3hmddRu/qN3Hm5oq7iaE610fs8Ykjk9/E8bcAR4jFA3TaiYUjkKmVB2ef0Lhv0rOaho1+/CIVE0bho8pHJlhqa+2Gm0fHJJQ50jpfPCjbF+YNqGGkg5OaET4tkD4FR7smv7Gpz2zXx62Sr9N+Q8DWIwxkNZDZjlTlZ4CwiCTfGXjqKAmiDGfWSQD/tpjEyCX21fIxD7BFHIaTRbcxn2RWBXIZ6ZSYDZkX4it2sGGSf6BBQ9CWzMVJY+al6/pfP44obBVlSDQmiEPnLDAOl9UYj8ou0ERZ4JykNRR1pix
LyK3lMbM6M2K8BzgBnPHIOMaSU8xt3MOWduLqFpOU0giUgedRs4kDpD9/EU7UUAtOrHCD/e/s4c8pxjUo1WNEb5csGAQDbtyTGmCuiJNT93/VPeSicIsmtvXacq0zlBGdr0CPkBAAD//5VYk3s=" + +// Set accessor to a real function. +func init() { + compressedBytePointsFn = func() string { + return compressedBytePoints + } +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go new file mode 100644 index 0000000000..c9d47f3078 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/curve.go @@ -0,0 +1,1272 @@ +// Copyright (c) 2015-2022 The Decred developers +// Copyright 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "encoding/hex" + "math/bits" +) + +// References: +// [SECG]: Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) +// +// [BRID]: On Binary Representations of Integers with Digits -1, 0, 1 +// (Prodinger, Helmut) +// +// [STWS]: Secure-TWS: Authenticating Node to Multi-user Communication in +// Shared Sensor Networks (Oliveira, Leonardo B. et al) + +// All group operations are performed using Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1^2 and y = y1/z1^3. + +// hexToFieldVal converts the passed hex string into a FieldVal and will panic +// if there is an error. This is only provided for the hard-coded constants so +// errors in the source code can be detected. It will only (and must only) be +// called with hard-coded values. 
+func hexToFieldVal(s string) *FieldVal { + b, err := hex.DecodeString(s) + if err != nil { + panic("invalid hex in source file: " + s) + } + var f FieldVal + if overflow := f.SetByteSlice(b); overflow { + panic("hex in source file overflows mod P: " + s) + } + return &f +} + +// hexToModNScalar converts the passed hex string into a ModNScalar and will +// panic if there is an error. This is only provided for the hard-coded +// constants so errors in the source code can be detected. It will only (and +// must only) be called with hard-coded values. +func hexToModNScalar(s string) *ModNScalar { + var isNegative bool + if len(s) > 0 && s[0] == '-' { + isNegative = true + s = s[1:] + } + if len(s)%2 != 0 { + s = "0" + s + } + b, err := hex.DecodeString(s) + if err != nil { + panic("invalid hex in source file: " + s) + } + var scalar ModNScalar + if overflow := scalar.SetByteSlice(b); overflow { + panic("hex in source file overflows mod N scalar: " + s) + } + if isNegative { + scalar.Negate() + } + return &scalar +} + +var ( + // The following constants are used to accelerate scalar point + // multiplication through the use of the endomorphism: + // + // φ(Q) ⟼ λ*Q = (β*Q.x mod p, Q.y) + // + // See the code in the deriveEndomorphismParams function in genprecomps.go + // for details on their derivation. + // + // Additionally, see the scalar multiplication function in this file for + // details on how they are used. 
+ endoNegLambda = hexToModNScalar("-5363ad4cc05c30e0a5261c028812645a122e22ea20816678df02967c1b23bd72") + endoBeta = hexToFieldVal("7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee") + endoNegB1 = hexToModNScalar("e4437ed6010e88286f547fa90abfe4c3") + endoNegB2 = hexToModNScalar("-3086d221a7d46bcde86c90e49284eb15") + endoZ1 = hexToModNScalar("3086d221a7d46bcde86c90e49284eb153daa8a1471e8ca7f") + endoZ2 = hexToModNScalar("e4437ed6010e88286f547fa90abfe4c4221208ac9df506c6") + + // Alternatively, the following parameters are valid as well, however, + // benchmarks show them to be about 2% slower in practice. + // endoNegLambda = hexToModNScalar("-ac9c52b33fa3cf1f5ad9e3fd77ed9ba4a880b9fc8ec739c2e0cfc810b51283ce") + // endoBeta = hexToFieldVal("851695d49a83f8ef919bb86153cbcb16630fb68aed0a766a3ec693d68e6afa40") + // endoNegB1 = hexToModNScalar("3086d221a7d46bcde86c90e49284eb15") + // endoNegB2 = hexToModNScalar("-114ca50f7a8e2f3f657c1108d9d44cfd8") + // endoZ1 = hexToModNScalar("114ca50f7a8e2f3f657c1108d9d44cfd95fbc92c10fddd145") + // endoZ2 = hexToModNScalar("3086d221a7d46bcde86c90e49284eb153daa8a1471e8ca7f") +) + +// JacobianPoint is an element of the group formed by the secp256k1 curve in +// Jacobian projective coordinates and thus represents a point on the curve. +type JacobianPoint struct { + // The X coordinate in Jacobian projective coordinates. The affine point is + // X/z^2. + X FieldVal + + // The Y coordinate in Jacobian projective coordinates. The affine point is + // Y/z^3. + Y FieldVal + + // The Z coordinate in Jacobian projective coordinates. + Z FieldVal +} + +// MakeJacobianPoint returns a Jacobian point with the provided X, Y, and Z +// coordinates. +func MakeJacobianPoint(x, y, z *FieldVal) JacobianPoint { + var p JacobianPoint + p.X.Set(x) + p.Y.Set(y) + p.Z.Set(z) + return p +} + +// Set sets the Jacobian point to the provided point. 
+func (p *JacobianPoint) Set(other *JacobianPoint) { + p.X.Set(&other.X) + p.Y.Set(&other.Y) + p.Z.Set(&other.Z) +} + +// ToAffine reduces the Z value of the existing point to 1 effectively +// making it an affine coordinate in constant time. The point will be +// normalized. +func (p *JacobianPoint) ToAffine() { + // Inversions are expensive and both point addition and point doubling + // are faster when working with points that have a z value of one. So, + // if the point needs to be converted to affine, go ahead and normalize + // the point itself at the same time as the calculation is the same. + var zInv, tempZ FieldVal + zInv.Set(&p.Z).Inverse() // zInv = Z^-1 + tempZ.SquareVal(&zInv) // tempZ = Z^-2 + p.X.Mul(&tempZ) // X = X/Z^2 (mag: 1) + p.Y.Mul(tempZ.Mul(&zInv)) // Y = Y/Z^3 (mag: 1) + p.Z.SetInt(1) // Z = 1 (mag: 1) + + // Normalize the x and y values. + p.X.Normalize() + p.Y.Normalize() +} + +// addZ1AndZ2EqualsOne adds two Jacobian points that are already known to have +// z values of 1 and stores the result in the provided result param. That is to +// say result = p1 + p2. It performs faster addition than the generic add +// routine since less arithmetic is needed due to the ability to avoid the z +// value multiplications. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. 
+func addZ1AndZ2EqualsOne(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-mmadd-2007-bl + // + // In particular it performs the calculations using the following: + // H = X2-X1, HH = H^2, I = 4*HH, J = H*I, r = 2*(Y2-Y1), V = X1*I + // X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = 2*H + // + // This results in a cost of 4 field multiplications, 2 field squarings, + // 6 field additions, and 5 integer multiplications. + x1, y1 := &p1.X, &p1.Y + x2, y2 := &p2.X, &p2.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity per the group law for elliptic curve cryptography. + if x1.Equals(x2) { + if y1.Equals(y2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. 
+ var h, i, j, r, v FieldVal + var negJ, neg2V, negX3 FieldVal + h.Set(x1).Negate(1).Add(x2) // H = X2-X1 (mag: 3) + i.SquareVal(&h).MulInt(4) // I = 4*H^2 (mag: 4) + j.Mul2(&h, &i) // J = H*I (mag: 1) + r.Set(y1).Negate(1).Add(y2).MulInt(2) // r = 2*(Y2-Y1) (mag: 6) + v.Mul2(x1, &i) // V = X1*I (mag: 1) + negJ.Set(&j).Negate(1) // negJ = -J (mag: 2) + neg2V.Set(&v).MulInt(2).Negate(2) // neg2V = -(2*V) (mag: 3) + x3.Set(&r).Square().Add(&negJ).Add(&neg2V) // X3 = r^2-J-2*V (mag: 6) + negX3.Set(x3).Negate(6) // negX3 = -X3 (mag: 7) + j.Mul(y1).MulInt(2).Negate(2) // J = -(2*Y1*J) (mag: 3) + y3.Set(&v).Add(&negX3).Mul(&r).Add(&j) // Y3 = r*(V-X3)-2*Y1*J (mag: 4) + z3.Set(&h).MulInt(2) // Z3 = 2*H (mag: 6) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// addZ1EqualsZ2 adds two Jacobian points that are already known to have the +// same z value and stores the result in the provided result param. That is to +// say result = p1 + p2. It performs faster addition than the generic add +// routine since less arithmetic is needed due to the known equivalence. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addZ1EqualsZ2(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using a slightly modified version + // of the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-zadd-2007-m + // + // In particular it performs the calculations using the following: + // A = X2-X1, B = A^2, C=Y2-Y1, D = C^2, E = X1*B, F = X2*B + // X3 = D-E-F, Y3 = C*(E-X3)-Y1*(F-E), Z3 = Z1*A + // + // This results in a cost of 5 field multiplications, 2 field squarings, + // 9 field additions, and 0 integer multiplications. 
+ x1, y1, z1 := &p1.X, &p1.Y, &p1.Z + x2, y2 := &p2.X, &p2.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity per the group law for elliptic curve cryptography. + if x1.Equals(x2) { + if y1.Equals(y2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. + var a, b, c, d, e, f FieldVal + var negX1, negY1, negE, negX3 FieldVal + negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2) + negY1.Set(y1).Negate(1) // negY1 = -Y1 (mag: 2) + a.Set(&negX1).Add(x2) // A = X2-X1 (mag: 3) + b.SquareVal(&a) // B = A^2 (mag: 1) + c.Set(&negY1).Add(y2) // C = Y2-Y1 (mag: 3) + d.SquareVal(&c) // D = C^2 (mag: 1) + e.Mul2(x1, &b) // E = X1*B (mag: 1) + negE.Set(&e).Negate(1) // negE = -E (mag: 2) + f.Mul2(x2, &b) // F = X2*B (mag: 1) + x3.Add2(&e, &f).Negate(2).Add(&d) // X3 = D-E-F (mag: 4) + negX3.Set(x3).Negate(4) // negX3 = -X3 (mag: 5) + y3.Set(y1).Mul(f.Add(&negE)).Negate(1) // Y3 = -(Y1*(F-E)) (mag: 2) + y3.Add(e.Add(&negX3).Mul(&c)) // Y3 = C*(E-X3)+Y3 (mag: 3) + z3.Mul2(z1, &a) // Z3 = Z1*A (mag: 1) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// addZ2EqualsOne adds two Jacobian points when the second point is already +// known to have a z value of 1 (and the z value for the first point is not 1) +// and stores the result in the provided result param. That is to say result = +// p1 + p2. 
It performs faster addition than the generic add routine since +// less arithmetic is needed due to the ability to avoid multiplications by the +// second point's z value. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addZ2EqualsOne(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl + // + // In particular it performs the calculations using the following: + // Z1Z1 = Z1^2, U2 = X2*Z1Z1, S2 = Y2*Z1*Z1Z1, H = U2-X1, HH = H^2, + // I = 4*HH, J = H*I, r = 2*(S2-Y1), V = X1*I + // X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*Y1*J, Z3 = (Z1+H)^2-Z1Z1-HH + // + // This results in a cost of 7 field multiplications, 4 field squarings, + // 9 field additions, and 4 integer multiplications. + x1, y1, z1 := &p1.X, &p1.Y, &p1.Z + x2, y2 := &p2.X, &p2.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity per the group law for elliptic curve cryptography. Since + // any number of Jacobian coordinates can represent the same affine + // point, the x and y values need to be converted to like terms. Due to + // the assumption made for this function that the second point has a z + // value of 1 (z2=1), the first point is already "converted". 
+ var z1z1, u2, s2 FieldVal + z1z1.SquareVal(z1) // Z1Z1 = Z1^2 (mag: 1) + u2.Set(x2).Mul(&z1z1).Normalize() // U2 = X2*Z1Z1 (mag: 1) + s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1) + if x1.Equals(&u2) { + if y1.Equals(&s2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. + DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. + var h, hh, i, j, r, rr, v FieldVal + var negX1, negY1, negX3 FieldVal + negX1.Set(x1).Negate(1) // negX1 = -X1 (mag: 2) + h.Add2(&u2, &negX1) // H = U2-X1 (mag: 3) + hh.SquareVal(&h) // HH = H^2 (mag: 1) + i.Set(&hh).MulInt(4) // I = 4 * HH (mag: 4) + j.Mul2(&h, &i) // J = H*I (mag: 1) + negY1.Set(y1).Negate(1) // negY1 = -Y1 (mag: 2) + r.Set(&s2).Add(&negY1).MulInt(2) // r = 2*(S2-Y1) (mag: 6) + rr.SquareVal(&r) // rr = r^2 (mag: 1) + v.Mul2(x1, &i) // V = X1*I (mag: 1) + x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4) + x3.Add(&rr) // X3 = r^2+X3 (mag: 5) + negX3.Set(x3).Negate(5) // negX3 = -X3 (mag: 6) + y3.Set(y1).Mul(&j).MulInt(2).Negate(2) // Y3 = -(2*Y1*J) (mag: 3) + y3.Add(v.Add(&negX3).Mul(&r)) // Y3 = r*(V-X3)+Y3 (mag: 4) + z3.Add2(z1, &h).Square() // Z3 = (Z1+H)^2 (mag: 1) + z3.Add(z1z1.Add(&hh).Negate(2)) // Z3 = Z3-(Z1Z1+HH) (mag: 4) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// addGeneric adds two Jacobian points without any assumptions about the z +// values of the two points and stores the result in the provided result param. +// That is to say result = p1 + p2. It is the slowest of the add routines due +// to requiring the most arithmetic. 
+// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func addGeneric(p1, p2, result *JacobianPoint) { + // To compute the point addition efficiently, this implementation splits + // the equation into intermediate elements which are used to minimize + // the number of field multiplications using the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + // + // In particular it performs the calculations using the following: + // Z1Z1 = Z1^2, Z2Z2 = Z2^2, U1 = X1*Z2Z2, U2 = X2*Z1Z1, S1 = Y1*Z2*Z2Z2 + // S2 = Y2*Z1*Z1Z1, H = U2-U1, I = (2*H)^2, J = H*I, r = 2*(S2-S1) + // V = U1*I + // X3 = r^2-J-2*V, Y3 = r*(V-X3)-2*S1*J, Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2)*H + // + // This results in a cost of 11 field multiplications, 5 field squarings, + // 9 field additions, and 4 integer multiplications. + x1, y1, z1 := &p1.X, &p1.Y, &p1.Z + x2, y2, z2 := &p2.X, &p2.Y, &p2.Z + x3, y3, z3 := &result.X, &result.Y, &result.Z + + // When the x coordinates are the same for two points on the curve, the + // y coordinates either must be the same, in which case it is point + // doubling, or they are opposite and the result is the point at + // infinity. Since any number of Jacobian coordinates can represent the + // same affine point, the x and y values need to be converted to like + // terms. + var z1z1, z2z2, u1, u2, s1, s2 FieldVal + z1z1.SquareVal(z1) // Z1Z1 = Z1^2 (mag: 1) + z2z2.SquareVal(z2) // Z2Z2 = Z2^2 (mag: 1) + u1.Set(x1).Mul(&z2z2).Normalize() // U1 = X1*Z2Z2 (mag: 1) + u2.Set(x2).Mul(&z1z1).Normalize() // U2 = X2*Z1Z1 (mag: 1) + s1.Set(y1).Mul(&z2z2).Mul(z2).Normalize() // S1 = Y1*Z2*Z2Z2 (mag: 1) + s2.Set(y2).Mul(&z1z1).Mul(z1).Normalize() // S2 = Y2*Z1*Z1Z1 (mag: 1) + if u1.Equals(&u2) { + if s1.Equals(&s2) { + // Since x1 == x2 and y1 == y2, point doubling must be + // done, otherwise the addition would end up dividing + // by zero. 
+ DoubleNonConst(p1, result) + return + } + + // Since x1 == x2 and y1 == -y2, the sum is the point at + // infinity per the group law. + x3.SetInt(0) + y3.SetInt(0) + z3.SetInt(0) + return + } + + // Calculate X3, Y3, and Z3 according to the intermediate elements + // breakdown above. + var h, i, j, r, rr, v FieldVal + var negU1, negS1, negX3 FieldVal + negU1.Set(&u1).Negate(1) // negU1 = -U1 (mag: 2) + h.Add2(&u2, &negU1) // H = U2-U1 (mag: 3) + i.Set(&h).MulInt(2).Square() // I = (2*H)^2 (mag: 1) + j.Mul2(&h, &i) // J = H*I (mag: 1) + negS1.Set(&s1).Negate(1) // negS1 = -S1 (mag: 2) + r.Set(&s2).Add(&negS1).MulInt(2) // r = 2*(S2-S1) (mag: 6) + rr.SquareVal(&r) // rr = r^2 (mag: 1) + v.Mul2(&u1, &i) // V = U1*I (mag: 1) + x3.Set(&v).MulInt(2).Add(&j).Negate(3) // X3 = -(J+2*V) (mag: 4) + x3.Add(&rr) // X3 = r^2+X3 (mag: 5) + negX3.Set(x3).Negate(5) // negX3 = -X3 (mag: 6) + y3.Mul2(&s1, &j).MulInt(2).Negate(2) // Y3 = -(2*S1*J) (mag: 3) + y3.Add(v.Add(&negX3).Mul(&r)) // Y3 = r*(V-X3)+Y3 (mag: 4) + z3.Add2(z1, z2).Square() // Z3 = (Z1+Z2)^2 (mag: 1) + z3.Add(z1z1.Add(&z2z2).Negate(2)) // Z3 = Z3-(Z1Z1+Z2Z2) (mag: 4) + z3.Mul(&h) // Z3 = Z3*H (mag: 1) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// AddNonConst adds the passed Jacobian points together and stores the result in +// the provided result param in *non-constant* time. +// +// NOTE: The points must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func AddNonConst(p1, p2, result *JacobianPoint) { + // The point at infinity is the identity according to the group law for + // elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P. 
+ if (p1.X.IsZero() && p1.Y.IsZero()) || p1.Z.IsZero() { + result.Set(p2) + return + } + if (p2.X.IsZero() && p2.Y.IsZero()) || p2.Z.IsZero() { + result.Set(p1) + return + } + + // Faster point addition can be achieved when certain assumptions are + // met. For example, when both points have the same z value, arithmetic + // on the z values can be avoided. This section thus checks for these + // conditions and calls an appropriate add function which is accelerated + // by using those assumptions. + isZ1One := p1.Z.IsOne() + isZ2One := p2.Z.IsOne() + switch { + case isZ1One && isZ2One: + addZ1AndZ2EqualsOne(p1, p2, result) + return + case p1.Z.Equals(&p2.Z): + addZ1EqualsZ2(p1, p2, result) + return + case isZ2One: + addZ2EqualsOne(p1, p2, result) + return + } + + // None of the above assumptions are true, so fall back to generic + // point addition. + addGeneric(p1, p2, result) +} + +// doubleZ1EqualsOne performs point doubling on the passed Jacobian point when +// the point is already known to have a z value of 1 and stores the result in +// the provided result param. That is to say result = 2*p. It performs faster +// point doubling than the generic routine since less arithmetic is needed due +// to the ability to avoid multiplication by the z value. +// +// NOTE: The resulting point will be normalized. +func doubleZ1EqualsOne(p, result *JacobianPoint) { + // This function uses the assumptions that z1 is 1, thus the point + // doubling formulas reduce to: + // + // X3 = (3*X1^2)^2 - 8*X1*Y1^2 + // Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4 + // Z3 = 2*Y1 + // + // To compute the above efficiently, this implementation splits the + // equation into intermediate elements which are used to minimize the + // number of field multiplications in favor of field squarings which + // are roughly 35% faster than field multiplications with the current + // implementation at the time this was written. 
+ // + // This uses a slightly modified version of the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-mdbl-2007-bl + // + // In particular it performs the calculations using the following: + // A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C) + // E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C + // Z3 = 2*Y1 + // + // This results in a cost of 1 field multiplication, 5 field squarings, + // 6 field additions, and 5 integer multiplications. + x1, y1 := &p.X, &p.Y + x3, y3, z3 := &result.X, &result.Y, &result.Z + var a, b, c, d, e, f FieldVal + z3.Set(y1).MulInt(2) // Z3 = 2*Y1 (mag: 2) + a.SquareVal(x1) // A = X1^2 (mag: 1) + b.SquareVal(y1) // B = Y1^2 (mag: 1) + c.SquareVal(&b) // C = B^2 (mag: 1) + b.Add(x1).Square() // B = (X1+B)^2 (mag: 1) + d.Set(&a).Add(&c).Negate(2) // D = -(A+C) (mag: 3) + d.Add(&b).MulInt(2) // D = 2*(B+D)(mag: 8) + e.Set(&a).MulInt(3) // E = 3*A (mag: 3) + f.SquareVal(&e) // F = E^2 (mag: 1) + x3.Set(&d).MulInt(2).Negate(16) // X3 = -(2*D) (mag: 17) + x3.Add(&f) // X3 = F+X3 (mag: 18) + f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1) + y3.Set(&c).MulInt(8).Negate(8) // Y3 = -(8*C) (mag: 9) + y3.Add(f.Mul(&e)) // Y3 = E*F+Y3 (mag: 10) + + // Normalize the resulting field values as needed. + x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// doubleGeneric performs point doubling on the passed Jacobian point without +// any assumptions about the z value and stores the result in the provided +// result param. That is to say result = 2*p. It is the slowest of the point +// doubling routines due to requiring the most arithmetic. +// +// NOTE: The resulting point will be normalized. 
+func doubleGeneric(p, result *JacobianPoint) { + // Point doubling formula for Jacobian coordinates for the secp256k1 + // curve: + // + // X3 = (3*X1^2)^2 - 8*X1*Y1^2 + // Y3 = (3*X1^2)*(4*X1*Y1^2 - X3) - 8*Y1^4 + // Z3 = 2*Y1*Z1 + // + // To compute the above efficiently, this implementation splits the + // equation into intermediate elements which are used to minimize the + // number of field multiplications in favor of field squarings which + // are roughly 35% faster than field multiplications with the current + // implementation at the time this was written. + // + // This uses a slightly modified version of the method shown at: + // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + // + // In particular it performs the calculations using the following: + // A = X1^2, B = Y1^2, C = B^2, D = 2*((X1+B)^2-A-C) + // E = 3*A, F = E^2, X3 = F-2*D, Y3 = E*(D-X3)-8*C + // Z3 = 2*Y1*Z1 + // + // This results in a cost of 1 field multiplication, 5 field squarings, + // 6 field additions, and 5 integer multiplications. + x1, y1, z1 := &p.X, &p.Y, &p.Z + x3, y3, z3 := &result.X, &result.Y, &result.Z + var a, b, c, d, e, f FieldVal + z3.Mul2(y1, z1).MulInt(2) // Z3 = 2*Y1*Z1 (mag: 2) + a.SquareVal(x1) // A = X1^2 (mag: 1) + b.SquareVal(y1) // B = Y1^2 (mag: 1) + c.SquareVal(&b) // C = B^2 (mag: 1) + b.Add(x1).Square() // B = (X1+B)^2 (mag: 1) + d.Set(&a).Add(&c).Negate(2) // D = -(A+C) (mag: 3) + d.Add(&b).MulInt(2) // D = 2*(B+D)(mag: 8) + e.Set(&a).MulInt(3) // E = 3*A (mag: 3) + f.SquareVal(&e) // F = E^2 (mag: 1) + x3.Set(&d).MulInt(2).Negate(16) // X3 = -(2*D) (mag: 17) + x3.Add(&f) // X3 = F+X3 (mag: 18) + f.Set(x3).Negate(18).Add(&d).Normalize() // F = D-X3 (mag: 1) + y3.Set(&c).MulInt(8).Negate(8) // Y3 = -(8*C) (mag: 9) + y3.Add(f.Mul(&e)) // Y3 = E*F+Y3 (mag: 10) + + // Normalize the resulting field values as needed. 
+ x3.Normalize() + y3.Normalize() + z3.Normalize() +} + +// DoubleNonConst doubles the passed Jacobian point and stores the result in the +// provided result parameter in *non-constant* time. +// +// NOTE: The point must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func DoubleNonConst(p, result *JacobianPoint) { + // Doubling the point at infinity is still infinity. + if p.Y.IsZero() || p.Z.IsZero() { + result.X.SetInt(0) + result.Y.SetInt(0) + result.Z.SetInt(0) + return + } + + // Slightly faster point doubling can be achieved when the z value is 1 + // by avoiding the multiplication on the z value. This section calls + // a point doubling function which is accelerated by using that + // assumption when possible. + if p.Z.IsOne() { + doubleZ1EqualsOne(p, result) + return + } + + // Fall back to generic point doubling which works with arbitrary z + // values. + doubleGeneric(p, result) +} + +// mulAdd64 multiplies the two passed base 2^64 digits together, adds the given +// value to the result, and returns the 128-bit result via a (hi, lo) tuple +// where the upper half of the bits are returned in hi and the lower half in lo. +func mulAdd64(digit1, digit2, m uint64) (hi, lo uint64) { + // Note the carry on the final add is safe to discard because the maximum + // possible value is: + // (2^64 - 1)(2^64 - 1) + (2^64 - 1) = 2^128 - 2^64 + // and: + // 2^128 - 2^64 < 2^128. + var c uint64 + hi, lo = bits.Mul64(digit1, digit2) + lo, c = bits.Add64(lo, m, 0) + hi, _ = bits.Add64(hi, 0, c) + return hi, lo +} + +// mulAdd64Carry multiplies the two passed base 2^64 digits together, adds both +// the given value and carry to the result, and returns the 128-bit result via a +// (hi, lo) tuple where the upper half of the bits are returned in hi and the +// lower half in lo. 
+func mulAdd64Carry(digit1, digit2, m, c uint64) (hi, lo uint64) { + // Note the carry on the high order add is safe to discard because the + // maximum possible value is: + // (2^64 - 1)(2^64 - 1) + 2*(2^64 - 1) = 2^128 - 1 + // and: + // 2^128 - 1 < 2^128. + var c2 uint64 + hi, lo = mulAdd64(digit1, digit2, m) + lo, c2 = bits.Add64(lo, c, 0) + hi, _ = bits.Add64(hi, 0, c2) + return hi, lo +} + +// mul512Rsh320Round computes the full 512-bit product of the two given scalars, +// right shifts the result by 320 bits, rounds to the nearest integer, and +// returns the result in constant time. +// +// Note that despite the inputs and output being mod n scalars, the 512-bit +// product is NOT reduced mod N prior to the right shift. This is intentional +// because it is used for replacing division with multiplication and thus the +// intermediate results must be done via a field extension to a larger field. +func mul512Rsh320Round(n1, n2 *ModNScalar) ModNScalar { + // Convert n1 and n2 to base 2^64 digits. + n1Digit0 := uint64(n1.n[0]) | uint64(n1.n[1])<<32 + n1Digit1 := uint64(n1.n[2]) | uint64(n1.n[3])<<32 + n1Digit2 := uint64(n1.n[4]) | uint64(n1.n[5])<<32 + n1Digit3 := uint64(n1.n[6]) | uint64(n1.n[7])<<32 + n2Digit0 := uint64(n2.n[0]) | uint64(n2.n[1])<<32 + n2Digit1 := uint64(n2.n[2]) | uint64(n2.n[3])<<32 + n2Digit2 := uint64(n2.n[4]) | uint64(n2.n[5])<<32 + n2Digit3 := uint64(n2.n[6]) | uint64(n2.n[7])<<32 + + // Compute the full 512-bit product n1*n2. + var r0, r1, r2, r3, r4, r5, r6, r7, c uint64 + + // Terms resulting from the product of the first digit of the second number + // by all digits of the first number. + // + // Note that r0 is ignored because it is not needed to compute the higher + // terms and it is shifted out below anyway. 
+ c, _ = bits.Mul64(n2Digit0, n1Digit0) + c, r1 = mulAdd64(n2Digit0, n1Digit1, c) + c, r2 = mulAdd64(n2Digit0, n1Digit2, c) + r4, r3 = mulAdd64(n2Digit0, n1Digit3, c) + + // Terms resulting from the product of the second digit of the second number + // by all digits of the first number. + // + // Note that r1 is ignored because it is no longer needed to compute the + // higher terms and it is shifted out below anyway. + c, _ = mulAdd64(n2Digit1, n1Digit0, r1) + c, r2 = mulAdd64Carry(n2Digit1, n1Digit1, r2, c) + c, r3 = mulAdd64Carry(n2Digit1, n1Digit2, r3, c) + r5, r4 = mulAdd64Carry(n2Digit1, n1Digit3, r4, c) + + // Terms resulting from the product of the third digit of the second number + // by all digits of the first number. + // + // Note that r2 is ignored because it is no longer needed to compute the + // higher terms and it is shifted out below anyway. + c, _ = mulAdd64(n2Digit2, n1Digit0, r2) + c, r3 = mulAdd64Carry(n2Digit2, n1Digit1, r3, c) + c, r4 = mulAdd64Carry(n2Digit2, n1Digit2, r4, c) + r6, r5 = mulAdd64Carry(n2Digit2, n1Digit3, r5, c) + + // Terms resulting from the product of the fourth digit of the second number + // by all digits of the first number. + // + // Note that r3 is ignored because it is no longer needed to compute the + // higher terms and it is shifted out below anyway. + c, _ = mulAdd64(n2Digit3, n1Digit0, r3) + c, r4 = mulAdd64Carry(n2Digit3, n1Digit1, r4, c) + c, r5 = mulAdd64Carry(n2Digit3, n1Digit2, r5, c) + r7, r6 = mulAdd64Carry(n2Digit3, n1Digit3, r6, c) + + // At this point the upper 256 bits of the full 512-bit product n1*n2 are in + // r4..r7 (recall the low order results were discarded as noted above). + // + // Right shift the result 320 bits. Note that the MSB of r4 determines + // whether or not to round because it is the final bit that is shifted out. 
+ // + // Also, notice that r3..r7 would also ordinarily be set to 0 as well for + // the full shift, but that is skipped since they are no longer used as + // their values are known to be zero. + roundBit := r4 >> 63 + r2, r1, r0 = r7, r6, r5 + + // Conditionally add 1 depending on the round bit in constant time. + r0, c = bits.Add64(r0, roundBit, 0) + r1, c = bits.Add64(r1, 0, c) + r2, r3 = bits.Add64(r2, 0, c) + + // Finally, convert the result to a mod n scalar. + // + // No modular reduction is needed because the result is guaranteed to be + // less than the group order given the group order is > 2^255 and the + // maximum possible value of the result is 2^192. + var result ModNScalar + result.n[0] = uint32(r0) + result.n[1] = uint32(r0 >> 32) + result.n[2] = uint32(r1) + result.n[3] = uint32(r1 >> 32) + result.n[4] = uint32(r2) + result.n[5] = uint32(r2 >> 32) + result.n[6] = uint32(r3) + result.n[7] = uint32(r3 >> 32) + return result +} + +// splitK returns two scalars (k1 and k2) that are a balanced length-two +// representation of the provided scalar such that k ≡ k1 + k2*λ (mod N), where +// N is the secp256k1 group order. +func splitK(k *ModNScalar) (ModNScalar, ModNScalar) { + // The ultimate goal is to decompose k into two scalars that are around + // half the bit length of k such that the following equation is satisfied: + // + // k1 + k2*λ ≡ k (mod n) + // + // The strategy used here is based on algorithm 3.74 from [GECC] with a few + // modifications to make use of the more efficient mod n scalar type, avoid + // some costly long divisions, and minimize the number of calculations. 
+ //
+ // Start by defining a function that takes a vector v = <a,b> ∈ ℤ⨯ℤ:
+ //
+ // f(v) = a + bλ (mod n)
+ //
+ // Then, find two vectors, v1 = <a1,b1>, and v2 = <a2,b2> in ℤ⨯ℤ such that:
+ // 1) v1 and v2 are linearly independent
+ // 2) f(v1) = f(v2) = 0
+ // 3) v1 and v2 have small Euclidean norm
+ //
+ // The vectors that satisfy these properties are found via the Euclidean
+ // algorithm and are precomputed since both n and λ are fixed values for the
+ // secp256k1 curve. See genprecomps.go for derivation details.
+ //
+ // Next, consider k as a vector <k, 0> in ℚ⨯ℚ and by linear algebra write:
+ //
+ // <k, 0> = g1*v1 + g2*v2, where g1, g2 ∈ ℚ
+ //
+ // Note that, per above, the components of vector v1 are a1 and b1 while the
+ // components of vector v2 are a2 and b2. Given the vectors v1 and v2 were
+ // generated such that a1*b2 - a2*b1 = n, solving the equation for g1 and g2
+ // yields:
+ //
+ // g1 = b2*k / n
+ // g2 = -b1*k / n
+ //
+ // Observe:
+ // <k, 0> = g1*v1 + g2*v2
+ // = (b2*k/n)*<a1,b1> + (-b1*k/n)*<a2,b2> | substitute
+ // = <a1*b2*k/n, b1*b2*k/n> + <-a2*b1*k/n, -b2*b1*k/n> | scalar mul
+ // = <a1*b2*k/n - a2*b1*k/n, b1*b2*k/n - b2*b1*k/n> | vector add
+ // = <[a1*b2*k - a2*b1*k]/n, 0> | simplify
+ // = <k*[a1*b2 - a2*b1]/n, 0> | factor out k
+ // = <k*n/n, 0> | substitute
+ // = <k, 0> | simplify
+ //
+ // Now, consider an integer-valued vector v:
+ //
+ // v = c1*v1 + c2*v2, where c1, c2 ∈ ℤ (mod n)
+ //
+ // Since vectors v1 and v2 are linearly independent and were generated such
+ // that f(v1) = f(v2) = 0, all possible scalars c1 and c2 also produce a
+ // vector v such that f(v) = 0.
+ //
+ // In other words, c1 and c2 can be any integers and the resulting
+ // decomposition will still satisfy the required equation. However, since
+ // the goal is to produce a balanced decomposition that provides a
+ // performance advantage by minimizing max(k1, k2), c1 and c2 need to be
+ // integers close to g1 and g2, respectively, so the resulting vector v is
+ // an integer-valued vector that is close to <k, 0>.
+ //
+ // Finally, consider the vector u:
+ //
+ // u = <k, 0> - v
+ //
+ // It follows that f(u) = k and thus the two components of vector u satisfy
+ // the required equation:
+ //
+ // k1 + k2*λ ≡ k (mod n)
+ //
+ // Choosing c1 and c2:
+ // -------------------
+ //
+ // As mentioned above, c1 and c2 need to be integers close to g1 and g2,
+ // respectively. The algorithm in [GECC] chooses the following values:
+ //
+ // c1 = round(g1) = round(b2*k / n)
+ // c2 = round(g2) = round(-b1*k / n)
+ //
+ // However, as section 3.4.2 of [STWS] notes, the aforementioned approach
+ // requires costly long divisions that can be avoided by precomputing
+ // rounded estimates as follows:
+ //
+ // t = bitlen(n) + 1
+ // z1 = round(2^t * b2 / n)
+ // z2 = round(2^t * -b1 / n)
+ //
+ // Then, use those precomputed estimates to perform a multiplication by k
+ // along with a floored division by 2^t, which is a simple right shift by t:
+ //
+ // c1 = floor(k * z1 / 2^t) = (k * z1) >> t
+ // c2 = floor(k * z2 / 2^t) = (k * z2) >> t
+ //
+ // Finally, round up if last bit discarded in the right shift by t is set by
+ // adding 1.
+ //
+ // As a further optimization, rather than setting t = bitlen(n) + 1 = 257 as
+ // stated by [STWS], this implementation uses a higher precision estimate of
+ // t = bitlen(n) + 64 = 320 because it allows simplification of the shifts
+ // in the internal calculations that are done via uint64s and also allows
+ // the use of floor in the precomputations.
+ //
+ // Thus, the calculations this implementation uses are:
+ //
+ // z1 = floor(b2<<320 / n) | precomputed
+ // z2 = floor((-b1)<<320 / n) | precomputed
+ // c1 = ((k * z1) >> 320) + (((k * z1) >> 319) & 1)
+ // c2 = ((k * z2) >> 320) + (((k * z2) >> 319) & 1)
+ //
+ // Putting it all together:
+ // ------------------------
+ //
+ // Calculate the following vectors using the values discussed above:
+ //
+ // v = c1*v1 + c2*v2
+ // u = <k, 0> - v
+ //
+ // The two components of the resulting vector v are:
+ // va = c1*a1 + c2*a2
+ // vb = c1*b1 + c2*b2
+ //
+ // Thus, the two components of the resulting vector u are:
+ // k1 = k - va
+ // k2 = 0 - vb = -vb
+ //
+ // As some final optimizations:
+ //
+ // 1) Note that k1 + k2*λ ≡ k (mod n) means that k1 ≡ k - k2*λ (mod n).
+ // Therefore, the computation of va can be avoided to save two
+ // field multiplications and a field addition.
+ //
+ // 2) Since k1 = k - k2*λ = k + k2*(-λ), an additional field negation is
+ // saved by storing and using the negative version of λ.
+ //
+ // 3) Since k2 = -vb = -(c1*b1 + c2*b2) = c1*(-b1) + c2*(-b2), one more
+ // field negation is saved by storing and using the negative versions of
+ // b1 and b2.
+ //
+ // k2 = c1*(-b1) + c2*(-b2)
+ // k1 = k + k2*(-λ)
+ var k1, k2 ModNScalar
+ c1 := mul512Rsh320Round(k, endoZ1)
+ c2 := mul512Rsh320Round(k, endoZ2)
+ k2.Add2(c1.Mul(endoNegB1), c2.Mul(endoNegB2))
+ k1.Mul2(&k2, endoNegLambda).Add(k)
+ return k1, k2
+}
+
+// nafScalar represents a positive integer up to a maximum value of 2^256 - 1
+// encoded in non-adjacent form.
+//
+// NAF is a signed-digit representation where each digit can be +1, 0, or -1.
+//
+// In order to efficiently encode that information, this type uses two arrays, a
+// "positive" array where set bits represent the +1 signed digits and a
+// "negative" array where set bits represent the -1 signed digits. 0 is
+// represented by neither array having a bit set in that position.
+// +// The Pos and Neg methods return the aforementioned positive and negative +// arrays, respectively. +type nafScalar struct { + // pos houses the positive portion of the representation. An additional + // byte is required for the positive portion because the NAF encoding can be + // up to 1 bit longer than the normal binary encoding of the value. + // + // neg houses the negative portion of the representation. Even though the + // additional byte is not required for the negative portion, since it can + // never exceed the length of the normal binary encoding of the value, + // keeping the same length for positive and negative portions simplifies + // working with the representation and allows extra conditional branches to + // be avoided. + // + // start and end specify the starting and ending index to use within the pos + // and neg arrays, respectively. This allows fixed size arrays to be used + // versus needing to dynamically allocate space on the heap. + // + // NOTE: The fields are defined in the order that they are to minimize the + // padding on 32-bit and 64-bit platforms. + pos [33]byte + start, end uint8 + neg [33]byte +} + +// Pos returns the bytes of the encoded value with bits set in the positions +// that represent a signed digit of +1. +func (s *nafScalar) Pos() []byte { + return s.pos[s.start:s.end] +} + +// Neg returns the bytes of the encoded value with bits set in the positions +// that represent a signed digit of -1. +func (s *nafScalar) Neg() []byte { + return s.neg[s.start:s.end] +} + +// naf takes a positive integer up to a maximum value of 2^256 - 1 and returns +// its non-adjacent form (NAF), which is a unique signed-digit representation +// such that no two consecutive digits are nonzero. 
See the documentation for +// the returned type for details on how the representation is encoded +// efficiently and how to interpret it +// +// NAF is useful in that it has the fewest nonzero digits of any signed digit +// representation, only 1/3rd of its digits are nonzero on average, and at least +// half of the digits will be 0. +// +// The aforementioned properties are particularly beneficial for optimizing +// elliptic curve point multiplication because they effectively minimize the +// number of required point additions in exchange for needing to perform a mix +// of fewer point additions and subtractions and possibly one additional point +// doubling. This is an excellent tradeoff because subtraction of points has +// the same computational complexity as addition of points and point doubling is +// faster than both. +func naf(k []byte) nafScalar { + // Strip leading zero bytes. + for len(k) > 0 && k[0] == 0x00 { + k = k[1:] + } + + // The non-adjacent form (NAF) of a positive integer k is an expression + // k = ∑_(i=0, l-1) k_i * 2^i where k_i ∈ {0,±1}, k_(l-1) != 0, and no two + // consecutive digits k_i are nonzero. + // + // The traditional method of computing the NAF of a positive integer is + // given by algorithm 3.30 in [GECC]. It consists of repeatedly dividing k + // by 2 and choosing the remainder so that the quotient (k−r)/2 is even + // which ensures the next NAF digit is 0. This requires log_2(k) steps. + // + // However, in [BRID], Prodinger notes that a closed form expression for the + // NAF representation is the bitwise difference 3k/2 - k/2. This is more + // efficient as it can be computed in O(1) versus the O(log(n)) of the + // traditional approach. + // + // The following code makes use of that formula to compute the NAF more + // efficiently. + // + // To understand the logic here, observe that the only way the NAF has a + // nonzero digit at a given bit is when either 3k/2 or k/2 has a bit set in + // that position, but not both. 
In other words, the result of a bitwise + // xor. This can be seen simply by considering that when the bits are the + // same, the subtraction is either 0-0 or 1-1, both of which are 0. + // + // Further, observe that the "+1" digits in the result are contributed by + // 3k/2 while the "-1" digits are from k/2. So, they can be determined by + // taking the bitwise and of each respective value with the result of the + // xor which identifies which bits are nonzero. + // + // Using that information, this loops backwards from the least significant + // byte to the most significant byte while performing the aforementioned + // calculations by propagating the potential carry and high order bit from + // the next word during the right shift. + kLen := len(k) + var result nafScalar + var carry uint8 + for byteNum := kLen - 1; byteNum >= 0; byteNum-- { + // Calculate k/2. Notice the carry from the previous word is added and + // the low order bit from the next word is shifted in accordingly. + kc := uint16(k[byteNum]) + uint16(carry) + var nextWord uint8 + if byteNum > 0 { + nextWord = k[byteNum-1] + } + halfK := kc>>1 | uint16(nextWord<<7) + + // Calculate 3k/2 and determine the non-zero digits in the result. + threeHalfK := kc + halfK + nonZeroResultDigits := threeHalfK ^ halfK + + // Determine the signed digits {0, ±1}. + result.pos[byteNum+1] = uint8(threeHalfK & nonZeroResultDigits) + result.neg[byteNum+1] = uint8(halfK & nonZeroResultDigits) + + // Propagate the potential carry from the 3k/2 calculation. + carry = uint8(threeHalfK >> 8) + } + result.pos[0] = carry + + // Set the starting and ending positions within the fixed size arrays to + // identify the bytes that are actually used. This is important since the + // encoding is big endian and thus trailing zero bytes changes its value. 
+ result.start = 1 - carry + result.end = uint8(kLen + 1) + return result +} + +// ScalarMultNonConst multiplies k*P where k is a scalar modulo the curve order +// and P is a point in Jacobian projective coordinates and stores the result in +// the provided Jacobian point. +// +// NOTE: The point must be normalized for this function to return the correct +// result. The resulting point will be normalized. +func ScalarMultNonConst(k *ModNScalar, point, result *JacobianPoint) { + // ------------------------------------------------------------------------- + // This makes use of the following efficiently-computable endomorphism to + // accelerate the computation: + // + // φ(P) ⟼ λ*P = (β*P.x mod p, P.y) + // + // In other words, there is a special scalar λ that every point on the + // elliptic curve can be multiplied by that will result in the same point as + // performing a single field multiplication of the point's X coordinate by + // the special value β. + // + // This is useful because scalar point multiplication is significantly more + // expensive than a single field multiplication given the former involves a + // series of point doublings and additions which themselves consist of a + // combination of several field multiplications, squarings, and additions. + // + // So, the idea behind making use of the endomorphism is thus to decompose + // the scalar into two scalars that are each about half the bit length of + // the original scalar such that: + // + // k ≡ k1 + k2*λ (mod n) + // + // This in turn allows the scalar point multiplication to be performed as a + // sum of two smaller half-length multiplications as follows: + // + // k*P = (k1 + k2*λ)*P + // = k1*P + k2*λ*P + // = k1*P + k2*φ(P) + // + // Thus, a speedup is achieved so long as it's faster to decompose the + // scalar, compute φ(P), and perform a simultaneous multiply of the + // half-length point multiplications than it is to compute a full width + // point multiplication. 
+ // + // In practice, benchmarks show the current implementation provides a + // speedup of around 30-35% versus not using the endomorphism. + // + // See section 3.5 in [GECC] for a more rigorous treatment. + // ------------------------------------------------------------------------- + + // Per above, the main equation here to remember is: + // k*P = k1*P + k2*φ(P) + // + // p1 below is P in the equation while p2 is φ(P) in the equation. + // + // NOTE: φ(x,y) = (β*x,y). The Jacobian z coordinates are the same, so this + // math goes through. + // + // Also, calculate -p1 and -p2 for use in the NAF optimization. + p1, p1Neg := new(JacobianPoint), new(JacobianPoint) + p1.Set(point) + p1Neg.Set(p1) + p1Neg.Y.Negate(1).Normalize() + p2, p2Neg := new(JacobianPoint), new(JacobianPoint) + p2.Set(p1) + p2.X.Mul(endoBeta).Normalize() + p2Neg.Set(p2) + p2Neg.Y.Negate(1).Normalize() + + // Decompose k into k1 and k2 such that k = k1 + k2*λ (mod n) where k1 and + // k2 are around half the bit length of k in order to halve the number of EC + // operations. + // + // Notice that this also flips the sign of the scalars and points as needed + // to minimize the bit lengths of the scalars k1 and k2. + // + // This is done because the scalars are operating modulo the group order + // which means that when they would otherwise be a small negative magnitude + // they will instead be a large positive magnitude. Since the goal is for + // the scalars to have a small magnitude to achieve a performance boost, use + // their negation when they are greater than the half order of the group and + // flip the positive and negative values of the corresponding point that + // will be multiplied by to compensate. 
+ // + // In other words, transform the calc when k1 is over the half order to: + // k1*P = -k1*-P + // + // Similarly, transform the calc when k2 is over the half order to: + // k2*φ(P) = -k2*-φ(P) + k1, k2 := splitK(k) + if k1.IsOverHalfOrder() { + k1.Negate() + p1, p1Neg = p1Neg, p1 + } + if k2.IsOverHalfOrder() { + k2.Negate() + p2, p2Neg = p2Neg, p2 + } + + // Convert k1 and k2 into their NAF representations since NAF has a lot more + // zeros overall on average which minimizes the number of required point + // additions in exchange for a mix of fewer point additions and subtractions + // at the cost of one additional point doubling. + // + // This is an excellent tradeoff because subtraction of points has the same + // computational complexity as addition of points and point doubling is + // faster than both. + // + // Concretely, on average, 1/2 of all bits will be non-zero with the normal + // binary representation whereas only 1/3rd of the bits will be non-zero + // with NAF. + // + // The Pos version of the bytes contain the +1s and the Neg versions contain + // the -1s. + k1Bytes, k2Bytes := k1.Bytes(), k2.Bytes() + k1NAF, k2NAF := naf(k1Bytes[:]), naf(k2Bytes[:]) + k1PosNAF, k1NegNAF := k1NAF.Pos(), k1NAF.Neg() + k2PosNAF, k2NegNAF := k2NAF.Pos(), k2NAF.Neg() + k1Len, k2Len := len(k1PosNAF), len(k2PosNAF) + + // Add left-to-right using the NAF optimization. See algorithm 3.77 from + // [GECC]. + // + // Point Q = ∞ (point at infinity). + var q JacobianPoint + m := k1Len + if m < k2Len { + m = k2Len + } + for i := 0; i < m; i++ { + // Since k1 and k2 are potentially different lengths and the calculation + // is being done left to right, pad the front of the shorter one with + // 0s. 
+ var k1BytePos, k1ByteNeg, k2BytePos, k2ByteNeg byte + if i >= m-k1Len { + k1BytePos, k1ByteNeg = k1PosNAF[i-(m-k1Len)], k1NegNAF[i-(m-k1Len)] + } + if i >= m-k2Len { + k2BytePos, k2ByteNeg = k2PosNAF[i-(m-k2Len)], k2NegNAF[i-(m-k2Len)] + } + + for mask := uint8(1 << 7); mask > 0; mask >>= 1 { + // Q = 2 * Q + DoubleNonConst(&q, &q) + + // Add or subtract the first point based on the signed digit of the + // NAF representation of k1 at this bit position. + // + // +1: Q = Q + p1 + // -1: Q = Q - p1 + // 0: Q = Q (no change) + if k1BytePos&mask == mask { + AddNonConst(&q, p1, &q) + } else if k1ByteNeg&mask == mask { + AddNonConst(&q, p1Neg, &q) + } + + // Add or subtract the second point based on the signed digit of the + // NAF representation of k2 at this bit position. + // + // +1: Q = Q + p2 + // -1: Q = Q - p2 + // 0: Q = Q (no change) + if k2BytePos&mask == mask { + AddNonConst(&q, p2, &q) + } else if k2ByteNeg&mask == mask { + AddNonConst(&q, p2Neg, &q) + } + } + } + + result.Set(&q) +} + +// ScalarBaseMultNonConst multiplies k*G where k is a scalar modulo the curve +// order and G is the base point of the group and stores the result in the +// provided Jacobian point. +// +// NOTE: The resulting point will be normalized. +func ScalarBaseMultNonConst(k *ModNScalar, result *JacobianPoint) { + bytePoints := s256BytePoints() + + // Start with the point at infinity. + result.X.Zero() + result.Y.Zero() + result.Z.Zero() + + // bytePoints has all 256 byte points for each 8-bit window. The strategy + // is to add up the byte points. This is best understood by expressing k in + // base-256 which it already sort of is. Each "digit" in the 8-bit window + // can be looked up using bytePoints and added together. + kb := k.Bytes() + for i := 0; i < len(kb); i++ { + pt := &bytePoints[i][kb[i]] + AddNonConst(result, pt, result) + } +} + +// isOnCurve returns whether or not the affine point (x,y) is on the curve. 
+func isOnCurve(fx, fy *FieldVal) bool { + // Elliptic curve equation for secp256k1 is: y^2 = x^3 + 7 + y2 := new(FieldVal).SquareVal(fy).Normalize() + result := new(FieldVal).SquareVal(fx).Mul(fx).AddInt(7).Normalize() + return y2.Equals(result) +} + +// DecompressY attempts to calculate the Y coordinate for the given X coordinate +// such that the result pair is a point on the secp256k1 curve. It adjusts Y +// based on the desired oddness and returns whether or not it was successful +// since not all X coordinates are valid. +// +// The magnitude of the provided X coordinate field val must be a max of 8 for a +// correct result. The resulting Y field val will have a max magnitude of 2. +func DecompressY(x *FieldVal, odd bool, resultY *FieldVal) bool { + // The curve equation for secp256k1 is: y^2 = x^3 + 7. Thus + // y = +-sqrt(x^3 + 7). + // + // The x coordinate must be invalid if there is no square root for the + // calculated rhs because it means the X coordinate is not for a point on + // the curve. + x3PlusB := new(FieldVal).SquareVal(x).Mul(x).AddInt(7) + if hasSqrt := resultY.SquareRootVal(x3PlusB); !hasSqrt { + return false + } + if resultY.Normalize().IsOdd() != odd { + resultY.Negate(1) + } + return true +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go new file mode 100644 index 0000000000..ac01e2343c --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/doc.go @@ -0,0 +1,59 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package secp256k1 implements optimized secp256k1 elliptic curve operations in +pure Go. 
+ +This package provides an optimized pure Go implementation of elliptic curve +cryptography operations over the secp256k1 curve as well as data structures and +functions for working with public and private secp256k1 keys. See +https://www.secg.org/sec2-v2.pdf for details on the standard. + +In addition, sub packages are provided to produce, verify, parse, and serialize +ECDSA signatures and EC-Schnorr-DCRv0 (a custom Schnorr-based signature scheme +specific to Decred) signatures. See the README.md files in the relevant sub +packages for more details about those aspects. + +An overview of the features provided by this package are as follows: + + - Private key generation, serialization, and parsing + - Public key generation, serialization and parsing per ANSI X9.62-1998 + - Parses uncompressed, compressed, and hybrid public keys + - Serializes uncompressed and compressed public keys + - Specialized types for performing optimized and constant time field operations + - FieldVal type for working modulo the secp256k1 field prime + - ModNScalar type for working modulo the secp256k1 group order + - Elliptic curve operations in Jacobian projective coordinates + - Point addition + - Point doubling + - Scalar multiplication with an arbitrary point + - Scalar multiplication with the base point (group generator) + - Point decompression from a given x coordinate + - Nonce generation via RFC6979 with support for extra data and version + information that can be used to prevent nonce reuse between signing + algorithms + +It also provides an implementation of the Go standard library crypto/elliptic +Curve interface via the S256 function so that it may be used with other packages +in the standard library such as crypto/tls, crypto/x509, and crypto/ecdsa. +However, in the case of ECDSA, it is highly recommended to use the ecdsa sub +package of this package instead since it is optimized specifically for secp256k1 +and is significantly faster as a result. 
+ +Although this package was primarily written for dcrd, it has intentionally been +designed so it can be used as a standalone package for any projects needing to +use optimized secp256k1 elliptic curve cryptography. + +Finally, a comprehensive suite of tests is provided to provide a high level of +quality assurance. + +# Use of secp256k1 in Decred + +At the time of this writing, the primary public key cryptography in widespread +use on the Decred network used to secure coins is based on elliptic curves +defined by the secp256k1 domain parameters. +*/ +package secp256k1 diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go new file mode 100644 index 0000000000..ebbdfc5411 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ecdh.go @@ -0,0 +1,21 @@ +// Copyright (c) 2015 The btcsuite developers +// Copyright (c) 2015-2016 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// GenerateSharedSecret generates a shared secret based on a private key and a +// public key using Diffie-Hellman key exchange (ECDH) (RFC 5903). +// RFC5903 Section 9 states we should only return x. +// +// It is recommended to securily hash the result before using as a cryptographic +// key. 
+func GenerateSharedSecret(privkey *PrivateKey, pubkey *PublicKey) []byte { + var point, result JacobianPoint + pubkey.AsJacobian(&point) + ScalarMultNonConst(&privkey.Key, &point, &result) + result.ToAffine() + xBytes := result.X.Bytes() + return xBytes[:] +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go new file mode 100644 index 0000000000..42022646b1 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/ellipticadaptor.go @@ -0,0 +1,255 @@ +// Copyright 2020-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// References: +// [SECG]: Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "math/big" +) + +// CurveParams contains the parameters for the secp256k1 curve. +type CurveParams struct { + // P is the prime used in the secp256k1 field. + P *big.Int + + // N is the order of the secp256k1 curve group generated by the base point. + N *big.Int + + // Gx and Gy are the x and y coordinate of the base point, respectively. + Gx, Gy *big.Int + + // BitSize is the size of the underlying secp256k1 field in bits. + BitSize int + + // H is the cofactor of the secp256k1 curve. + H int + + // ByteSize is simply the bit size / 8 and is provided for convenience + // since it is calculated repeatedly. + ByteSize int +} + +// Curve parameters taken from [SECG] section 2.4.1. 
+var curveParams = CurveParams{ + P: fromHex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"), + N: fromHex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"), + Gx: fromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"), + Gy: fromHex("483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"), + BitSize: 256, + H: 1, + ByteSize: 256 / 8, +} + +// Params returns the secp256k1 curve parameters for convenience. +func Params() *CurveParams { + return &curveParams +} + +// KoblitzCurve provides an implementation for secp256k1 that fits the ECC Curve +// interface from crypto/elliptic. +type KoblitzCurve struct { + *elliptic.CurveParams +} + +// bigAffineToJacobian takes an affine point (x, y) as big integers and converts +// it to Jacobian point with Z=1. +func bigAffineToJacobian(x, y *big.Int, result *JacobianPoint) { + result.X.SetByteSlice(x.Bytes()) + result.Y.SetByteSlice(y.Bytes()) + result.Z.SetInt(1) +} + +// jacobianToBigAffine takes a Jacobian point (x, y, z) as field values and +// converts it to an affine point as big integers. +func jacobianToBigAffine(point *JacobianPoint) (*big.Int, *big.Int) { + point.ToAffine() + + // Convert the field values for the now affine point to big.Ints. + x3, y3 := new(big.Int), new(big.Int) + x3.SetBytes(point.X.Bytes()[:]) + y3.SetBytes(point.Y.Bytes()[:]) + return x3, y3 +} + +// Params returns the parameters for the curve. +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) Params() *elliptic.CurveParams { + return curve.CurveParams +} + +// IsOnCurve returns whether or not the affine point (x,y) is on the curve. +// +// This is part of the elliptic.Curve interface implementation. This function +// differs from the crypto/elliptic algorithm since a = 0 not -3. +func (curve *KoblitzCurve) IsOnCurve(x, y *big.Int) bool { + // Convert big ints to a Jacobian point for faster arithmetic. 
+ var point JacobianPoint + bigAffineToJacobian(x, y, &point) + return isOnCurve(&point.X, &point.Y) +} + +// Add returns the sum of (x1,y1) and (x2,y2). +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + // The point at infinity is the identity according to the group law for + // elliptic curve cryptography. Thus, ∞ + P = P and P + ∞ = P. + if x1.Sign() == 0 && y1.Sign() == 0 { + return x2, y2 + } + if x2.Sign() == 0 && y2.Sign() == 0 { + return x1, y1 + } + + // Convert the affine coordinates from big integers to Jacobian points, + // do the point addition in Jacobian projective space, and convert the + // Jacobian point back to affine big.Ints. + var p1, p2, result JacobianPoint + bigAffineToJacobian(x1, y1, &p1) + bigAffineToJacobian(x2, y2, &p2) + AddNonConst(&p1, &p2, &result) + return jacobianToBigAffine(&result) +} + +// Double returns 2*(x1,y1). +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { + if y1.Sign() == 0 { + return new(big.Int), new(big.Int) + } + + // Convert the affine coordinates from big integers to Jacobian points, + // do the point doubling in Jacobian projective space, and convert the + // Jacobian point back to affine big.Ints. + var point, result JacobianPoint + bigAffineToJacobian(x1, y1, &point) + DoubleNonConst(&point, &result) + return jacobianToBigAffine(&result) +} + +// moduloReduce reduces k from more than 32 bytes to 32 bytes and under. This +// is done by doing a simple modulo curve.N. We can do this since G^N = 1 and +// thus any other valid point on the elliptic curve has the same order. 
+func moduloReduce(k []byte) []byte { + // Since the order of G is curve.N, we can use a much smaller number by + // doing modulo curve.N + if len(k) > curveParams.ByteSize { + tmpK := new(big.Int).SetBytes(k) + tmpK.Mod(tmpK, curveParams.N) + return tmpK.Bytes() + } + + return k +} + +// ScalarMult returns k*(Bx, By) where k is a big endian integer. +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { + // Convert the affine coordinates from big integers to Jacobian points, + // do the multiplication in Jacobian projective space, and convert the + // Jacobian point back to affine big.Ints. + var kModN ModNScalar + kModN.SetByteSlice(moduloReduce(k)) + var point, result JacobianPoint + bigAffineToJacobian(Bx, By, &point) + ScalarMultNonConst(&kModN, &point, &result) + return jacobianToBigAffine(&result) +} + +// ScalarBaseMult returns k*G where G is the base point of the group and k is a +// big endian integer. +// +// This is part of the elliptic.Curve interface implementation. +func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + // Perform the multiplication and convert the Jacobian point back to affine + // big.Ints. + var kModN ModNScalar + kModN.SetByteSlice(moduloReduce(k)) + var result JacobianPoint + ScalarBaseMultNonConst(&kModN, &result) + return jacobianToBigAffine(&result) +} + +// X returns the x coordinate of the public key. +func (p *PublicKey) X() *big.Int { + return new(big.Int).SetBytes(p.x.Bytes()[:]) +} + +// Y returns the y coordinate of the public key. +func (p *PublicKey) Y() *big.Int { + return new(big.Int).SetBytes(p.y.Bytes()[:]) +} + +// ToECDSA returns the public key as a *ecdsa.PublicKey. +func (p *PublicKey) ToECDSA() *ecdsa.PublicKey { + return &ecdsa.PublicKey{ + Curve: S256(), + X: p.X(), + Y: p.Y(), + } +} + +// ToECDSA returns the private key as a *ecdsa.PrivateKey. 
+func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey { + var privKeyBytes [PrivKeyBytesLen]byte + p.Key.PutBytes(&privKeyBytes) + var result JacobianPoint + ScalarBaseMultNonConst(&p.Key, &result) + x, y := jacobianToBigAffine(&result) + newPrivKey := &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: S256(), + X: x, + Y: y, + }, + D: new(big.Int).SetBytes(privKeyBytes[:]), + } + zeroArray32(&privKeyBytes) + return newPrivKey +} + +// fromHex converts the passed hex string into a big integer pointer and will +// panic is there is an error. This is only provided for the hard-coded +// constants so errors in the source code can bet detected. It will only (and +// must only) be called for initialization purposes. +func fromHex(s string) *big.Int { + if s == "" { + return big.NewInt(0) + } + r, ok := new(big.Int).SetString(s, 16) + if !ok { + panic("invalid hex in source file: " + s) + } + return r +} + +// secp256k1 is a global instance of the KoblitzCurve implementation which in +// turn embeds and implements elliptic.CurveParams. +var secp256k1 = &KoblitzCurve{ + CurveParams: &elliptic.CurveParams{ + P: curveParams.P, + N: curveParams.N, + B: fromHex("0000000000000000000000000000000000000000000000000000000000000007"), + Gx: curveParams.Gx, + Gy: curveParams.Gy, + BitSize: curveParams.BitSize, + Name: "secp256k1", + }, +} + +// S256 returns an elliptic.Curve which implements secp256k1. +func S256() *KoblitzCurve { + return secp256k1 +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go new file mode 100644 index 0000000000..ac8c45127e --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/error.go @@ -0,0 +1,67 @@ +// Copyright (c) 2020 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// ErrorKind identifies a kind of error. 
It has full support for errors.Is and +// errors.As, so the caller can directly check against an error kind when +// determining the reason for an error. +type ErrorKind string + +// These constants are used to identify a specific RuleError. +const ( + // ErrPubKeyInvalidLen indicates that the length of a serialized public + // key is not one of the allowed lengths. + ErrPubKeyInvalidLen = ErrorKind("ErrPubKeyInvalidLen") + + // ErrPubKeyInvalidFormat indicates an attempt was made to parse a public + // key that does not specify one of the supported formats. + ErrPubKeyInvalidFormat = ErrorKind("ErrPubKeyInvalidFormat") + + // ErrPubKeyXTooBig indicates that the x coordinate for a public key + // is greater than or equal to the prime of the field underlying the group. + ErrPubKeyXTooBig = ErrorKind("ErrPubKeyXTooBig") + + // ErrPubKeyYTooBig indicates that the y coordinate for a public key is + // greater than or equal to the prime of the field underlying the group. + ErrPubKeyYTooBig = ErrorKind("ErrPubKeyYTooBig") + + // ErrPubKeyNotOnCurve indicates that a public key is not a point on the + // secp256k1 curve. + ErrPubKeyNotOnCurve = ErrorKind("ErrPubKeyNotOnCurve") + + // ErrPubKeyMismatchedOddness indicates that a hybrid public key specified + // an oddness of the y coordinate that does not match the actual oddness of + // the provided y coordinate. + ErrPubKeyMismatchedOddness = ErrorKind("ErrPubKeyMismatchedOddness") +) + +// Error satisfies the error interface and prints human-readable errors. +func (e ErrorKind) Error() string { + return string(e) +} + +// Error identifies an error related to public key cryptography using a +// sec256k1 curve. It has full support for errors.Is and errors.As, so the +// caller can ascertain the specific reason for the error by checking +// the underlying error. +type Error struct { + Err error + Description string +} + +// Error satisfies the error interface and prints human-readable errors. 
+func (e Error) Error() string { + return e.Description +} + +// Unwrap returns the underlying wrapped error. +func (e Error) Unwrap() error { + return e.Err +} + +// makeError creates an Error given a set of arguments. +func makeError(kind ErrorKind, desc string) Error { + return Error{Err: kind, Description: desc} +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go new file mode 100644 index 0000000000..8d9ac74d53 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/field.go @@ -0,0 +1,1681 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Copyright (c) 2013-2022 Dave Collins +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// References: +// [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone. +// http://cacr.uwaterloo.ca/hac/ + +// All elliptic curve operations for secp256k1 are done in a finite field +// characterized by a 256-bit prime. Given this precision is larger than the +// biggest available native type, obviously some form of bignum math is needed. +// This package implements specialized fixed-precision field arithmetic rather +// than relying on an arbitrary-precision arithmetic package such as math/big +// for dealing with the field math since the size is known. As a result, rather +// large performance gains are achieved by taking advantage of many +// optimizations not available to arbitrary-precision arithmetic and generic +// modular arithmetic algorithms. +// +// There are various ways to internally represent each finite field element. +// For example, the most obvious representation would be to use an array of 4 +// uint64s (64 bits * 4 = 256 bits). However, that representation suffers from +// a couple of issues. 
First, there is no native Go type large enough to handle +// the intermediate results while adding or multiplying two 64-bit numbers, and +// second there is no space left for overflows when performing the intermediate +// arithmetic between each array element which would lead to expensive carry +// propagation. +// +// Given the above, this implementation represents the field elements as +// 10 uint32s with each word (array entry) treated as base 2^26. This was +// chosen for the following reasons: +// 1) Most systems at the current time are 64-bit (or at least have 64-bit +// registers available for specialized purposes such as MMX) so the +// intermediate results can typically be done using a native register (and +// using uint64s to avoid the need for additional half-word arithmetic) +// 2) In order to allow addition of the internal words without having to +// propagate the carry, the max normalized value for each register must +// be less than the number of bits available in the register +// 3) Since we're dealing with 32-bit values, 64-bits of overflow is a +// reasonable choice for #2 +// 4) Given the need for 256-bits of precision and the properties stated in #1, +// #2, and #3, the representation which best accommodates this is 10 uint32s +// with base 2^26 (26 bits * 10 = 260 bits, so the final word only needs 22 +// bits) which leaves the desired 64 bits (32 * 10 = 320, 320 - 256 = 64) for +// overflow +// +// Since it is so important that the field arithmetic is extremely fast for high +// performance crypto, this type does not perform any validation where it +// ordinarily would. See the documentation for FieldVal for more details. + +import ( + "encoding/hex" +) + +// Constants used to make the code more readable. +const ( + twoBitsMask = 0x3 + fourBitsMask = 0xf + sixBitsMask = 0x3f + eightBitsMask = 0xff +) + +// Constants related to the field representation. 
+const ( + // fieldWords is the number of words used to internally represent the + // 256-bit value. + fieldWords = 10 + + // fieldBase is the exponent used to form the numeric base of each word. + // 2^(fieldBase*i) where i is the word position. + fieldBase = 26 + + // fieldBaseMask is the mask for the bits in each word needed to + // represent the numeric base of each word (except the most significant + // word). + fieldBaseMask = (1 << fieldBase) - 1 + + // fieldMSBBits is the number of bits in the most significant word used + // to represent the value. + fieldMSBBits = 256 - (fieldBase * (fieldWords - 1)) + + // fieldMSBMask is the mask for the bits in the most significant word + // needed to represent the value. + fieldMSBMask = (1 << fieldMSBBits) - 1 + + // These fields provide convenient access to each of the words of the + // secp256k1 prime in the internal field representation to improve code + // readability. + fieldPrimeWordZero = 0x03fffc2f + fieldPrimeWordOne = 0x03ffffbf + fieldPrimeWordTwo = 0x03ffffff + fieldPrimeWordThree = 0x03ffffff + fieldPrimeWordFour = 0x03ffffff + fieldPrimeWordFive = 0x03ffffff + fieldPrimeWordSix = 0x03ffffff + fieldPrimeWordSeven = 0x03ffffff + fieldPrimeWordEight = 0x03ffffff + fieldPrimeWordNine = 0x003fffff +) + +// FieldVal implements optimized fixed-precision arithmetic over the +// secp256k1 finite field. This means all arithmetic is performed modulo +// +// 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f. +// +// WARNING: Since it is so important for the field arithmetic to be extremely +// fast for high performance crypto, this type does not perform any validation +// of documented preconditions where it ordinarily would. As a result, it is +// IMPERATIVE for callers to understand some key concepts that are described +// below and ensure the methods are called with the necessary preconditions that +// each method is documented with. 
For example, some methods only give the +// correct result if the field value is normalized and others require the field +// values involved to have a maximum magnitude and THERE ARE NO EXPLICIT CHECKS +// TO ENSURE THOSE PRECONDITIONS ARE SATISFIED. This does, unfortunately, make +// the type more difficult to use correctly and while I typically prefer to +// ensure all state and input is valid for most code, this is a bit of an +// exception because those extra checks really add up in what ends up being +// critical hot paths. +// +// The first key concept when working with this type is normalization. In order +// to avoid the need to propagate a ton of carries, the internal representation +// provides additional overflow bits for each word of the overall 256-bit value. +// This means that there are multiple internal representations for the same +// value and, as a result, any methods that rely on comparison of the value, +// such as equality and oddness determination, require the caller to provide a +// normalized value. +// +// The second key concept when working with this type is magnitude. As +// previously mentioned, the internal representation provides additional +// overflow bits which means that the more math operations that are performed on +// the field value between normalizations, the more those overflow bits +// accumulate. The magnitude is effectively that maximum possible number of +// those overflow bits that could possibly be required as a result of a given +// operation. Since there are only a limited number of overflow bits available, +// this implies that the max possible magnitude MUST be tracked by the caller +// and the caller MUST normalize the field value if a given operation would +// cause the magnitude of the result to exceed the max allowed value. +// +// IMPORTANT: The max allowed magnitude of a field value is 64. +type FieldVal struct { + // Each 256-bit value is represented as 10 32-bit integers in base 2^26. 
+ // This provides 6 bits of overflow in each word (10 bits in the most + // significant word) for a total of 64 bits of overflow (9*6 + 10 = 64). It + // only implements the arithmetic needed for elliptic curve operations. + // + // The following depicts the internal representation: + // ----------------------------------------------------------------- + // | n[9] | n[8] | ... | n[0] | + // | 32 bits available | 32 bits available | ... | 32 bits available | + // | 22 bits for value | 26 bits for value | ... | 26 bits for value | + // | 10 bits overflow | 6 bits overflow | ... | 6 bits overflow | + // | Mult: 2^(26*9) | Mult: 2^(26*8) | ... | Mult: 2^(26*0) | + // ----------------------------------------------------------------- + // + // For example, consider the number 2^49 + 1. It would be represented as: + // n[0] = 1 + // n[1] = 2^23 + // n[2..9] = 0 + // + // The full 256-bit value is then calculated by looping i from 9..0 and + // doing sum(n[i] * 2^(26i)) like so: + // n[9] * 2^(26*9) = 0 * 2^234 = 0 + // n[8] * 2^(26*8) = 0 * 2^208 = 0 + // ... + // n[1] * 2^(26*1) = 2^23 * 2^26 = 2^49 + // n[0] * 2^(26*0) = 1 * 2^0 = 1 + // Sum: 0 + 0 + ... + 2^49 + 1 = 2^49 + 1 + n [10]uint32 +} + +// String returns the field value as a normalized human-readable hex string. +// +// Preconditions: None +// Output Normalized: Field is not modified -- same as input value +// Output Max Magnitude: Field is not modified -- same as input value +func (f FieldVal) String() string { + // f is a copy, so it's safe to normalize it without mutating the original. + f.Normalize() + return hex.EncodeToString(f.Bytes()[:]) +} + +// Zero sets the field value to zero in constant time. A newly created field +// value is already set to zero. This function can be useful to clear an +// existing field value for reuse. 
+// +// Preconditions: None +// Output Normalized: Yes +// Output Max Magnitude: 1 +func (f *FieldVal) Zero() { + f.n[0] = 0 + f.n[1] = 0 + f.n[2] = 0 + f.n[3] = 0 + f.n[4] = 0 + f.n[5] = 0 + f.n[6] = 0 + f.n[7] = 0 + f.n[8] = 0 + f.n[9] = 0 +} + +// Set sets the field value equal to the passed value in constant time. The +// normalization and magnitude of the two fields will be identical. +// +// The field value is returned to support chaining. This enables syntax like: +// f := new(FieldVal).Set(f2).Add(1) so that f = f2 + 1 where f2 is not +// modified. +// +// Preconditions: None +// Output Normalized: Same as input value +// Output Max Magnitude: Same as input value +func (f *FieldVal) Set(val *FieldVal) *FieldVal { + *f = *val + return f +} + +// SetInt sets the field value to the passed integer in constant time. This is +// a convenience function since it is fairly common to perform some arithmetic +// with small native integers. +// +// The field value is returned to support chaining. This enables syntax such +// as f := new(FieldVal).SetInt(2).Mul(f2) so that f = 2 * f2. +// +// Preconditions: None +// Output Normalized: Yes +// Output Max Magnitude: 1 +func (f *FieldVal) SetInt(ui uint16) *FieldVal { + f.Zero() + f.n[0] = uint32(ui) + return f +} + +// SetBytes packs the passed 32-byte big-endian value into the internal field +// value representation in constant time. SetBytes interprets the provided +// array as a 256-bit big-endian unsigned integer, packs it into the internal +// field value representation, and returns either 1 if it is greater than or +// equal to the field prime (aka it overflowed) or 0 otherwise in constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. 
+// +// Preconditions: None +// Output Normalized: Yes if no overflow, no otherwise +// Output Max Magnitude: 1 +func (f *FieldVal) SetBytes(b *[32]byte) uint32 { + // Pack the 256 total bits across the 10 uint32 words with a max of + // 26-bits per word. This could be done with a couple of for loops, + // but this unrolled version is significantly faster. Benchmarks show + // this is about 34 times faster than the variant which uses loops. + f.n[0] = uint32(b[31]) | uint32(b[30])<<8 | uint32(b[29])<<16 | + (uint32(b[28])&twoBitsMask)<<24 + f.n[1] = uint32(b[28])>>2 | uint32(b[27])<<6 | uint32(b[26])<<14 | + (uint32(b[25])&fourBitsMask)<<22 + f.n[2] = uint32(b[25])>>4 | uint32(b[24])<<4 | uint32(b[23])<<12 | + (uint32(b[22])&sixBitsMask)<<20 + f.n[3] = uint32(b[22])>>6 | uint32(b[21])<<2 | uint32(b[20])<<10 | + uint32(b[19])<<18 + f.n[4] = uint32(b[18]) | uint32(b[17])<<8 | uint32(b[16])<<16 | + (uint32(b[15])&twoBitsMask)<<24 + f.n[5] = uint32(b[15])>>2 | uint32(b[14])<<6 | uint32(b[13])<<14 | + (uint32(b[12])&fourBitsMask)<<22 + f.n[6] = uint32(b[12])>>4 | uint32(b[11])<<4 | uint32(b[10])<<12 | + (uint32(b[9])&sixBitsMask)<<20 + f.n[7] = uint32(b[9])>>6 | uint32(b[8])<<2 | uint32(b[7])<<10 | + uint32(b[6])<<18 + f.n[8] = uint32(b[5]) | uint32(b[4])<<8 | uint32(b[3])<<16 | + (uint32(b[2])&twoBitsMask)<<24 + f.n[9] = uint32(b[2])>>2 | uint32(b[1])<<6 | uint32(b[0])<<14 + + // The intuition here is that the field value is greater than the prime if + // one of the higher individual words is greater than corresponding word of + // the prime and all higher words in the field value are equal to their + // corresponding word of the prime. Since this type is modulo the prime, + // being equal is also an overflow back to 0. 
+ //
+ // Note that because the input is 32 bytes and it was just packed into the
+ // field representation, the only words that can possibly be greater are
+ // zero and one, because ceil(log_2(2^256 - 1 - P)) = 33 bits max and the
+ // internal field representation encodes 26 bits with each word.
+ //
+ // Thus, there is no need to test if the upper words of the field value
+ // exceed them, hence, only equality is checked for them.
+ highWordsEq := constantTimeEq(f.n[9], fieldPrimeWordNine)
+ highWordsEq &= constantTimeEq(f.n[8], fieldPrimeWordEight)
+ highWordsEq &= constantTimeEq(f.n[7], fieldPrimeWordSeven)
+ highWordsEq &= constantTimeEq(f.n[6], fieldPrimeWordSix)
+ highWordsEq &= constantTimeEq(f.n[5], fieldPrimeWordFive)
+ highWordsEq &= constantTimeEq(f.n[4], fieldPrimeWordFour)
+ highWordsEq &= constantTimeEq(f.n[3], fieldPrimeWordThree)
+ highWordsEq &= constantTimeEq(f.n[2], fieldPrimeWordTwo)
+ overflow := highWordsEq & constantTimeGreater(f.n[1], fieldPrimeWordOne)
+ highWordsEq &= constantTimeEq(f.n[1], fieldPrimeWordOne)
+ overflow |= highWordsEq & constantTimeGreaterOrEq(f.n[0], fieldPrimeWordZero)
+
+ return overflow
+}
+
+// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned
+// integer (meaning it is truncated to the first 32 bytes), packs it into the
+// internal field value representation, and returns whether or not the resulting
+// truncated 256-bit integer is greater than or equal to the field prime (aka it
+// overflowed) in constant time.
+//
+// Note that since passing a slice with more than 32 bytes is truncated, it is
+// possible that the truncated value is less than the field prime and hence it
+// will not be reported as having overflowed in that case. It is up to the
+// caller to decide whether it needs to provide numbers of the appropriate size
+// or if it is acceptable to use this function with the described truncation and
+// overflow behavior.
+// +// Preconditions: None +// Output Normalized: Yes if no overflow, no otherwise +// Output Max Magnitude: 1 +func (f *FieldVal) SetByteSlice(b []byte) bool { + var b32 [32]byte + b = b[:constantTimeMin(uint32(len(b)), 32)] + copy(b32[:], b32[:32-len(b)]) + copy(b32[32-len(b):], b) + result := f.SetBytes(&b32) + zeroArray32(&b32) + return result != 0 +} + +// Normalize normalizes the internal field words into the desired range and +// performs fast modular reduction over the secp256k1 prime by making use of the +// special form of the prime in constant time. +// +// Preconditions: None +// Output Normalized: Yes +// Output Max Magnitude: 1 +func (f *FieldVal) Normalize() *FieldVal { + // The field representation leaves 6 bits of overflow in each word so + // intermediate calculations can be performed without needing to + // propagate the carry to each higher word during the calculations. In + // order to normalize, we need to "compact" the full 256-bit value to + // the right while propagating any carries through to the high order + // word. + // + // Since this field is doing arithmetic modulo the secp256k1 prime, we + // also need to perform modular reduction over the prime. + // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, highly efficient + // reduction can be achieved. + // + // The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits + // this criteria. + // + // 4294968273 in field representation (base 2^26) is: + // n[0] = 977 + // n[1] = 64 + // That is to say (2^26 * 64) + 977 = 4294968273 + // + // The algorithm presented in the referenced section typically repeats + // until the quotient is zero. However, due to our field representation + // we already know to within one reduction how many times we would need + // to repeat as it's the uppermost bits of the high order word. 
Thus we +
+ // can simply multiply the magnitude by the field representation of the
+ // prime and do a single iteration. After this step there might be an
+ // additional carry to bit 256 (bit 22 of the high order word).
+ t9 := f.n[9]
+ m := t9 >> fieldMSBBits
+ t9 = t9 & fieldMSBMask
+ t0 := f.n[0] + m*977
+ t1 := (t0 >> fieldBase) + f.n[1] + (m << 6)
+ t0 = t0 & fieldBaseMask
+ t2 := (t1 >> fieldBase) + f.n[2]
+ t1 = t1 & fieldBaseMask
+ t3 := (t2 >> fieldBase) + f.n[3]
+ t2 = t2 & fieldBaseMask
+ t4 := (t3 >> fieldBase) + f.n[4]
+ t3 = t3 & fieldBaseMask
+ t5 := (t4 >> fieldBase) + f.n[5]
+ t4 = t4 & fieldBaseMask
+ t6 := (t5 >> fieldBase) + f.n[6]
+ t5 = t5 & fieldBaseMask
+ t7 := (t6 >> fieldBase) + f.n[7]
+ t6 = t6 & fieldBaseMask
+ t8 := (t7 >> fieldBase) + f.n[8]
+ t7 = t7 & fieldBaseMask
+ t9 = (t8 >> fieldBase) + t9
+ t8 = t8 & fieldBaseMask
+
+ // At this point, the magnitude is guaranteed to be one, however, the
+ // value could still be greater than the prime if there was either a
+ // carry through to bit 256 (bit 22 of the higher order word) or the
+ // value is greater than or equal to the field characteristic. The
+ // following determines if either of these conditions are true and does
+ // the final reduction in constant time.
+ //
+ // Also note that 'm' will be zero when neither of the aforementioned
+ // conditions are true and the value will not be changed when 'm' is zero.
+ m = constantTimeEq(t9, fieldMSBMask)
+ m &= constantTimeEq(t8&t7&t6&t5&t4&t3&t2, fieldBaseMask)
+ m &= constantTimeGreater(t1+64+((t0+977)>>fieldBase), fieldBaseMask)
+ m |= t9 >> fieldMSBBits
+ t0 = t0 + m*977
+ t1 = (t0 >> fieldBase) + t1 + (m << 6)
+ t0 = t0 & fieldBaseMask
+ t2 = (t1 >> fieldBase) + t2
+ t1 = t1 & fieldBaseMask
+ t3 = (t2 >> fieldBase) + t3
+ t2 = t2 & fieldBaseMask
+ t4 = (t3 >> fieldBase) + t4
+ t3 = t3 & fieldBaseMask
+ t5 = (t4 >> fieldBase) + t5
+ t4 = t4 & fieldBaseMask
+ t6 = (t5 >> fieldBase) + t6
+ t5 = t5 & fieldBaseMask
+ t7 = (t6 >> fieldBase) + t7
+ t6 = t6 & fieldBaseMask
+ t8 = (t7 >> fieldBase) + t8
+ t7 = t7 & fieldBaseMask
+ t9 = (t8 >> fieldBase) + t9
+ t8 = t8 & fieldBaseMask
+ t9 = t9 & fieldMSBMask // Remove potential multiple of 2^256.
+
+ // Finally, set the normalized and reduced words.
+ f.n[0] = t0
+ f.n[1] = t1
+ f.n[2] = t2
+ f.n[3] = t3
+ f.n[4] = t4
+ f.n[5] = t5
+ f.n[6] = t6
+ f.n[7] = t7
+ f.n[8] = t8
+ f.n[9] = t9
+ return f
+}
+
+// PutBytesUnchecked unpacks the field value to a 32-byte big-endian value
+// directly into the passed byte slice in constant time. The target slice
+// must have at least 32 bytes available or it will panic.
+//
+// There is a similar function, PutBytes, which unpacks the field value
+// into a 32-byte array directly. This version is provided since it can be useful
+// to write directly into part of a larger buffer without needing a separate
+// allocation.
+//
+// Preconditions:
+// - The field value MUST be normalized
+// - The target slice MUST have at least 32 bytes available
+func (f *FieldVal) PutBytesUnchecked(b []byte) {
+ // Unpack the 256 total bits from the 10 uint32 words with a max of
+ // 26-bits per word. This could be done with a couple of for loops,
+ // but this unrolled version is a bit faster. Benchmarks show this is
+ // about 10 times faster than the variant which uses loops.
+ b[31] = byte(f.n[0] & eightBitsMask) + b[30] = byte((f.n[0] >> 8) & eightBitsMask) + b[29] = byte((f.n[0] >> 16) & eightBitsMask) + b[28] = byte((f.n[0]>>24)&twoBitsMask | (f.n[1]&sixBitsMask)<<2) + b[27] = byte((f.n[1] >> 6) & eightBitsMask) + b[26] = byte((f.n[1] >> 14) & eightBitsMask) + b[25] = byte((f.n[1]>>22)&fourBitsMask | (f.n[2]&fourBitsMask)<<4) + b[24] = byte((f.n[2] >> 4) & eightBitsMask) + b[23] = byte((f.n[2] >> 12) & eightBitsMask) + b[22] = byte((f.n[2]>>20)&sixBitsMask | (f.n[3]&twoBitsMask)<<6) + b[21] = byte((f.n[3] >> 2) & eightBitsMask) + b[20] = byte((f.n[3] >> 10) & eightBitsMask) + b[19] = byte((f.n[3] >> 18) & eightBitsMask) + b[18] = byte(f.n[4] & eightBitsMask) + b[17] = byte((f.n[4] >> 8) & eightBitsMask) + b[16] = byte((f.n[4] >> 16) & eightBitsMask) + b[15] = byte((f.n[4]>>24)&twoBitsMask | (f.n[5]&sixBitsMask)<<2) + b[14] = byte((f.n[5] >> 6) & eightBitsMask) + b[13] = byte((f.n[5] >> 14) & eightBitsMask) + b[12] = byte((f.n[5]>>22)&fourBitsMask | (f.n[6]&fourBitsMask)<<4) + b[11] = byte((f.n[6] >> 4) & eightBitsMask) + b[10] = byte((f.n[6] >> 12) & eightBitsMask) + b[9] = byte((f.n[6]>>20)&sixBitsMask | (f.n[7]&twoBitsMask)<<6) + b[8] = byte((f.n[7] >> 2) & eightBitsMask) + b[7] = byte((f.n[7] >> 10) & eightBitsMask) + b[6] = byte((f.n[7] >> 18) & eightBitsMask) + b[5] = byte(f.n[8] & eightBitsMask) + b[4] = byte((f.n[8] >> 8) & eightBitsMask) + b[3] = byte((f.n[8] >> 16) & eightBitsMask) + b[2] = byte((f.n[8]>>24)&twoBitsMask | (f.n[9]&sixBitsMask)<<2) + b[1] = byte((f.n[9] >> 6) & eightBitsMask) + b[0] = byte((f.n[9] >> 14) & eightBitsMask) +} + +// PutBytes unpacks the field value to a 32-byte big-endian value using the +// passed byte array in constant time. +// +// There is a similar function, PutBytesUnchecked, which unpacks the field value +// into a slice that must have at least 32 bytes available. This version is +// provided since it can be useful to write directly into an array that is type +// checked. 
+// +// Alternatively, there is also Bytes, which unpacks the field value into a new +// array and returns that which can sometimes be more ergonomic in applications +// that aren't concerned about an additional copy. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) PutBytes(b *[32]byte) { + f.PutBytesUnchecked(b[:]) +} + +// Bytes unpacks the field value to a 32-byte big-endian value in constant time. +// +// See PutBytes and PutBytesUnchecked for variants that allow an array or slice +// to be passed which can be useful to cut down on the number of allocations by +// allowing the caller to reuse a buffer or write directly into part of a larger +// buffer. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) Bytes() *[32]byte { + b := new([32]byte) + f.PutBytesUnchecked(b[:]) + return b +} + +// IsZeroBit returns 1 when the field value is equal to zero or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsZero for the version that returns +// a bool. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsZeroBit() uint32 { + // The value can only be zero if no bits are set in any of the words. + // This is a constant time implementation. + bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] | + f.n[5] | f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return constantTimeEq(bits, 0) +} + +// IsZero returns whether or not the field value is equal to zero in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsZero() bool { + // The value can only be zero if no bits are set in any of the words. + // This is a constant time implementation. 
+ bits := f.n[0] | f.n[1] | f.n[2] | f.n[3] | f.n[4] | + f.n[5] | f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return bits == 0 +} + +// IsOneBit returns 1 when the field value is equal to one or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsOne for the version that returns a +// bool. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOneBit() uint32 { + // The value can only be one if the single lowest significant bit is set in + // the first word and no other bits are set in any of the other words. + // This is a constant time implementation. + bits := (f.n[0] ^ 1) | f.n[1] | f.n[2] | f.n[3] | f.n[4] | f.n[5] | + f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return constantTimeEq(bits, 0) +} + +// IsOne returns whether or not the field value is equal to one in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOne() bool { + // The value can only be one if the single lowest significant bit is set in + // the first word and no other bits are set in any of the other words. + // This is a constant time implementation. + bits := (f.n[0] ^ 1) | f.n[1] | f.n[2] | f.n[3] | f.n[4] | f.n[5] | + f.n[6] | f.n[7] | f.n[8] | f.n[9] + + return bits == 0 +} + +// IsOddBit returns 1 when the field value is an odd number or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsOdd for the version that returns a +// bool. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOddBit() uint32 { + // Only odd numbers have the bottom bit set. 
+ return f.n[0] & 1 +} + +// IsOdd returns whether or not the field value is an odd number in constant +// time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsOdd() bool { + // Only odd numbers have the bottom bit set. + return f.n[0]&1 == 1 +} + +// Equals returns whether or not the two field values are the same in constant +// time. +// +// Preconditions: +// - Both field values being compared MUST be normalized +func (f *FieldVal) Equals(val *FieldVal) bool { + // Xor only sets bits when they are different, so the two field values + // can only be the same if no bits are set after xoring each word. + // This is a constant time implementation. + bits := (f.n[0] ^ val.n[0]) | (f.n[1] ^ val.n[1]) | (f.n[2] ^ val.n[2]) | + (f.n[3] ^ val.n[3]) | (f.n[4] ^ val.n[4]) | (f.n[5] ^ val.n[5]) | + (f.n[6] ^ val.n[6]) | (f.n[7] ^ val.n[7]) | (f.n[8] ^ val.n[8]) | + (f.n[9] ^ val.n[9]) + + return bits == 0 +} + +// NegateVal negates the passed value and stores the result in f in constant +// time. The caller must provide the magnitude of the passed value for a +// correct result. +// +// The field value is returned to support chaining. This enables syntax like: +// f.NegateVal(f2).AddInt(1) so that f = -f2 + 1. +// +// Preconditions: +// - The max magnitude MUST be 63 +// Output Normalized: No +// Output Max Magnitude: Input magnitude + 1 +func (f *FieldVal) NegateVal(val *FieldVal, magnitude uint32) *FieldVal { + // Negation in the field is just the prime minus the value. However, + // in order to allow negation against a field value without having to + // normalize/reduce it first, multiply by the magnitude (that is how + // "far" away it is from the normalized value) to adjust. Also, since + // negating a value pushes it one more order of magnitude away from the + // normalized range, add 1 to compensate. 
+ // + // For some intuition here, imagine you're performing mod 12 arithmetic + // (picture a clock) and you are negating the number 7. So you start at + // 12 (which is of course 0 under mod 12) and count backwards (left on + // the clock) 7 times to arrive at 5. Notice this is just 12-7 = 5. + // Now, assume you're starting with 19, which is a number that is + // already larger than the modulus and congruent to 7 (mod 12). When a + // value is already in the desired range, its magnitude is 1. Since 19 + // is an additional "step", its magnitude (mod 12) is 2. Since any + // multiple of the modulus is congruent to zero (mod m), the answer can + // be shortcut by simply multiplying the magnitude by the modulus and + // subtracting. Keeping with the example, this would be (2*12)-19 = 5. + f.n[0] = (magnitude+1)*fieldPrimeWordZero - val.n[0] + f.n[1] = (magnitude+1)*fieldPrimeWordOne - val.n[1] + f.n[2] = (magnitude+1)*fieldBaseMask - val.n[2] + f.n[3] = (magnitude+1)*fieldBaseMask - val.n[3] + f.n[4] = (magnitude+1)*fieldBaseMask - val.n[4] + f.n[5] = (magnitude+1)*fieldBaseMask - val.n[5] + f.n[6] = (magnitude+1)*fieldBaseMask - val.n[6] + f.n[7] = (magnitude+1)*fieldBaseMask - val.n[7] + f.n[8] = (magnitude+1)*fieldBaseMask - val.n[8] + f.n[9] = (magnitude+1)*fieldMSBMask - val.n[9] + + return f +} + +// Negate negates the field value in constant time. The existing field value is +// modified. The caller must provide the magnitude of the field value for a +// correct result. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Negate().AddInt(1) so that f = -f + 1. +// +// Preconditions: +// - The max magnitude MUST be 63 +// Output Normalized: No +// Output Max Magnitude: Input magnitude + 1 +func (f *FieldVal) Negate(magnitude uint32) *FieldVal { + return f.NegateVal(f, magnitude) +} + +// AddInt adds the passed integer to the existing field value and stores the +// result in f in constant time. 
This is a convenience function since it is +// fairly common to perform some arithmetic with small native integers. +// +// The field value is returned to support chaining. This enables syntax like: +// f.AddInt(1).Add(f2) so that f = f + 1 + f2. +// +// Preconditions: +// - The field value MUST have a max magnitude of 63 +// Output Normalized: No +// Output Max Magnitude: Existing field magnitude + 1 +func (f *FieldVal) AddInt(ui uint16) *FieldVal { + // Since the field representation intentionally provides overflow bits, + // it's ok to use carryless addition as the carry bit is safely part of + // the word and will be normalized out. + f.n[0] += uint32(ui) + + return f +} + +// Add adds the passed value to the existing field value and stores the result +// in f in constant time. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Add(f2).AddInt(1) so that f = f + f2 + 1. +// +// Preconditions: +// - The sum of the magnitudes of the two field values MUST be a max of 64 +// Output Normalized: No +// Output Max Magnitude: Sum of the magnitude of the two individual field values +func (f *FieldVal) Add(val *FieldVal) *FieldVal { + // Since the field representation intentionally provides overflow bits, + // it's ok to use carryless addition as the carry bit is safely part of + // each word and will be normalized out. This could obviously be done + // in a loop, but the unrolled version is faster. + f.n[0] += val.n[0] + f.n[1] += val.n[1] + f.n[2] += val.n[2] + f.n[3] += val.n[3] + f.n[4] += val.n[4] + f.n[5] += val.n[5] + f.n[6] += val.n[6] + f.n[7] += val.n[7] + f.n[8] += val.n[8] + f.n[9] += val.n[9] + + return f +} + +// Add2 adds the passed two field values together and stores the result in f in +// constant time. +// +// The field value is returned to support chaining. This enables syntax like: +// f3.Add2(f, f2).AddInt(1) so that f3 = f + f2 + 1. 
+// +// Preconditions: +// - The sum of the magnitudes of the two field values MUST be a max of 64 +// Output Normalized: No +// Output Max Magnitude: Sum of the magnitude of the two field values +func (f *FieldVal) Add2(val *FieldVal, val2 *FieldVal) *FieldVal { + // Since the field representation intentionally provides overflow bits, + // it's ok to use carryless addition as the carry bit is safely part of + // each word and will be normalized out. This could obviously be done + // in a loop, but the unrolled version is faster. + f.n[0] = val.n[0] + val2.n[0] + f.n[1] = val.n[1] + val2.n[1] + f.n[2] = val.n[2] + val2.n[2] + f.n[3] = val.n[3] + val2.n[3] + f.n[4] = val.n[4] + val2.n[4] + f.n[5] = val.n[5] + val2.n[5] + f.n[6] = val.n[6] + val2.n[6] + f.n[7] = val.n[7] + val2.n[7] + f.n[8] = val.n[8] + val2.n[8] + f.n[9] = val.n[9] + val2.n[9] + + return f +} + +// MulInt multiplies the field value by the passed int and stores the result in +// f in constant time. Note that this function can overflow if multiplying the +// value by any of the individual words exceeds a max uint32. Therefore it is +// important that the caller ensures no overflows will occur before using this +// function. +// +// The field value is returned to support chaining. This enables syntax like: +// f.MulInt(2).Add(f2) so that f = 2 * f + f2. +// +// Preconditions: +// - The field value magnitude multiplied by given val MUST be a max of 64 +// Output Normalized: No +// Output Max Magnitude: Existing field magnitude times the provided integer val +func (f *FieldVal) MulInt(val uint8) *FieldVal { + // Since each word of the field representation can hold up to + // 32 - fieldBase extra bits which will be normalized out, it's safe + // to multiply each word without using a larger type or carry + // propagation so long as the values won't overflow a uint32. This + // could obviously be done in a loop, but the unrolled version is + // faster. 
+ ui := uint32(val)
+ f.n[0] *= ui
+ f.n[1] *= ui
+ f.n[2] *= ui
+ f.n[3] *= ui
+ f.n[4] *= ui
+ f.n[5] *= ui
+ f.n[6] *= ui
+ f.n[7] *= ui
+ f.n[8] *= ui
+ f.n[9] *= ui
+
+ return f
+}
+
+// Mul multiplies the passed value to the existing field value and stores the
+// result in f in constant time. Note that this function can overflow if
+// multiplying any of the individual words exceeds a max uint32. In practice,
+// this means the magnitude of either value involved in the multiplication must
+// be a max of 8.
+//
+// The field value is returned to support chaining. This enables syntax like:
+// f.Mul(f2).AddInt(1) so that f = (f * f2) + 1.
+//
+// Preconditions:
+// - Both field values MUST have a max magnitude of 8
+// Output Normalized: No
+// Output Max Magnitude: 1
+func (f *FieldVal) Mul(val *FieldVal) *FieldVal {
+ return f.Mul2(f, val)
+}
+
+// Mul2 multiplies the passed two field values together and stores the result
+// in f in constant time. Note that this function can overflow if
+// multiplying any of the individual words exceeds a max uint32. In practice,
+// this means the magnitude of either value involved in the multiplication must
+// be a max of 8.
+//
+// The field value is returned to support chaining. This enables syntax like:
+// f3.Mul2(f, f2).AddInt(1) so that f3 = (f * f2) + 1.
+//
+// Preconditions:
+// - Both input field values MUST have a max magnitude of 8
+// Output Normalized: No
+// Output Max Magnitude: 1
+func (f *FieldVal) Mul2(val *FieldVal, val2 *FieldVal) *FieldVal {
+ // This could be done with a couple of for loops and an array to store
+ // the intermediate terms, but this unrolled version is significantly
+ // faster.
+
+ // Terms for 2^(fieldBase*0).
+ m := uint64(val.n[0]) * uint64(val2.n[0])
+ t0 := m & fieldBaseMask
+
+ // Terms for 2^(fieldBase*1).
+ m = (m >> fieldBase) +
+ uint64(val.n[0])*uint64(val2.n[1]) +
+ uint64(val.n[1])*uint64(val2.n[0])
+ t1 := m & fieldBaseMask
+
+ // Terms for 2^(fieldBase*2).
+ m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[2]) + + uint64(val.n[1])*uint64(val2.n[1]) + + uint64(val.n[2])*uint64(val2.n[0]) + t2 := m & fieldBaseMask + + // Terms for 2^(fieldBase*3). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[3]) + + uint64(val.n[1])*uint64(val2.n[2]) + + uint64(val.n[2])*uint64(val2.n[1]) + + uint64(val.n[3])*uint64(val2.n[0]) + t3 := m & fieldBaseMask + + // Terms for 2^(fieldBase*4). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[4]) + + uint64(val.n[1])*uint64(val2.n[3]) + + uint64(val.n[2])*uint64(val2.n[2]) + + uint64(val.n[3])*uint64(val2.n[1]) + + uint64(val.n[4])*uint64(val2.n[0]) + t4 := m & fieldBaseMask + + // Terms for 2^(fieldBase*5). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[5]) + + uint64(val.n[1])*uint64(val2.n[4]) + + uint64(val.n[2])*uint64(val2.n[3]) + + uint64(val.n[3])*uint64(val2.n[2]) + + uint64(val.n[4])*uint64(val2.n[1]) + + uint64(val.n[5])*uint64(val2.n[0]) + t5 := m & fieldBaseMask + + // Terms for 2^(fieldBase*6). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[6]) + + uint64(val.n[1])*uint64(val2.n[5]) + + uint64(val.n[2])*uint64(val2.n[4]) + + uint64(val.n[3])*uint64(val2.n[3]) + + uint64(val.n[4])*uint64(val2.n[2]) + + uint64(val.n[5])*uint64(val2.n[1]) + + uint64(val.n[6])*uint64(val2.n[0]) + t6 := m & fieldBaseMask + + // Terms for 2^(fieldBase*7). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[7]) + + uint64(val.n[1])*uint64(val2.n[6]) + + uint64(val.n[2])*uint64(val2.n[5]) + + uint64(val.n[3])*uint64(val2.n[4]) + + uint64(val.n[4])*uint64(val2.n[3]) + + uint64(val.n[5])*uint64(val2.n[2]) + + uint64(val.n[6])*uint64(val2.n[1]) + + uint64(val.n[7])*uint64(val2.n[0]) + t7 := m & fieldBaseMask + + // Terms for 2^(fieldBase*8). 
+ m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[8]) + + uint64(val.n[1])*uint64(val2.n[7]) + + uint64(val.n[2])*uint64(val2.n[6]) + + uint64(val.n[3])*uint64(val2.n[5]) + + uint64(val.n[4])*uint64(val2.n[4]) + + uint64(val.n[5])*uint64(val2.n[3]) + + uint64(val.n[6])*uint64(val2.n[2]) + + uint64(val.n[7])*uint64(val2.n[1]) + + uint64(val.n[8])*uint64(val2.n[0]) + t8 := m & fieldBaseMask + + // Terms for 2^(fieldBase*9). + m = (m >> fieldBase) + + uint64(val.n[0])*uint64(val2.n[9]) + + uint64(val.n[1])*uint64(val2.n[8]) + + uint64(val.n[2])*uint64(val2.n[7]) + + uint64(val.n[3])*uint64(val2.n[6]) + + uint64(val.n[4])*uint64(val2.n[5]) + + uint64(val.n[5])*uint64(val2.n[4]) + + uint64(val.n[6])*uint64(val2.n[3]) + + uint64(val.n[7])*uint64(val2.n[2]) + + uint64(val.n[8])*uint64(val2.n[1]) + + uint64(val.n[9])*uint64(val2.n[0]) + t9 := m & fieldBaseMask + + // Terms for 2^(fieldBase*10). + m = (m >> fieldBase) + + uint64(val.n[1])*uint64(val2.n[9]) + + uint64(val.n[2])*uint64(val2.n[8]) + + uint64(val.n[3])*uint64(val2.n[7]) + + uint64(val.n[4])*uint64(val2.n[6]) + + uint64(val.n[5])*uint64(val2.n[5]) + + uint64(val.n[6])*uint64(val2.n[4]) + + uint64(val.n[7])*uint64(val2.n[3]) + + uint64(val.n[8])*uint64(val2.n[2]) + + uint64(val.n[9])*uint64(val2.n[1]) + t10 := m & fieldBaseMask + + // Terms for 2^(fieldBase*11). + m = (m >> fieldBase) + + uint64(val.n[2])*uint64(val2.n[9]) + + uint64(val.n[3])*uint64(val2.n[8]) + + uint64(val.n[4])*uint64(val2.n[7]) + + uint64(val.n[5])*uint64(val2.n[6]) + + uint64(val.n[6])*uint64(val2.n[5]) + + uint64(val.n[7])*uint64(val2.n[4]) + + uint64(val.n[8])*uint64(val2.n[3]) + + uint64(val.n[9])*uint64(val2.n[2]) + t11 := m & fieldBaseMask + + // Terms for 2^(fieldBase*12). 
+ m = (m >> fieldBase) + + uint64(val.n[3])*uint64(val2.n[9]) + + uint64(val.n[4])*uint64(val2.n[8]) + + uint64(val.n[5])*uint64(val2.n[7]) + + uint64(val.n[6])*uint64(val2.n[6]) + + uint64(val.n[7])*uint64(val2.n[5]) + + uint64(val.n[8])*uint64(val2.n[4]) + + uint64(val.n[9])*uint64(val2.n[3]) + t12 := m & fieldBaseMask + + // Terms for 2^(fieldBase*13). + m = (m >> fieldBase) + + uint64(val.n[4])*uint64(val2.n[9]) + + uint64(val.n[5])*uint64(val2.n[8]) + + uint64(val.n[6])*uint64(val2.n[7]) + + uint64(val.n[7])*uint64(val2.n[6]) + + uint64(val.n[8])*uint64(val2.n[5]) + + uint64(val.n[9])*uint64(val2.n[4]) + t13 := m & fieldBaseMask + + // Terms for 2^(fieldBase*14). + m = (m >> fieldBase) + + uint64(val.n[5])*uint64(val2.n[9]) + + uint64(val.n[6])*uint64(val2.n[8]) + + uint64(val.n[7])*uint64(val2.n[7]) + + uint64(val.n[8])*uint64(val2.n[6]) + + uint64(val.n[9])*uint64(val2.n[5]) + t14 := m & fieldBaseMask + + // Terms for 2^(fieldBase*15). + m = (m >> fieldBase) + + uint64(val.n[6])*uint64(val2.n[9]) + + uint64(val.n[7])*uint64(val2.n[8]) + + uint64(val.n[8])*uint64(val2.n[7]) + + uint64(val.n[9])*uint64(val2.n[6]) + t15 := m & fieldBaseMask + + // Terms for 2^(fieldBase*16). + m = (m >> fieldBase) + + uint64(val.n[7])*uint64(val2.n[9]) + + uint64(val.n[8])*uint64(val2.n[8]) + + uint64(val.n[9])*uint64(val2.n[7]) + t16 := m & fieldBaseMask + + // Terms for 2^(fieldBase*17). + m = (m >> fieldBase) + + uint64(val.n[8])*uint64(val2.n[9]) + + uint64(val.n[9])*uint64(val2.n[8]) + t17 := m & fieldBaseMask + + // Terms for 2^(fieldBase*18). + m = (m >> fieldBase) + uint64(val.n[9])*uint64(val2.n[9]) + t18 := m & fieldBaseMask + + // What's left is for 2^(fieldBase*19). + t19 := m >> fieldBase + + // At this point, all of the terms are grouped into their respective + // base. 
+ // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, highly efficient + // reduction can be achieved per the provided algorithm. + // + // The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits + // this criteria. + // + // 4294968273 in field representation (base 2^26) is: + // n[0] = 977 + // n[1] = 64 + // That is to say (2^26 * 64) + 977 = 4294968273 + // + // Since each word is in base 26, the upper terms (t10 and up) start + // at 260 bits (versus the final desired range of 256 bits), so the + // field representation of 'c' from above needs to be adjusted for the + // extra 4 bits by multiplying it by 2^4 = 16. 4294968273 * 16 = + // 68719492368. Thus, the adjusted field representation of 'c' is: + // n[0] = 977 * 16 = 15632 + // n[1] = 64 * 16 = 1024 + // That is to say (2^26 * 1024) + 15632 = 68719492368 + // + // To reduce the final term, t19, the entire 'c' value is needed instead + // of only n[0] because there are no more terms left to handle n[1]. + // This means there might be some magnitude left in the upper bits that + // is handled below. 
+ m = t0 + t10*15632 + t0 = m & fieldBaseMask + m = (m >> fieldBase) + t1 + t10*1024 + t11*15632 + t1 = m & fieldBaseMask + m = (m >> fieldBase) + t2 + t11*1024 + t12*15632 + t2 = m & fieldBaseMask + m = (m >> fieldBase) + t3 + t12*1024 + t13*15632 + t3 = m & fieldBaseMask + m = (m >> fieldBase) + t4 + t13*1024 + t14*15632 + t4 = m & fieldBaseMask + m = (m >> fieldBase) + t5 + t14*1024 + t15*15632 + t5 = m & fieldBaseMask + m = (m >> fieldBase) + t6 + t15*1024 + t16*15632 + t6 = m & fieldBaseMask + m = (m >> fieldBase) + t7 + t16*1024 + t17*15632 + t7 = m & fieldBaseMask + m = (m >> fieldBase) + t8 + t17*1024 + t18*15632 + t8 = m & fieldBaseMask + m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368 + t9 = m & fieldMSBMask + m = m >> fieldMSBBits + + // At this point, if the magnitude is greater than 0, the overall value + // is greater than the max possible 256-bit value. In particular, it is + // "how many times larger" than the max value it is. + // + // The algorithm presented in [HAC] section 14.3.4 repeats until the + // quotient is zero. However, due to the above, we already know at + // least how many times we would need to repeat as it's the value + // currently in m. Thus we can simply multiply the magnitude by the + // field representation of the prime and do a single iteration. Notice + // that nothing will be changed when the magnitude is zero, so we could + // skip this in that case, however always running regardless allows it + // to run in constant time. The final result will be in the range + // 0 <= result <= prime + (2^64 - c), so it is guaranteed to have a + // magnitude of 1, but it is denormalized. 
+ d := t0 + m*977 + f.n[0] = uint32(d & fieldBaseMask) + d = (d >> fieldBase) + t1 + m*64 + f.n[1] = uint32(d & fieldBaseMask) + f.n[2] = uint32((d >> fieldBase) + t2) + f.n[3] = uint32(t3) + f.n[4] = uint32(t4) + f.n[5] = uint32(t5) + f.n[6] = uint32(t6) + f.n[7] = uint32(t7) + f.n[8] = uint32(t8) + f.n[9] = uint32(t9) + + return f +} + +// SquareRootVal either calculates the square root of the passed value when it +// exists or the square root of the negation of the value when it does not exist +// and stores the result in f in constant time. The return flag is true when +// the calculated square root is for the passed value itself and false when it +// is for its negation. +// +// Note that this function can overflow if multiplying any of the individual +// words exceeds a max uint32. In practice, this means the magnitude of the +// field must be a max of 8 to prevent overflow. The magnitude of the result +// will be 1. +// +// Preconditions: +// - The input field value MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) SquareRootVal(val *FieldVal) bool { + // This uses the Tonelli-Shanks method for calculating the square root of + // the value when it exists. The key principles of the method follow. + // + // Fermat's little theorem states that for a nonzero number 'a' and prime + // 'p', a^(p-1) ≡ 1 (mod p). + // + // Further, Euler's criterion states that an integer 'a' has a square root + // (aka is a quadratic residue) modulo a prime if a^((p-1)/2) ≡ 1 (mod p) + // and, conversely, when it does NOT have a square root (aka 'a' is a + // non-residue) a^((p-1)/2) ≡ -1 (mod p). + // + // This can be seen by considering that Fermat's little theorem can be + // written as (a^((p-1)/2) - 1)(a^((p-1)/2) + 1) ≡ 0 (mod p). Therefore, + // one of the two factors must be 0. 
Then, when a ≡ x^2 (aka 'a' is a + // quadratic residue), (x^2)^((p-1)/2) ≡ x^(p-1) ≡ 1 (mod p) which implies + // the first factor must be zero. Finally, per Lagrange's theorem, the + // non-residues are the only remaining possible solutions and thus must make + // the second factor zero to satisfy Fermat's little theorem implying that + // a^((p-1)/2) ≡ -1 (mod p) for that case. + // + // The Tonelli-Shanks method uses these facts along with factoring out + // powers of two to solve a congruence that results in either the solution + // when the square root exists or the square root of the negation of the + // value when it does not. In the case of primes that are ≡ 3 (mod 4), the + // possible solutions are r = ±a^((p+1)/4) (mod p). Therefore, either r^2 ≡ + // a (mod p) is true in which case ±r are the two solutions, or r^2 ≡ -a + // (mod p) in which case 'a' is a non-residue and there are no solutions. + // + // The secp256k1 prime is ≡ 3 (mod 4), so this result applies. + // + // In other words, calculate a^((p+1)/4) and then square it and check it + // against the original value to determine if it is actually the square + // root. + // + // In order to efficiently compute a^((p+1)/4), (p+1)/4 needs to be split + // into a sequence of squares and multiplications that minimizes the number + // of multiplications needed (since they are more costly than squarings). + // + // The secp256k1 prime + 1 / 4 is 2^254 - 2^30 - 244. 
In binary, that is: + // + // 00111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 11111111 11111111 11111111 11111111 + // 10111111 11111111 11111111 00001100 + // + // Notice that can be broken up into three windows of consecutive 1s (in + // order of least to most significant) as: + // + // 6-bit window with two bits set (bits 4, 5, 6, 7 unset) + // 23-bit window with 22 bits set (bit 30 unset) + // 223-bit window with all 223 bits set + // + // Thus, the groups of 1 bits in each window forms the set: + // S = {2, 22, 223}. + // + // The strategy is to calculate a^(2^n - 1) for each grouping via an + // addition chain with a sliding window. + // + // The addition chain used is (credits to Peter Dettman): + // (0,0),(1,0),(2,2),(3,2),(4,1),(5,5),(6,6),(7,7),(8,8),(9,7),(10,2) + // => 2^1 2^[2] 2^3 2^6 2^9 2^11 2^[22] 2^44 2^88 2^176 2^220 2^[223] + // + // This has a cost of 254 field squarings and 13 field multiplications. 
+ var a, a2, a3, a6, a9, a11, a22, a44, a88, a176, a220, a223 FieldVal + a.Set(val) + a2.SquareVal(&a).Mul(&a) // a2 = a^(2^2 - 1) + a3.SquareVal(&a2).Mul(&a) // a3 = a^(2^3 - 1) + a6.SquareVal(&a3).Square().Square() // a6 = a^(2^6 - 2^3) + a6.Mul(&a3) // a6 = a^(2^6 - 1) + a9.SquareVal(&a6).Square().Square() // a9 = a^(2^9 - 2^3) + a9.Mul(&a3) // a9 = a^(2^9 - 1) + a11.SquareVal(&a9).Square() // a11 = a^(2^11 - 2^2) + a11.Mul(&a2) // a11 = a^(2^11 - 1) + a22.SquareVal(&a11).Square().Square().Square().Square() // a22 = a^(2^16 - 2^5) + a22.Square().Square().Square().Square().Square() // a22 = a^(2^21 - 2^10) + a22.Square() // a22 = a^(2^22 - 2^11) + a22.Mul(&a11) // a22 = a^(2^22 - 1) + a44.SquareVal(&a22).Square().Square().Square().Square() // a44 = a^(2^27 - 2^5) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^32 - 2^10) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^37 - 2^15) + a44.Square().Square().Square().Square().Square() // a44 = a^(2^42 - 2^20) + a44.Square().Square() // a44 = a^(2^44 - 2^22) + a44.Mul(&a22) // a44 = a^(2^44 - 1) + a88.SquareVal(&a44).Square().Square().Square().Square() // a88 = a^(2^49 - 2^5) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^54 - 2^10) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^59 - 2^15) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^64 - 2^20) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^69 - 2^25) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^74 - 2^30) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^79 - 2^35) + a88.Square().Square().Square().Square().Square() // a88 = a^(2^84 - 2^40) + a88.Square().Square().Square().Square() // a88 = a^(2^88 - 2^44) + a88.Mul(&a44) // a88 = a^(2^88 - 1) + a176.SquareVal(&a88).Square().Square().Square().Square() // a176 = a^(2^93 - 2^5) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^98 - 2^10) + 
a176.Square().Square().Square().Square().Square() // a176 = a^(2^103 - 2^15) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^108 - 2^20) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^113 - 2^25) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^118 - 2^30) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^123 - 2^35) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^128 - 2^40) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^133 - 2^45) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^138 - 2^50) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^143 - 2^55) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^148 - 2^60) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^153 - 2^65) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^158 - 2^70) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^163 - 2^75) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^168 - 2^80) + a176.Square().Square().Square().Square().Square() // a176 = a^(2^173 - 2^85) + a176.Square().Square().Square() // a176 = a^(2^176 - 2^88) + a176.Mul(&a88) // a176 = a^(2^176 - 1) + a220.SquareVal(&a176).Square().Square().Square().Square() // a220 = a^(2^181 - 2^5) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^186 - 2^10) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^191 - 2^15) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^196 - 2^20) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^201 - 2^25) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^206 - 2^30) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^211 - 2^35) + a220.Square().Square().Square().Square().Square() // a220 = a^(2^216 - 2^40) + a220.Square().Square().Square().Square() // a220 = a^(2^220 - 2^44) + 
a220.Mul(&a44) // a220 = a^(2^220 - 1) + a223.SquareVal(&a220).Square().Square() // a223 = a^(2^223 - 2^3) + a223.Mul(&a3) // a223 = a^(2^223 - 1) + + f.SquareVal(&a223).Square().Square().Square().Square() // f = a^(2^228 - 2^5) + f.Square().Square().Square().Square().Square() // f = a^(2^233 - 2^10) + f.Square().Square().Square().Square().Square() // f = a^(2^238 - 2^15) + f.Square().Square().Square().Square().Square() // f = a^(2^243 - 2^20) + f.Square().Square().Square() // f = a^(2^246 - 2^23) + f.Mul(&a22) // f = a^(2^246 - 2^22 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^251 - 2^27 - 2^5) + f.Square() // f = a^(2^252 - 2^28 - 2^6) + f.Mul(&a2) // f = a^(2^252 - 2^28 - 2^6 - 2^1 - 1) + f.Square().Square() // f = a^(2^254 - 2^30 - 2^8 - 2^3 - 2^2) + // // = a^(2^254 - 2^30 - 244) + // // = a^((p+1)/4) + + // Ensure the calculated result is actually the square root by squaring it + // and checking against the original value. + var sqr FieldVal + return sqr.SquareVal(f).Normalize().Equals(val.Normalize()) +} + +// Square squares the field value in constant time. The existing field value is +// modified. Note that this function can overflow if multiplying any of the +// individual words exceeds a max uint32. In practice, this means the magnitude +// of the field must be a max of 8 to prevent overflow. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Square().Mul(f2) so that f = f^2 * f2. +// +// Preconditions: +// - The field value MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) Square() *FieldVal { + return f.SquareVal(f) +} + +// SquareVal squares the passed value and stores the result in f in constant +// time. Note that this function can overflow if multiplying any of the +// individual words exceeds a max uint32. In practice, this means the magnitude +// of the field being squared must be a max of 8 to prevent overflow. 
+// +// The field value is returned to support chaining. This enables syntax like: +// f3.SquareVal(f).Mul(f) so that f3 = f^2 * f = f^3. +// +// Preconditions: +// - The input field value MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) SquareVal(val *FieldVal) *FieldVal { + // This could be done with a couple of for loops and an array to store + // the intermediate terms, but this unrolled version is significantly + // faster. + + // Terms for 2^(fieldBase*0). + m := uint64(val.n[0]) * uint64(val.n[0]) + t0 := m & fieldBaseMask + + // Terms for 2^(fieldBase*1). + m = (m >> fieldBase) + 2*uint64(val.n[0])*uint64(val.n[1]) + t1 := m & fieldBaseMask + + // Terms for 2^(fieldBase*2). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[2]) + + uint64(val.n[1])*uint64(val.n[1]) + t2 := m & fieldBaseMask + + // Terms for 2^(fieldBase*3). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[3]) + + 2*uint64(val.n[1])*uint64(val.n[2]) + t3 := m & fieldBaseMask + + // Terms for 2^(fieldBase*4). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[4]) + + 2*uint64(val.n[1])*uint64(val.n[3]) + + uint64(val.n[2])*uint64(val.n[2]) + t4 := m & fieldBaseMask + + // Terms for 2^(fieldBase*5). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[5]) + + 2*uint64(val.n[1])*uint64(val.n[4]) + + 2*uint64(val.n[2])*uint64(val.n[3]) + t5 := m & fieldBaseMask + + // Terms for 2^(fieldBase*6). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[6]) + + 2*uint64(val.n[1])*uint64(val.n[5]) + + 2*uint64(val.n[2])*uint64(val.n[4]) + + uint64(val.n[3])*uint64(val.n[3]) + t6 := m & fieldBaseMask + + // Terms for 2^(fieldBase*7). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[7]) + + 2*uint64(val.n[1])*uint64(val.n[6]) + + 2*uint64(val.n[2])*uint64(val.n[5]) + + 2*uint64(val.n[3])*uint64(val.n[4]) + t7 := m & fieldBaseMask + + // Terms for 2^(fieldBase*8). 
+ m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[8]) + + 2*uint64(val.n[1])*uint64(val.n[7]) + + 2*uint64(val.n[2])*uint64(val.n[6]) + + 2*uint64(val.n[3])*uint64(val.n[5]) + + uint64(val.n[4])*uint64(val.n[4]) + t8 := m & fieldBaseMask + + // Terms for 2^(fieldBase*9). + m = (m >> fieldBase) + + 2*uint64(val.n[0])*uint64(val.n[9]) + + 2*uint64(val.n[1])*uint64(val.n[8]) + + 2*uint64(val.n[2])*uint64(val.n[7]) + + 2*uint64(val.n[3])*uint64(val.n[6]) + + 2*uint64(val.n[4])*uint64(val.n[5]) + t9 := m & fieldBaseMask + + // Terms for 2^(fieldBase*10). + m = (m >> fieldBase) + + 2*uint64(val.n[1])*uint64(val.n[9]) + + 2*uint64(val.n[2])*uint64(val.n[8]) + + 2*uint64(val.n[3])*uint64(val.n[7]) + + 2*uint64(val.n[4])*uint64(val.n[6]) + + uint64(val.n[5])*uint64(val.n[5]) + t10 := m & fieldBaseMask + + // Terms for 2^(fieldBase*11). + m = (m >> fieldBase) + + 2*uint64(val.n[2])*uint64(val.n[9]) + + 2*uint64(val.n[3])*uint64(val.n[8]) + + 2*uint64(val.n[4])*uint64(val.n[7]) + + 2*uint64(val.n[5])*uint64(val.n[6]) + t11 := m & fieldBaseMask + + // Terms for 2^(fieldBase*12). + m = (m >> fieldBase) + + 2*uint64(val.n[3])*uint64(val.n[9]) + + 2*uint64(val.n[4])*uint64(val.n[8]) + + 2*uint64(val.n[5])*uint64(val.n[7]) + + uint64(val.n[6])*uint64(val.n[6]) + t12 := m & fieldBaseMask + + // Terms for 2^(fieldBase*13). + m = (m >> fieldBase) + + 2*uint64(val.n[4])*uint64(val.n[9]) + + 2*uint64(val.n[5])*uint64(val.n[8]) + + 2*uint64(val.n[6])*uint64(val.n[7]) + t13 := m & fieldBaseMask + + // Terms for 2^(fieldBase*14). + m = (m >> fieldBase) + + 2*uint64(val.n[5])*uint64(val.n[9]) + + 2*uint64(val.n[6])*uint64(val.n[8]) + + uint64(val.n[7])*uint64(val.n[7]) + t14 := m & fieldBaseMask + + // Terms for 2^(fieldBase*15). + m = (m >> fieldBase) + + 2*uint64(val.n[6])*uint64(val.n[9]) + + 2*uint64(val.n[7])*uint64(val.n[8]) + t15 := m & fieldBaseMask + + // Terms for 2^(fieldBase*16). 
+ m = (m >> fieldBase) + + 2*uint64(val.n[7])*uint64(val.n[9]) + + uint64(val.n[8])*uint64(val.n[8]) + t16 := m & fieldBaseMask + + // Terms for 2^(fieldBase*17). + m = (m >> fieldBase) + 2*uint64(val.n[8])*uint64(val.n[9]) + t17 := m & fieldBaseMask + + // Terms for 2^(fieldBase*18). + m = (m >> fieldBase) + uint64(val.n[9])*uint64(val.n[9]) + t18 := m & fieldBaseMask + + // What's left is for 2^(fieldBase*19). + t19 := m >> fieldBase + + // At this point, all of the terms are grouped into their respective + // base. + // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, highly efficient + // reduction can be achieved per the provided algorithm. + // + // The secp256k1 prime is equivalent to 2^256 - 4294968273, so it fits + // this criteria. + // + // 4294968273 in field representation (base 2^26) is: + // n[0] = 977 + // n[1] = 64 + // That is to say (2^26 * 64) + 977 = 4294968273 + // + // Since each word is in base 26, the upper terms (t10 and up) start + // at 260 bits (versus the final desired range of 256 bits), so the + // field representation of 'c' from above needs to be adjusted for the + // extra 4 bits by multiplying it by 2^4 = 16. 4294968273 * 16 = + // 68719492368. Thus, the adjusted field representation of 'c' is: + // n[0] = 977 * 16 = 15632 + // n[1] = 64 * 16 = 1024 + // That is to say (2^26 * 1024) + 15632 = 68719492368 + // + // To reduce the final term, t19, the entire 'c' value is needed instead + // of only n[0] because there are no more terms left to handle n[1]. + // This means there might be some magnitude left in the upper bits that + // is handled below. 
+ m = t0 + t10*15632 + t0 = m & fieldBaseMask + m = (m >> fieldBase) + t1 + t10*1024 + t11*15632 + t1 = m & fieldBaseMask + m = (m >> fieldBase) + t2 + t11*1024 + t12*15632 + t2 = m & fieldBaseMask + m = (m >> fieldBase) + t3 + t12*1024 + t13*15632 + t3 = m & fieldBaseMask + m = (m >> fieldBase) + t4 + t13*1024 + t14*15632 + t4 = m & fieldBaseMask + m = (m >> fieldBase) + t5 + t14*1024 + t15*15632 + t5 = m & fieldBaseMask + m = (m >> fieldBase) + t6 + t15*1024 + t16*15632 + t6 = m & fieldBaseMask + m = (m >> fieldBase) + t7 + t16*1024 + t17*15632 + t7 = m & fieldBaseMask + m = (m >> fieldBase) + t8 + t17*1024 + t18*15632 + t8 = m & fieldBaseMask + m = (m >> fieldBase) + t9 + t18*1024 + t19*68719492368 + t9 = m & fieldMSBMask + m = m >> fieldMSBBits + + // At this point, if the magnitude is greater than 0, the overall value + // is greater than the max possible 256-bit value. In particular, it is + // "how many times larger" than the max value it is. + // + // The algorithm presented in [HAC] section 14.3.4 repeats until the + // quotient is zero. However, due to the above, we already know at + // least how many times we would need to repeat as it's the value + // currently in m. Thus we can simply multiply the magnitude by the + // field representation of the prime and do a single iteration. Notice + // that nothing will be changed when the magnitude is zero, so we could + // skip this in that case, however always running regardless allows it + // to run in constant time. The final result will be in the range + // 0 <= result <= prime + (2^64 - c), so it is guaranteed to have a + // magnitude of 1, but it is denormalized. 
+ n := t0 + m*977 + f.n[0] = uint32(n & fieldBaseMask) + n = (n >> fieldBase) + t1 + m*64 + f.n[1] = uint32(n & fieldBaseMask) + f.n[2] = uint32((n >> fieldBase) + t2) + f.n[3] = uint32(t3) + f.n[4] = uint32(t4) + f.n[5] = uint32(t5) + f.n[6] = uint32(t6) + f.n[7] = uint32(t7) + f.n[8] = uint32(t8) + f.n[9] = uint32(t9) + + return f +} + +// Inverse finds the modular multiplicative inverse of the field value in +// constant time. The existing field value is modified. +// +// The field value is returned to support chaining. This enables syntax like: +// f.Inverse().Mul(f2) so that f = f^-1 * f2. +// +// Preconditions: +// - The field value MUST have a max magnitude of 8 +// Output Normalized: No +// Output Max Magnitude: 1 +func (f *FieldVal) Inverse() *FieldVal { + // Fermat's little theorem states that for a nonzero number a and prime + // prime p, a^(p-1) = 1 (mod p). Since the multiplicative inverse is + // a*b = 1 (mod p), it follows that b = a*a^(p-2) = a^(p-1) = 1 (mod p). + // Thus, a^(p-2) is the multiplicative inverse. + // + // In order to efficiently compute a^(p-2), p-2 needs to be split into + // a sequence of squares and multiplications that minimizes the number + // of multiplications needed (since they are more costly than + // squarings). Intermediate results are saved and reused as well. + // + // The secp256k1 prime - 2 is 2^256 - 4294968275. + // + // This has a cost of 258 field squarings and 33 field multiplications. 
+ var a2, a3, a4, a10, a11, a21, a42, a45, a63, a1019, a1023 FieldVal + a2.SquareVal(f) + a3.Mul2(&a2, f) + a4.SquareVal(&a2) + a10.SquareVal(&a4).Mul(&a2) + a11.Mul2(&a10, f) + a21.Mul2(&a10, &a11) + a42.SquareVal(&a21) + a45.Mul2(&a42, &a3) + a63.Mul2(&a42, &a21) + a1019.SquareVal(&a63).Square().Square().Square().Mul(&a11) + a1023.Mul2(&a1019, &a4) + f.Set(&a63) // f = a^(2^6 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^11 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^16 - 1024) + f.Mul(&a1023) // f = a^(2^16 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^21 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^26 - 1024) + f.Mul(&a1023) // f = a^(2^26 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^31 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^36 - 1024) + f.Mul(&a1023) // f = a^(2^36 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^41 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^46 - 1024) + f.Mul(&a1023) // f = a^(2^46 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^51 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^56 - 1024) + f.Mul(&a1023) // f = a^(2^56 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^61 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^66 - 1024) + f.Mul(&a1023) // f = a^(2^66 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^71 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^76 - 1024) + f.Mul(&a1023) // f = a^(2^76 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^81 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^86 - 1024) + f.Mul(&a1023) // f = a^(2^86 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^91 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^96 - 1024) + f.Mul(&a1023) // f = a^(2^96 - 1) + 
f.Square().Square().Square().Square().Square() // f = a^(2^101 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^106 - 1024) + f.Mul(&a1023) // f = a^(2^106 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^111 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^116 - 1024) + f.Mul(&a1023) // f = a^(2^116 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^121 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^126 - 1024) + f.Mul(&a1023) // f = a^(2^126 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^131 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^136 - 1024) + f.Mul(&a1023) // f = a^(2^136 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^141 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^146 - 1024) + f.Mul(&a1023) // f = a^(2^146 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^151 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^156 - 1024) + f.Mul(&a1023) // f = a^(2^156 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^161 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^166 - 1024) + f.Mul(&a1023) // f = a^(2^166 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^171 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^176 - 1024) + f.Mul(&a1023) // f = a^(2^176 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^181 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^186 - 1024) + f.Mul(&a1023) // f = a^(2^186 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^191 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^196 - 1024) + f.Mul(&a1023) // f = a^(2^196 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^201 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^206 - 1024) + f.Mul(&a1023) // f = a^(2^206 - 1) + 
f.Square().Square().Square().Square().Square() // f = a^(2^211 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^216 - 1024) + f.Mul(&a1023) // f = a^(2^216 - 1) + f.Square().Square().Square().Square().Square() // f = a^(2^221 - 32) + f.Square().Square().Square().Square().Square() // f = a^(2^226 - 1024) + f.Mul(&a1019) // f = a^(2^226 - 5) + f.Square().Square().Square().Square().Square() // f = a^(2^231 - 160) + f.Square().Square().Square().Square().Square() // f = a^(2^236 - 5120) + f.Mul(&a1023) // f = a^(2^236 - 4097) + f.Square().Square().Square().Square().Square() // f = a^(2^241 - 131104) + f.Square().Square().Square().Square().Square() // f = a^(2^246 - 4195328) + f.Mul(&a1023) // f = a^(2^246 - 4194305) + f.Square().Square().Square().Square().Square() // f = a^(2^251 - 134217760) + f.Square().Square().Square().Square().Square() // f = a^(2^256 - 4294968320) + return f.Mul(&a45) // f = a^(2^256 - 4294968275) = a^(p-2) +} + +// IsGtOrEqPrimeMinusOrder returns whether or not the field value exceeds the +// group order divided by 2 in constant time. +// +// Preconditions: +// - The field value MUST be normalized +func (f *FieldVal) IsGtOrEqPrimeMinusOrder() bool { + // The secp256k1 prime is equivalent to 2^256 - 4294968273 and the group + // order is 2^256 - 432420386565659656852420866394968145599. 
Thus, + // the prime minus the group order is: + // 432420386565659656852420866390673177326 + // + // In hex that is: + // 0x00000000 00000000 00000000 00000001 45512319 50b75fc4 402da172 2fc9baee + // + // Converting that to field representation (base 2^26) is: + // + // n[0] = 0x03c9baee + // n[1] = 0x03685c8b + // n[2] = 0x01fc4402 + // n[3] = 0x006542dd + // n[4] = 0x01455123 + // + // This can be verified with the following test code: + // pMinusN := new(big.Int).Sub(curveParams.P, curveParams.N) + // var fv FieldVal + // fv.SetByteSlice(pMinusN.Bytes()) + // t.Logf("%x", fv.n) + // + // Outputs: [3c9baee 3685c8b 1fc4402 6542dd 1455123 0 0 0 0 0] + const ( + pMinusNWordZero = 0x03c9baee + pMinusNWordOne = 0x03685c8b + pMinusNWordTwo = 0x01fc4402 + pMinusNWordThree = 0x006542dd + pMinusNWordFour = 0x01455123 + pMinusNWordFive = 0x00000000 + pMinusNWordSix = 0x00000000 + pMinusNWordSeven = 0x00000000 + pMinusNWordEight = 0x00000000 + pMinusNWordNine = 0x00000000 + ) + + // The intuition here is that the value is greater than field prime minus + // the group order if one of the higher individual words is greater than the + // corresponding word and all higher words in the value are equal. 
+ result := constantTimeGreater(f.n[9], pMinusNWordNine) + highWordsEqual := constantTimeEq(f.n[9], pMinusNWordNine) + result |= highWordsEqual & constantTimeGreater(f.n[8], pMinusNWordEight) + highWordsEqual &= constantTimeEq(f.n[8], pMinusNWordEight) + result |= highWordsEqual & constantTimeGreater(f.n[7], pMinusNWordSeven) + highWordsEqual &= constantTimeEq(f.n[7], pMinusNWordSeven) + result |= highWordsEqual & constantTimeGreater(f.n[6], pMinusNWordSix) + highWordsEqual &= constantTimeEq(f.n[6], pMinusNWordSix) + result |= highWordsEqual & constantTimeGreater(f.n[5], pMinusNWordFive) + highWordsEqual &= constantTimeEq(f.n[5], pMinusNWordFive) + result |= highWordsEqual & constantTimeGreater(f.n[4], pMinusNWordFour) + highWordsEqual &= constantTimeEq(f.n[4], pMinusNWordFour) + result |= highWordsEqual & constantTimeGreater(f.n[3], pMinusNWordThree) + highWordsEqual &= constantTimeEq(f.n[3], pMinusNWordThree) + result |= highWordsEqual & constantTimeGreater(f.n[2], pMinusNWordTwo) + highWordsEqual &= constantTimeEq(f.n[2], pMinusNWordTwo) + result |= highWordsEqual & constantTimeGreater(f.n[1], pMinusNWordOne) + highWordsEqual &= constantTimeEq(f.n[1], pMinusNWordOne) + result |= highWordsEqual & constantTimeGreaterOrEq(f.n[0], pMinusNWordZero) + + return result != 0 +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go new file mode 100644 index 0000000000..91c3d37769 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/loadprecomputed.go @@ -0,0 +1,91 @@ +// Copyright 2015 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package secp256k1 + +import ( + "compress/zlib" + "encoding/base64" + "io" + "strings" + "sync" +) + +//go:generate go run genprecomps.go + +// bytePointTable describes a table used to house pre-computed values for +// accelerating scalar base multiplication. +type bytePointTable [32][256]JacobianPoint + +// compressedBytePointsFn is set to a real function by the code generation to +// return the compressed pre-computed values for accelerating scalar base +// multiplication. +var compressedBytePointsFn func() string + +// s256BytePoints houses pre-computed values used to accelerate scalar base +// multiplication such that they are only loaded on first use. +var s256BytePoints = func() func() *bytePointTable { + // mustLoadBytePoints decompresses and deserializes the pre-computed byte + // points used to accelerate scalar base multiplication for the secp256k1 + // curve. + // + // This approach is used since it allows the compile to use significantly + // less ram and be performed much faster than it is with hard-coding the + // final in-memory data structure. At the same time, it is quite fast to + // generate the in-memory data structure on first use with this approach + // versus computing the table. + // + // It will panic on any errors because the data is hard coded and thus any + // errors means something is wrong in the source code. + var data *bytePointTable + mustLoadBytePoints := func() { + // There will be no byte points to load when generating them. + if compressedBytePointsFn == nil { + return + } + bp := compressedBytePointsFn() + + // Decompress the pre-computed table used to accelerate scalar base + // multiplication. + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp)) + r, err := zlib.NewReader(decoder) + if err != nil { + panic(err) + } + serialized, err := io.ReadAll(r) + if err != nil { + panic(err) + } + + // Deserialize the precomputed byte points and set the memory table to + // them. 
+ offset := 0 + var bytePoints bytePointTable + for byteNum := 0; byteNum < len(bytePoints); byteNum++ { + // All points in this window. + for i := 0; i < len(bytePoints[byteNum]); i++ { + p := &bytePoints[byteNum][i] + p.X.SetByteSlice(serialized[offset:]) + offset += 32 + p.Y.SetByteSlice(serialized[offset:]) + offset += 32 + p.Z.SetInt(1) + } + } + data = &bytePoints + } + + // Return a closure that initializes the data on first access. This is done + // because the table takes a non-trivial amount of memory and initializing + // it unconditionally would cause anything that imports the package, either + // directly, or indirectly via transitive deps, to use that memory even if + // the caller never accesses any parts of the package that actually needs + // access to it. + var loadBytePointsOnce sync.Once + return func() *bytePointTable { + loadBytePointsOnce.Do(mustLoadBytePoints) + return data + } +}() diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go new file mode 100644 index 0000000000..f66496ed5e --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/modnscalar.go @@ -0,0 +1,1101 @@ +// Copyright (c) 2020-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "encoding/hex" + "math/big" +) + +// References: +// [SECG]: Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [HAC]: Handbook of Applied Cryptography Menezes, van Oorschot, Vanstone. +// http://cacr.uwaterloo.ca/hac/ + +// Many elliptic curve operations require working with scalars in a finite field +// characterized by the order of the group underlying the secp256k1 curve. +// Given this precision is larger than the biggest available native type, +// obviously some form of bignum math is needed. 
This code implements +// specialized fixed-precision field arithmetic rather than relying on an +// arbitrary-precision arithmetic package such as math/big for dealing with the +// math modulo the group order since the size is known. As a result, rather +// large performance gains are achieved by taking advantage of many +// optimizations not available to arbitrary-precision arithmetic and generic +// modular arithmetic algorithms. +// +// There are various ways to internally represent each element. For example, +// the most obvious representation would be to use an array of 4 uint64s (64 +// bits * 4 = 256 bits). However, that representation suffers from the fact +// that there is no native Go type large enough to handle the intermediate +// results while adding or multiplying two 64-bit numbers. +// +// Given the above, this implementation represents the field elements as 8 +// uint32s with each word (array entry) treated as base 2^32. This was chosen +// because most systems at the current time are 64-bit (or at least have 64-bit +// registers available for specialized purposes such as MMX) so the intermediate +// results can typically be done using a native register (and using uint64s to +// avoid the need for additional half-word arithmetic) + +const ( + // These fields provide convenient access to each of the words of the + // secp256k1 curve group order N to improve code readability. 
+ // + // The group order of the curve per [SECG] is: + // 0xffffffff ffffffff ffffffff fffffffe baaedce6 af48a03b bfd25e8c d0364141 + orderWordZero uint32 = 0xd0364141 + orderWordOne uint32 = 0xbfd25e8c + orderWordTwo uint32 = 0xaf48a03b + orderWordThree uint32 = 0xbaaedce6 + orderWordFour uint32 = 0xfffffffe + orderWordFive uint32 = 0xffffffff + orderWordSix uint32 = 0xffffffff + orderWordSeven uint32 = 0xffffffff + + // These fields provide convenient access to each of the words of the two's + // complement of the secp256k1 curve group order N to improve code + // readability. + // + // The two's complement of the group order is: + // 0x00000000 00000000 00000000 00000001 45512319 50b75fc4 402da173 2fc9bebf + orderComplementWordZero uint32 = (^orderWordZero) + 1 + orderComplementWordOne uint32 = ^orderWordOne + orderComplementWordTwo uint32 = ^orderWordTwo + orderComplementWordThree uint32 = ^orderWordThree + //orderComplementWordFour uint32 = ^orderWordFour // unused + //orderComplementWordFive uint32 = ^orderWordFive // unused + //orderComplementWordSix uint32 = ^orderWordSix // unused + //orderComplementWordSeven uint32 = ^orderWordSeven // unused + + // These fields provide convenient access to each of the words of the + // secp256k1 curve group order N / 2 to improve code readability and avoid + // the need to recalculate them. + // + // The half order of the secp256k1 curve group is: + // 0x7fffffff ffffffff ffffffff ffffffff 5d576e73 57a4501d dfe92f46 681b20a0 + halfOrderWordZero uint32 = 0x681b20a0 + halfOrderWordOne uint32 = 0xdfe92f46 + halfOrderWordTwo uint32 = 0x57a4501d + halfOrderWordThree uint32 = 0x5d576e73 + halfOrderWordFour uint32 = 0xffffffff + halfOrderWordFive uint32 = 0xffffffff + halfOrderWordSix uint32 = 0xffffffff + halfOrderWordSeven uint32 = 0x7fffffff + + // uint32Mask is simply a mask with all bits set for a uint32 and is used to + // improve the readability of the code. 
+ uint32Mask = 0xffffffff +) + +var ( + // zero32 is an array of 32 bytes used for the purposes of zeroing and is + // defined here to avoid extra allocations. + zero32 = [32]byte{} +) + +// ModNScalar implements optimized 256-bit constant-time fixed-precision +// arithmetic over the secp256k1 group order. This means all arithmetic is +// performed modulo: +// +// 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 +// +// It only implements the arithmetic needed for elliptic curve operations, +// however, the operations that are not implemented can typically be worked +// around if absolutely needed. For example, subtraction can be performed by +// adding the negation. +// +// Should it be absolutely necessary, conversion to the standard library +// math/big.Int can be accomplished by using the Bytes method, slicing the +// resulting fixed-size array, and feeding it to big.Int.SetBytes. However, +// that should typically be avoided when possible as conversion to big.Ints +// requires allocations, is not constant time, and is slower when working modulo +// the group order. +type ModNScalar struct { + // The scalar is represented as 8 32-bit integers in base 2^32. + // + // The following depicts the internal representation: + // --------------------------------------------------------- + // | n[7] | n[6] | ... | n[0] | + // | 32 bits | 32 bits | ... | 32 bits | + // | Mult: 2^(32*7) | Mult: 2^(32*6) | ... | Mult: 2^(32*0) | + // --------------------------------------------------------- + // + // For example, consider the number 2^87 + 2^42 + 1. It would be + // represented as: + // n[0] = 1 + // n[1] = 2^10 + // n[2] = 2^23 + // n[3..7] = 0 + // + // The full 256-bit value is then calculated by looping i from 7..0 and + // doing sum(n[i] * 2^(32i)) like so: + // n[7] * 2^(32*7) = 0 * 2^224 = 0 + // n[6] * 2^(32*6) = 0 * 2^192 = 0 + // ... 
+ // n[2] * 2^(32*2) = 2^23 * 2^64 = 2^87 + // n[1] * 2^(32*1) = 2^10 * 2^32 = 2^42 + // n[0] * 2^(32*0) = 1 * 2^0 = 1 + // Sum: 0 + 0 + ... + 2^87 + 2^42 + 1 = 2^87 + 2^42 + 1 + n [8]uint32 +} + +// String returns the scalar as a human-readable hex string. +// +// This is NOT constant time. +func (s ModNScalar) String() string { + b := s.Bytes() + return hex.EncodeToString(b[:]) +} + +// Set sets the scalar equal to a copy of the passed one in constant time. +// +// The scalar is returned to support chaining. This enables syntax like: +// s := new(ModNScalar).Set(s2).Add(1) so that s = s2 + 1 where s2 is not +// modified. +func (s *ModNScalar) Set(val *ModNScalar) *ModNScalar { + *s = *val + return s +} + +// Zero sets the scalar to zero in constant time. A newly created scalar is +// already set to zero. This function can be useful to clear an existing scalar +// for reuse. +func (s *ModNScalar) Zero() { + s.n[0] = 0 + s.n[1] = 0 + s.n[2] = 0 + s.n[3] = 0 + s.n[4] = 0 + s.n[5] = 0 + s.n[6] = 0 + s.n[7] = 0 +} + +// IsZeroBit returns 1 when the scalar is equal to zero or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. See IsZero for the version that returns +// a bool. +func (s *ModNScalar) IsZeroBit() uint32 { + // The scalar can only be zero if no bits are set in any of the words. + bits := s.n[0] | s.n[1] | s.n[2] | s.n[3] | s.n[4] | s.n[5] | s.n[6] | s.n[7] + return constantTimeEq(bits, 0) +} + +// IsZero returns whether or not the scalar is equal to zero in constant time. +func (s *ModNScalar) IsZero() bool { + // The scalar can only be zero if no bits are set in any of the words. + bits := s.n[0] | s.n[1] | s.n[2] | s.n[3] | s.n[4] | s.n[5] | s.n[6] | s.n[7] + return bits == 0 +} + +// SetInt sets the scalar to the passed integer in constant time. 
This is a +// convenience function since it is fairly common to perform some arithmetic +// with small native integers. +// +// The scalar is returned to support chaining. This enables syntax like: +// s := new(ModNScalar).SetInt(2).Mul(s2) so that s = 2 * s2. +func (s *ModNScalar) SetInt(ui uint32) *ModNScalar { + s.Zero() + s.n[0] = ui + return s +} + +// constantTimeEq returns 1 if a == b or 0 otherwise in constant time. +func constantTimeEq(a, b uint32) uint32 { + return uint32((uint64(a^b) - 1) >> 63) +} + +// constantTimeNotEq returns 1 if a != b or 0 otherwise in constant time. +func constantTimeNotEq(a, b uint32) uint32 { + return ^uint32((uint64(a^b)-1)>>63) & 1 +} + +// constantTimeLess returns 1 if a < b or 0 otherwise in constant time. +func constantTimeLess(a, b uint32) uint32 { + return uint32((uint64(a) - uint64(b)) >> 63) +} + +// constantTimeLessOrEq returns 1 if a <= b or 0 otherwise in constant time. +func constantTimeLessOrEq(a, b uint32) uint32 { + return uint32((uint64(a) - uint64(b) - 1) >> 63) +} + +// constantTimeGreater returns 1 if a > b or 0 otherwise in constant time. +func constantTimeGreater(a, b uint32) uint32 { + return constantTimeLess(b, a) +} + +// constantTimeGreaterOrEq returns 1 if a >= b or 0 otherwise in constant time. +func constantTimeGreaterOrEq(a, b uint32) uint32 { + return constantTimeLessOrEq(b, a) +} + +// constantTimeMin returns min(a,b) in constant time. +func constantTimeMin(a, b uint32) uint32 { + return b ^ ((a ^ b) & -constantTimeLess(a, b)) +} + +// overflows determines if the current scalar is greater than or equal to the +// group order in constant time and returns 1 if it is or 0 otherwise. +func (s *ModNScalar) overflows() uint32 { + // The intuition here is that the scalar is greater than the group order if + // one of the higher individual words is greater than corresponding word of + // the group order and all higher words in the scalar are equal to their + // corresponding word of the group order. 
Since this type is modulo the + // group order, being equal is also an overflow back to 0. + // + // Note that the words 5, 6, and 7 are all the max uint32 value, so there is + // no need to test if those individual words of the scalar exceeds them, + // hence, only equality is checked for them. + highWordsEqual := constantTimeEq(s.n[7], orderWordSeven) + highWordsEqual &= constantTimeEq(s.n[6], orderWordSix) + highWordsEqual &= constantTimeEq(s.n[5], orderWordFive) + overflow := highWordsEqual & constantTimeGreater(s.n[4], orderWordFour) + highWordsEqual &= constantTimeEq(s.n[4], orderWordFour) + overflow |= highWordsEqual & constantTimeGreater(s.n[3], orderWordThree) + highWordsEqual &= constantTimeEq(s.n[3], orderWordThree) + overflow |= highWordsEqual & constantTimeGreater(s.n[2], orderWordTwo) + highWordsEqual &= constantTimeEq(s.n[2], orderWordTwo) + overflow |= highWordsEqual & constantTimeGreater(s.n[1], orderWordOne) + highWordsEqual &= constantTimeEq(s.n[1], orderWordOne) + overflow |= highWordsEqual & constantTimeGreaterOrEq(s.n[0], orderWordZero) + + return overflow +} + +// reduce256 reduces the current scalar modulo the group order in accordance +// with the overflows parameter in constant time. The overflows parameter +// specifies whether or not the scalar is known to be greater than the group +// order and MUST either be 1 in the case it is or 0 in the case it is not for a +// correct result. +func (s *ModNScalar) reduce256(overflows uint32) { + // Notice that since s < 2^256 < 2N (where N is the group order), the max + // possible number of reductions required is one. Therefore, in the case a + // reduction is needed, it can be performed with a single subtraction of N. + // Also, recall that subtraction is equivalent to addition by the two's + // complement while ignoring the carry. + // + // When s >= N, the overflows parameter will be 1. Conversely, it will be 0 + // when s < N. 
Thus multiplying by the overflows parameter will either + // result in 0 or the multiplicand itself. + // + // Combining the above along with the fact that s + 0 = s, the following is + // a constant time implementation that works by either adding 0 or the two's + // complement of N as needed. + // + // The final result will be in the range 0 <= s < N as expected. + overflows64 := uint64(overflows) + c := uint64(s.n[0]) + overflows64*uint64(orderComplementWordZero) + s.n[0] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[1]) + overflows64*uint64(orderComplementWordOne) + s.n[1] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[2]) + overflows64*uint64(orderComplementWordTwo) + s.n[2] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[3]) + overflows64*uint64(orderComplementWordThree) + s.n[3] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[4]) + overflows64 // * 1 + s.n[4] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[5]) // + overflows64 * 0 + s.n[5] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[6]) // + overflows64 * 0 + s.n[6] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(s.n[7]) // + overflows64 * 0 + s.n[7] = uint32(c & uint32Mask) +} + +// SetBytes interprets the provided array as a 256-bit big-endian unsigned +// integer, reduces it modulo the group order, sets the scalar to the result, +// and returns either 1 if it was reduced (aka it overflowed) or 0 otherwise in +// constant time. +// +// Note that a bool is not used here because it is not possible in Go to convert +// from a bool to numeric value in constant time and many constant-time +// operations require a numeric value. +func (s *ModNScalar) SetBytes(b *[32]byte) uint32 { + // Pack the 256 total bits across the 8 uint32 words. This could be done + // with a for loop, but benchmarks show this unrolled version is about 2 + // times faster than the variant that uses a loop. 
+ s.n[0] = uint32(b[31]) | uint32(b[30])<<8 | uint32(b[29])<<16 | uint32(b[28])<<24 + s.n[1] = uint32(b[27]) | uint32(b[26])<<8 | uint32(b[25])<<16 | uint32(b[24])<<24 + s.n[2] = uint32(b[23]) | uint32(b[22])<<8 | uint32(b[21])<<16 | uint32(b[20])<<24 + s.n[3] = uint32(b[19]) | uint32(b[18])<<8 | uint32(b[17])<<16 | uint32(b[16])<<24 + s.n[4] = uint32(b[15]) | uint32(b[14])<<8 | uint32(b[13])<<16 | uint32(b[12])<<24 + s.n[5] = uint32(b[11]) | uint32(b[10])<<8 | uint32(b[9])<<16 | uint32(b[8])<<24 + s.n[6] = uint32(b[7]) | uint32(b[6])<<8 | uint32(b[5])<<16 | uint32(b[4])<<24 + s.n[7] = uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + + // The value might be >= N, so reduce it as required and return whether or + // not it was reduced. + needsReduce := s.overflows() + s.reduce256(needsReduce) + return needsReduce +} + +// zeroArray32 zeroes the provided 32-byte buffer. +func zeroArray32(b *[32]byte) { + copy(b[:], zero32[:]) +} + +// SetByteSlice interprets the provided slice as a 256-bit big-endian unsigned +// integer (meaning it is truncated to the first 32 bytes), reduces it modulo +// the group order, sets the scalar to the result, and returns whether or not +// the resulting truncated 256-bit integer overflowed in constant time. +// +// Note that since passing a slice with more than 32 bytes is truncated, it is +// possible that the truncated value is less than the order of the curve and +// hence it will not be reported as having overflowed in that case. It is up to +// the caller to decide whether it needs to provide numbers of the appropriate +// size or it is acceptable to use this function with the described truncation +// and overflow behavior. 
+func (s *ModNScalar) SetByteSlice(b []byte) bool { + var b32 [32]byte + b = b[:constantTimeMin(uint32(len(b)), 32)] + copy(b32[:], b32[:32-len(b)]) + copy(b32[32-len(b):], b) + result := s.SetBytes(&b32) + zeroArray32(&b32) + return result != 0 +} + +// PutBytesUnchecked unpacks the scalar to a 32-byte big-endian value directly +// into the passed byte slice in constant time. The target slice must must have +// at least 32 bytes available or it will panic. +// +// There is a similar function, PutBytes, which unpacks the scalar into a +// 32-byte array directly. This version is provided since it can be useful to +// write directly into part of a larger buffer without needing a separate +// allocation. +// +// Preconditions: +// - The target slice MUST have at least 32 bytes available +func (s *ModNScalar) PutBytesUnchecked(b []byte) { + // Unpack the 256 total bits from the 8 uint32 words. This could be done + // with a for loop, but benchmarks show this unrolled version is about 2 + // times faster than the variant which uses a loop. 
+ b[31] = byte(s.n[0]) + b[30] = byte(s.n[0] >> 8) + b[29] = byte(s.n[0] >> 16) + b[28] = byte(s.n[0] >> 24) + b[27] = byte(s.n[1]) + b[26] = byte(s.n[1] >> 8) + b[25] = byte(s.n[1] >> 16) + b[24] = byte(s.n[1] >> 24) + b[23] = byte(s.n[2]) + b[22] = byte(s.n[2] >> 8) + b[21] = byte(s.n[2] >> 16) + b[20] = byte(s.n[2] >> 24) + b[19] = byte(s.n[3]) + b[18] = byte(s.n[3] >> 8) + b[17] = byte(s.n[3] >> 16) + b[16] = byte(s.n[3] >> 24) + b[15] = byte(s.n[4]) + b[14] = byte(s.n[4] >> 8) + b[13] = byte(s.n[4] >> 16) + b[12] = byte(s.n[4] >> 24) + b[11] = byte(s.n[5]) + b[10] = byte(s.n[5] >> 8) + b[9] = byte(s.n[5] >> 16) + b[8] = byte(s.n[5] >> 24) + b[7] = byte(s.n[6]) + b[6] = byte(s.n[6] >> 8) + b[5] = byte(s.n[6] >> 16) + b[4] = byte(s.n[6] >> 24) + b[3] = byte(s.n[7]) + b[2] = byte(s.n[7] >> 8) + b[1] = byte(s.n[7] >> 16) + b[0] = byte(s.n[7] >> 24) +} + +// PutBytes unpacks the scalar to a 32-byte big-endian value using the passed +// byte array in constant time. +// +// There is a similar function, PutBytesUnchecked, which unpacks the scalar into +// a slice that must have at least 32 bytes available. This version is provided +// since it can be useful to write directly into an array that is type checked. +// +// Alternatively, there is also Bytes, which unpacks the scalar into a new array +// and returns that which can sometimes be more ergonomic in applications that +// aren't concerned about an additional copy. +func (s *ModNScalar) PutBytes(b *[32]byte) { + s.PutBytesUnchecked(b[:]) +} + +// Bytes unpacks the scalar to a 32-byte big-endian value in constant time. +// +// See PutBytes and PutBytesUnchecked for variants that allow an array or slice +// to be passed which can be useful to cut down on the number of allocations +// by allowing the caller to reuse a buffer or write directly into part of a +// larger buffer. 
+func (s *ModNScalar) Bytes() [32]byte { + var b [32]byte + s.PutBytesUnchecked(b[:]) + return b +} + +// IsOdd returns whether or not the scalar is an odd number in constant time. +func (s *ModNScalar) IsOdd() bool { + // Only odd numbers have the bottom bit set. + return s.n[0]&1 == 1 +} + +// Equals returns whether or not the two scalars are the same in constant time. +func (s *ModNScalar) Equals(val *ModNScalar) bool { + // Xor only sets bits when they are different, so the two scalars can only + // be the same if no bits are set after xoring each word. + bits := (s.n[0] ^ val.n[0]) | (s.n[1] ^ val.n[1]) | (s.n[2] ^ val.n[2]) | + (s.n[3] ^ val.n[3]) | (s.n[4] ^ val.n[4]) | (s.n[5] ^ val.n[5]) | + (s.n[6] ^ val.n[6]) | (s.n[7] ^ val.n[7]) + + return bits == 0 +} + +// Add2 adds the passed two scalars together modulo the group order in constant +// time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.Add2(s, s2).AddInt(1) so that s3 = s + s2 + 1. +func (s *ModNScalar) Add2(val1, val2 *ModNScalar) *ModNScalar { + c := uint64(val1.n[0]) + uint64(val2.n[0]) + s.n[0] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[1]) + uint64(val2.n[1]) + s.n[1] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[2]) + uint64(val2.n[2]) + s.n[2] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[3]) + uint64(val2.n[3]) + s.n[3] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[4]) + uint64(val2.n[4]) + s.n[4] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[5]) + uint64(val2.n[5]) + s.n[5] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[6]) + uint64(val2.n[6]) + s.n[6] = uint32(c & uint32Mask) + c = (c >> 32) + uint64(val1.n[7]) + uint64(val2.n[7]) + s.n[7] = uint32(c & uint32Mask) + + // The result is now 256 bits, but it might still be >= N, so use the + // existing normal reduce method for 256-bit values. 
+ s.reduce256(uint32(c>>32) + s.overflows()) + return s +} + +// Add adds the passed scalar to the existing one modulo the group order in +// constant time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Add(s2).AddInt(1) so that s = s + s2 + 1. +func (s *ModNScalar) Add(val *ModNScalar) *ModNScalar { + return s.Add2(s, val) +} + +// accumulator96 provides a 96-bit accumulator for use in the intermediate +// calculations requiring more than 64-bits. +type accumulator96 struct { + n [3]uint32 +} + +// Add adds the passed unsigned 64-bit value to the accumulator. +func (a *accumulator96) Add(v uint64) { + low := uint32(v & uint32Mask) + hi := uint32(v >> 32) + a.n[0] += low + hi += constantTimeLess(a.n[0], low) // Carry if overflow in n[0]. + a.n[1] += hi + a.n[2] += constantTimeLess(a.n[1], hi) // Carry if overflow in n[1]. +} + +// Rsh32 right shifts the accumulator by 32 bits. +func (a *accumulator96) Rsh32() { + a.n[0] = a.n[1] + a.n[1] = a.n[2] + a.n[2] = 0 +} + +// reduce385 reduces the 385-bit intermediate result in the passed terms modulo +// the group order in constant time and stores the result in s. +func (s *ModNScalar) reduce385(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12 uint64) { + // At this point, the intermediate result in the passed terms has been + // reduced to fit within 385 bits, so reduce it again using the same method + // described in reduce512. As before, the intermediate result will end up + // being reduced by another 127 bits to 258 bits, thus 9 32-bit terms are + // needed for this iteration. The reduced terms are assigned back to t0 + // through t8. + // + // Note that several of the intermediate calculations require adding 64-bit + // products together which would overflow a uint64, so a 96-bit accumulator + // is used instead until the value is reduced enough to use native uint64s. + + // Terms for 2^(32*0). 
+ var acc accumulator96 + acc.n[0] = uint32(t0) // == acc.Add(t0) because acc is guaranteed to be 0. + acc.Add(t8 * uint64(orderComplementWordZero)) + t0 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*1). + acc.Add(t1) + acc.Add(t8 * uint64(orderComplementWordOne)) + acc.Add(t9 * uint64(orderComplementWordZero)) + t1 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*2). + acc.Add(t2) + acc.Add(t8 * uint64(orderComplementWordTwo)) + acc.Add(t9 * uint64(orderComplementWordOne)) + acc.Add(t10 * uint64(orderComplementWordZero)) + t2 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*3). + acc.Add(t3) + acc.Add(t8 * uint64(orderComplementWordThree)) + acc.Add(t9 * uint64(orderComplementWordTwo)) + acc.Add(t10 * uint64(orderComplementWordOne)) + acc.Add(t11 * uint64(orderComplementWordZero)) + t3 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*4). + acc.Add(t4) + acc.Add(t8) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t9 * uint64(orderComplementWordThree)) + acc.Add(t10 * uint64(orderComplementWordTwo)) + acc.Add(t11 * uint64(orderComplementWordOne)) + acc.Add(t12 * uint64(orderComplementWordZero)) + t4 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*5). + acc.Add(t5) + // acc.Add(t8 * uint64(orderComplementWordFive)) // 0 + acc.Add(t9) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t10 * uint64(orderComplementWordThree)) + acc.Add(t11 * uint64(orderComplementWordTwo)) + acc.Add(t12 * uint64(orderComplementWordOne)) + t5 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*6). + acc.Add(t6) + // acc.Add(t8 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t9 * uint64(orderComplementWordFive)) // 0 + acc.Add(t10) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t11 * uint64(orderComplementWordThree)) + acc.Add(t12 * uint64(orderComplementWordTwo)) + t6 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*7). 
+ acc.Add(t7) + // acc.Add(t8 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t9 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t10 * uint64(orderComplementWordFive)) // 0 + acc.Add(t11) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t12 * uint64(orderComplementWordThree)) + t7 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*8). + // acc.Add(t9 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t10 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t11 * uint64(orderComplementWordFive)) // 0 + acc.Add(t12) // * uint64(orderComplementWordFour) // * 1 + t8 = uint64(acc.n[0]) + // acc.Rsh32() // No need since not used after this. Guaranteed to be 0. + + // NOTE: All of the remaining multiplications for this iteration result in 0 + // as they all involve multiplying by combinations of the fifth, sixth, and + // seventh words of the two's complement of N, which are 0, so skip them. + + // At this point, the result is reduced to fit within 258 bits, so reduce it + // again using a slightly modified version of the same method. The maximum + // value in t8 is 2 at this point and therefore multiplying it by each word + // of the two's complement of N and adding it to a 32-bit term will result + // in a maximum requirement of 33 bits, so it is safe to use native uint64s + // here for the intermediate term carry propagation. + // + // Also, since the maximum value in t8 is 2, this ends up reducing by + // another 2 bits to 256 bits. 
+ c := t0 + t8*uint64(orderComplementWordZero) + s.n[0] = uint32(c & uint32Mask) + c = (c >> 32) + t1 + t8*uint64(orderComplementWordOne) + s.n[1] = uint32(c & uint32Mask) + c = (c >> 32) + t2 + t8*uint64(orderComplementWordTwo) + s.n[2] = uint32(c & uint32Mask) + c = (c >> 32) + t3 + t8*uint64(orderComplementWordThree) + s.n[3] = uint32(c & uint32Mask) + c = (c >> 32) + t4 + t8 // * uint64(orderComplementWordFour) == * 1 + s.n[4] = uint32(c & uint32Mask) + c = (c >> 32) + t5 // + t8*uint64(orderComplementWordFive) == 0 + s.n[5] = uint32(c & uint32Mask) + c = (c >> 32) + t6 // + t8*uint64(orderComplementWordSix) == 0 + s.n[6] = uint32(c & uint32Mask) + c = (c >> 32) + t7 // + t8*uint64(orderComplementWordSeven) == 0 + s.n[7] = uint32(c & uint32Mask) + + // The result is now 256 bits, but it might still be >= N, so use the + // existing normal reduce method for 256-bit values. + s.reduce256(uint32(c>>32) + s.overflows()) +} + +// reduce512 reduces the 512-bit intermediate result in the passed terms modulo +// the group order down to 385 bits in constant time and stores the result in s. +func (s *ModNScalar) reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15 uint64) { + // At this point, the intermediate result in the passed terms is grouped + // into the respective bases. + // + // Per [HAC] section 14.3.4: Reduction method of moduli of special form, + // when the modulus is of the special form m = b^t - c, where log_2(c) < t, + // highly efficient reduction can be achieved per the provided algorithm. + // + // The secp256k1 group order fits this criteria since it is: + // 2^256 - 432420386565659656852420866394968145599 + // + // Technically the max possible value here is (N-1)^2 since the two scalars + // being multiplied are always mod N. Nevertheless, it is safer to consider + // it to be (2^256-1)^2 = 2^512 - 2^256 + 1 since it is the product of two + // 256-bit values. 
+ // + // The algorithm is to reduce the result modulo the prime by subtracting + // multiples of the group order N. However, in order simplify carry + // propagation, this adds with the two's complement of N to achieve the same + // result. + // + // Since the two's complement of N has 127 leading zero bits, this will end + // up reducing the intermediate result from 512 bits to 385 bits, resulting + // in 13 32-bit terms. The reduced terms are assigned back to t0 through + // t12. + // + // Note that several of the intermediate calculations require adding 64-bit + // products together which would overflow a uint64, so a 96-bit accumulator + // is used instead. + + // Terms for 2^(32*0). + var acc accumulator96 + acc.n[0] = uint32(t0) // == acc.Add(t0) because acc is guaranteed to be 0. + acc.Add(t8 * uint64(orderComplementWordZero)) + t0 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*1). + acc.Add(t1) + acc.Add(t8 * uint64(orderComplementWordOne)) + acc.Add(t9 * uint64(orderComplementWordZero)) + t1 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*2). + acc.Add(t2) + acc.Add(t8 * uint64(orderComplementWordTwo)) + acc.Add(t9 * uint64(orderComplementWordOne)) + acc.Add(t10 * uint64(orderComplementWordZero)) + t2 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*3). + acc.Add(t3) + acc.Add(t8 * uint64(orderComplementWordThree)) + acc.Add(t9 * uint64(orderComplementWordTwo)) + acc.Add(t10 * uint64(orderComplementWordOne)) + acc.Add(t11 * uint64(orderComplementWordZero)) + t3 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*4). + acc.Add(t4) + acc.Add(t8) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t9 * uint64(orderComplementWordThree)) + acc.Add(t10 * uint64(orderComplementWordTwo)) + acc.Add(t11 * uint64(orderComplementWordOne)) + acc.Add(t12 * uint64(orderComplementWordZero)) + t4 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*5). 
+ acc.Add(t5) + // acc.Add(t8 * uint64(orderComplementWordFive)) // 0 + acc.Add(t9) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t10 * uint64(orderComplementWordThree)) + acc.Add(t11 * uint64(orderComplementWordTwo)) + acc.Add(t12 * uint64(orderComplementWordOne)) + acc.Add(t13 * uint64(orderComplementWordZero)) + t5 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*6). + acc.Add(t6) + // acc.Add(t8 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t9 * uint64(orderComplementWordFive)) // 0 + acc.Add(t10) // * uint64(orderComplementWordFour)) // * 1 + acc.Add(t11 * uint64(orderComplementWordThree)) + acc.Add(t12 * uint64(orderComplementWordTwo)) + acc.Add(t13 * uint64(orderComplementWordOne)) + acc.Add(t14 * uint64(orderComplementWordZero)) + t6 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*7). + acc.Add(t7) + // acc.Add(t8 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t9 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t10 * uint64(orderComplementWordFive)) // 0 + acc.Add(t11) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t12 * uint64(orderComplementWordThree)) + acc.Add(t13 * uint64(orderComplementWordTwo)) + acc.Add(t14 * uint64(orderComplementWordOne)) + acc.Add(t15 * uint64(orderComplementWordZero)) + t7 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*8). + // acc.Add(t9 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t10 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t11 * uint64(orderComplementWordFive)) // 0 + acc.Add(t12) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t13 * uint64(orderComplementWordThree)) + acc.Add(t14 * uint64(orderComplementWordTwo)) + acc.Add(t15 * uint64(orderComplementWordOne)) + t8 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*9). 
+ // acc.Add(t10 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t11 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t12 * uint64(orderComplementWordFive)) // 0 + acc.Add(t13) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t14 * uint64(orderComplementWordThree)) + acc.Add(t15 * uint64(orderComplementWordTwo)) + t9 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*10). + // acc.Add(t11 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t12 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t13 * uint64(orderComplementWordFive)) // 0 + acc.Add(t14) // * uint64(orderComplementWordFour) // * 1 + acc.Add(t15 * uint64(orderComplementWordThree)) + t10 = uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*11). + // acc.Add(t12 * uint64(orderComplementWordSeven)) // 0 + // acc.Add(t13 * uint64(orderComplementWordSix)) // 0 + // acc.Add(t14 * uint64(orderComplementWordFive)) // 0 + acc.Add(t15) // * uint64(orderComplementWordFour) // * 1 + t11 = uint64(acc.n[0]) + acc.Rsh32() + + // NOTE: All of the remaining multiplications for this iteration result in 0 + // as they all involve multiplying by combinations of the fifth, sixth, and + // seventh words of the two's complement of N, which are 0, so skip them. + + // Terms for 2^(32*12). + t12 = uint64(acc.n[0]) + // acc.Rsh32() // No need since not used after this. Guaranteed to be 0. + + // At this point, the result is reduced to fit within 385 bits, so reduce it + // again using the same method accordingly. + s.reduce385(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12) +} + +// Mul2 multiplies the passed two scalars together modulo the group order in +// constant time and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.Mul2(s, s2).AddInt(1) so that s3 = (s * s2) + 1. 
+func (s *ModNScalar) Mul2(val, val2 *ModNScalar) *ModNScalar { + // This could be done with for loops and an array to store the intermediate + // terms, but this unrolled version is significantly faster. + + // The overall strategy employed here is: + // 1) Calculate the 512-bit product of the two scalars using the standard + // pencil-and-paper method. + // 2) Reduce the result modulo the prime by effectively subtracting + // multiples of the group order N (actually performed by adding multiples + // of the two's complement of N to avoid implementing subtraction). + // 3) Repeat step 2 noting that each iteration reduces the required number + // of bits by 127 because the two's complement of N has 127 leading zero + // bits. + // 4) Once reduced to 256 bits, call the existing reduce method to perform + // a final reduction as needed. + // + // Note that several of the intermediate calculations require adding 64-bit + // products together which would overflow a uint64, so a 96-bit accumulator + // is used instead. + + // Terms for 2^(32*0). + var acc accumulator96 + acc.Add(uint64(val.n[0]) * uint64(val2.n[0])) + t0 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*1). + acc.Add(uint64(val.n[0]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[0])) + t1 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*2). + acc.Add(uint64(val.n[0]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[0])) + t2 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*3). + acc.Add(uint64(val.n[0]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[0])) + t3 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*4). 
+ acc.Add(uint64(val.n[0]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[0])) + t4 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*5). + acc.Add(uint64(val.n[0]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[0])) + t5 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*6). + acc.Add(uint64(val.n[0]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[0])) + t6 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*7). + acc.Add(uint64(val.n[0]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[1]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[1])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[0])) + t7 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*8). + acc.Add(uint64(val.n[1]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[2]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[2])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[1])) + t8 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*9). 
+ acc.Add(uint64(val.n[2]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[3]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[3])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[2])) + t9 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*10). + acc.Add(uint64(val.n[3]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[4]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[4])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[3])) + t10 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*11). + acc.Add(uint64(val.n[4]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[5]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[5])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[4])) + t11 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*12). + acc.Add(uint64(val.n[5]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[6]) * uint64(val2.n[6])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[5])) + t12 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*13). + acc.Add(uint64(val.n[6]) * uint64(val2.n[7])) + acc.Add(uint64(val.n[7]) * uint64(val2.n[6])) + t13 := uint64(acc.n[0]) + acc.Rsh32() + + // Terms for 2^(32*14). + acc.Add(uint64(val.n[7]) * uint64(val2.n[7])) + t14 := uint64(acc.n[0]) + acc.Rsh32() + + // What's left is for 2^(32*15). + t15 := uint64(acc.n[0]) + // acc.Rsh32() // No need since not used after this. Guaranteed to be 0. + + // At this point, all of the terms are grouped into their respective base + // and occupy up to 512 bits. Reduce the result accordingly. + s.reduce512(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, + t15) + return s +} + +// Mul multiplies the passed scalar with the existing one modulo the group order +// in constant time and stores the result in s. +// +// The scalar is returned to support chaining. 
This enables syntax like: +// s.Mul(s2).AddInt(1) so that s = (s * s2) + 1. +func (s *ModNScalar) Mul(val *ModNScalar) *ModNScalar { + return s.Mul2(s, val) +} + +// SquareVal squares the passed scalar modulo the group order in constant time +// and stores the result in s. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.SquareVal(s).Mul(s) so that s3 = s^2 * s = s^3. +func (s *ModNScalar) SquareVal(val *ModNScalar) *ModNScalar { + // This could technically be optimized slightly to take advantage of the + // fact that many of the intermediate calculations in squaring are just + // doubling, however, benchmarking has shown that due to the need to use a + // 96-bit accumulator, any savings are essentially offset by that and + // consequently there is no real difference in performance over just + // multiplying the value by itself to justify the extra code for now. This + // can be revisited in the future if it becomes a bottleneck in practice. + + return s.Mul2(val, val) +} + +// Square squares the scalar modulo the group order in constant time. The +// existing scalar is modified. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Square().Mul(s2) so that s = s^2 * s2. +func (s *ModNScalar) Square() *ModNScalar { + return s.SquareVal(s) +} + +// NegateVal negates the passed scalar modulo the group order and stores the +// result in s in constant time. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.NegateVal(s2).AddInt(1) so that s = -s2 + 1. +func (s *ModNScalar) NegateVal(val *ModNScalar) *ModNScalar { + // Since the scalar is already in the range 0 <= val < N, where N is the + // group order, negation modulo the group order is just the group order + // minus the value. This implies that the result will always be in the + // desired range with the sole exception of 0 because N - 0 = N itself. 
+ // + // Therefore, in order to avoid the need to reduce the result for every + // other case in order to achieve constant time, this creates a mask that is + // all 0s in the case of the scalar being negated is 0 and all 1s otherwise + // and bitwise ands that mask with each word. + // + // Finally, to simplify the carry propagation, this adds the two's + // complement of the scalar to N in order to achieve the same result. + bits := val.n[0] | val.n[1] | val.n[2] | val.n[3] | val.n[4] | val.n[5] | + val.n[6] | val.n[7] + mask := uint64(uint32Mask * constantTimeNotEq(bits, 0)) + c := uint64(orderWordZero) + (uint64(^val.n[0]) + 1) + s.n[0] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordOne) + uint64(^val.n[1]) + s.n[1] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordTwo) + uint64(^val.n[2]) + s.n[2] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordThree) + uint64(^val.n[3]) + s.n[3] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordFour) + uint64(^val.n[4]) + s.n[4] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordFive) + uint64(^val.n[5]) + s.n[5] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordSix) + uint64(^val.n[6]) + s.n[6] = uint32(c & mask) + c = (c >> 32) + uint64(orderWordSeven) + uint64(^val.n[7]) + s.n[7] = uint32(c & mask) + return s +} + +// Negate negates the scalar modulo the group order in constant time. The +// existing scalar is modified. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Negate().AddInt(1) so that s = -s + 1. +func (s *ModNScalar) Negate() *ModNScalar { + return s.NegateVal(s) +} + +// InverseValNonConst finds the modular multiplicative inverse of the passed +// scalar and stores result in s in *non-constant* time. +// +// The scalar is returned to support chaining. This enables syntax like: +// s3.InverseVal(s1).Mul(s2) so that s3 = s1^-1 * s2. +func (s *ModNScalar) InverseValNonConst(val *ModNScalar) *ModNScalar { + // This is making use of big integers for now. 
Ideally it will be replaced + // with an implementation that does not depend on big integers. + valBytes := val.Bytes() + bigVal := new(big.Int).SetBytes(valBytes[:]) + bigVal.ModInverse(bigVal, curveParams.N) + s.SetByteSlice(bigVal.Bytes()) + return s +} + +// InverseNonConst finds the modular multiplicative inverse of the scalar in +// *non-constant* time. The existing scalar is modified. +// +// The scalar is returned to support chaining. This enables syntax like: +// s.Inverse().Mul(s2) so that s = s^-1 * s2. +func (s *ModNScalar) InverseNonConst() *ModNScalar { + return s.InverseValNonConst(s) +} + +// IsOverHalfOrder returns whether or not the scalar exceeds the group order +// divided by 2 in constant time. +func (s *ModNScalar) IsOverHalfOrder() bool { + // The intuition here is that the scalar is greater than half of the group + // order if one of the higher individual words is greater than the + // corresponding word of the half group order and all higher words in the + // scalar are equal to their corresponding word of the half group order. + // + // Note that the words 4, 5, and 6 are all the max uint32 value, so there is + // no need to test if those individual words of the scalar exceeds them, + // hence, only equality is checked for them. 
+ result := constantTimeGreater(s.n[7], halfOrderWordSeven) + highWordsEqual := constantTimeEq(s.n[7], halfOrderWordSeven) + highWordsEqual &= constantTimeEq(s.n[6], halfOrderWordSix) + highWordsEqual &= constantTimeEq(s.n[5], halfOrderWordFive) + highWordsEqual &= constantTimeEq(s.n[4], halfOrderWordFour) + result |= highWordsEqual & constantTimeGreater(s.n[3], halfOrderWordThree) + highWordsEqual &= constantTimeEq(s.n[3], halfOrderWordThree) + result |= highWordsEqual & constantTimeGreater(s.n[2], halfOrderWordTwo) + highWordsEqual &= constantTimeEq(s.n[2], halfOrderWordTwo) + result |= highWordsEqual & constantTimeGreater(s.n[1], halfOrderWordOne) + highWordsEqual &= constantTimeEq(s.n[1], halfOrderWordOne) + result |= highWordsEqual & constantTimeGreater(s.n[0], halfOrderWordZero) + + return result != 0 +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go new file mode 100644 index 0000000000..81b205d9c1 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/nonce.go @@ -0,0 +1,263 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2020 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +import ( + "bytes" + "crypto/sha256" + "hash" +) + +// References: +// [GECC]: Guide to Elliptic Curve Cryptography (Hankerson, Menezes, Vanstone) +// +// [ISO/IEC 8825-1]: Information technology — ASN.1 encoding rules: +// Specification of Basic Encoding Rules (BER), Canonical Encoding Rules +// (CER) and Distinguished Encoding Rules (DER) +// +// [SEC1]: Elliptic Curve Cryptography (May 31, 2009, Version 2.0) +// https://www.secg.org/sec1-v2.pdf + +var ( + // singleZero is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. 
+ singleZero = []byte{0x00} + + // zeroInitializer is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. + zeroInitializer = bytes.Repeat([]byte{0x00}, sha256.BlockSize) + + // singleOne is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. + singleOne = []byte{0x01} + + // oneInitializer is used during RFC6979 nonce generation. It is provided + // here to avoid the need to create it multiple times. + oneInitializer = bytes.Repeat([]byte{0x01}, sha256.Size) +) + +// hmacsha256 implements a resettable version of HMAC-SHA256. +type hmacsha256 struct { + inner, outer hash.Hash + ipad, opad [sha256.BlockSize]byte +} + +// Write adds data to the running hash. +func (h *hmacsha256) Write(p []byte) { + h.inner.Write(p) +} + +// initKey initializes the HMAC-SHA256 instance to the provided key. +func (h *hmacsha256) initKey(key []byte) { + // Hash the key if it is too large. + if len(key) > sha256.BlockSize { + h.outer.Write(key) + key = h.outer.Sum(nil) + } + copy(h.ipad[:], key) + copy(h.opad[:], key) + for i := range h.ipad { + h.ipad[i] ^= 0x36 + } + for i := range h.opad { + h.opad[i] ^= 0x5c + } + h.inner.Write(h.ipad[:]) +} + +// ResetKey resets the HMAC-SHA256 to its initial state and then initializes it +// with the provided key. It is equivalent to creating a new instance with the +// provided key without allocating more memory. +func (h *hmacsha256) ResetKey(key []byte) { + h.inner.Reset() + h.outer.Reset() + copy(h.ipad[:], zeroInitializer) + copy(h.opad[:], zeroInitializer) + h.initKey(key) +} + +// Resets the HMAC-SHA256 to its initial state using the current key. +func (h *hmacsha256) Reset() { + h.inner.Reset() + h.inner.Write(h.ipad[:]) +} + +// Sum returns the hash of the written data. 
+func (h *hmacsha256) Sum() []byte { + h.outer.Reset() + h.outer.Write(h.opad[:]) + h.outer.Write(h.inner.Sum(nil)) + return h.outer.Sum(nil) +} + +// newHMACSHA256 returns a new HMAC-SHA256 hasher using the provided key. +func newHMACSHA256(key []byte) *hmacsha256 { + h := new(hmacsha256) + h.inner = sha256.New() + h.outer = sha256.New() + h.initKey(key) + return h +} + +// NonceRFC6979 generates a nonce deterministically according to RFC 6979 using +// HMAC-SHA256 for the hashing function. It takes a 32-byte hash as an input +// and returns a 32-byte nonce to be used for deterministic signing. The extra +// and version arguments are optional, but allow additional data to be added to +// the input of the HMAC. When provided, the extra data must be 32-bytes and +// version must be 16 bytes or they will be ignored. +// +// Finally, the extraIterations parameter provides a method to produce a stream +// of deterministic nonces to ensure the signing code is able to produce a nonce +// that results in a valid signature in the extremely unlikely event the +// original nonce produced results in an invalid signature (e.g. R == 0). +// Signing code should start with 0 and increment it if necessary. +func NonceRFC6979(privKey []byte, hash []byte, extra []byte, version []byte, extraIterations uint32) *ModNScalar { + // Input to HMAC is the 32-byte private key and the 32-byte hash. In + // addition, it may include the optional 32-byte extra data and 16-byte + // version. Create a fixed-size array to avoid extra allocs and slice it + // properly. + const ( + privKeyLen = 32 + hashLen = 32 + extraLen = 32 + versionLen = 16 + ) + var keyBuf [privKeyLen + hashLen + extraLen + versionLen]byte + + // Truncate rightmost bytes of private key and hash if they are too long and + // leave left padding of zeros when they're too short. 
+ if len(privKey) > privKeyLen { + privKey = privKey[:privKeyLen] + } + if len(hash) > hashLen { + hash = hash[:hashLen] + } + offset := privKeyLen - len(privKey) // Zero left padding if needed. + offset += copy(keyBuf[offset:], privKey) + offset += hashLen - len(hash) // Zero left padding if needed. + offset += copy(keyBuf[offset:], hash) + if len(extra) == extraLen { + offset += copy(keyBuf[offset:], extra) + if len(version) == versionLen { + offset += copy(keyBuf[offset:], version) + } + } else if len(version) == versionLen { + // When the version was specified, but not the extra data, leave the + // extra data portion all zero. + offset += privKeyLen + offset += copy(keyBuf[offset:], version) + } + key := keyBuf[:offset] + + // Step B. + // + // V = 0x01 0x01 0x01 ... 0x01 such that the length of V, in bits, is + // equal to 8*ceil(hashLen/8). + // + // Note that since the hash length is a multiple of 8 for the chosen hash + // function in this optimized implementation, the result is just the hash + // length, so avoid the extra calculations. Also, since it isn't modified, + // start with a global value. + v := oneInitializer + + // Step C (Go zeroes all allocated memory). + // + // K = 0x00 0x00 0x00 ... 0x00 such that the length of K, in bits, is + // equal to 8*ceil(hashLen/8). + // + // As above, since the hash length is a multiple of 8 for the chosen hash + // function in this optimized implementation, the result is just the hash + // length, so avoid the extra calculations. + k := zeroInitializer[:hashLen] + + // Step D. + // + // K = HMAC_K(V || 0x00 || int2octets(x) || bits2octets(h1)) + // + // Note that key is the "int2octets(x) || bits2octets(h1)" portion along + // with potential additional data as described by section 3.6 of the RFC. + hasher := newHMACSHA256(k) + hasher.Write(oneInitializer) + hasher.Write(singleZero[:]) + hasher.Write(key) + k = hasher.Sum() + + // Step E. 
+ // + // V = HMAC_K(V) + hasher.ResetKey(k) + hasher.Write(v) + v = hasher.Sum() + + // Step F. + // + // K = HMAC_K(V || 0x01 || int2octets(x) || bits2octets(h1)) + // + // Note that key is the "int2octets(x) || bits2octets(h1)" portion along + // with potential additional data as described by section 3.6 of the RFC. + hasher.Reset() + hasher.Write(v) + hasher.Write(singleOne[:]) + hasher.Write(key[:]) + k = hasher.Sum() + + // Step G. + // + // V = HMAC_K(V) + hasher.ResetKey(k) + hasher.Write(v) + v = hasher.Sum() + + // Step H. + // + // Repeat until the value is nonzero and less than the curve order. + var generated uint32 + for { + // Step H1 and H2. + // + // Set T to the empty sequence. The length of T (in bits) is denoted + // tlen; thus, at that point, tlen = 0. + // + // While tlen < qlen, do the following: + // V = HMAC_K(V) + // T = T || V + // + // Note that because the hash function output is the same length as the + // private key in this optimized implementation, there is no need to + // loop or create an intermediate T. + hasher.Reset() + hasher.Write(v) + v = hasher.Sum() + + // Step H3. + // + // k = bits2int(T) + // If k is within the range [1,q-1], return it. 
+ //
+ // Otherwise, compute:
+ // K = HMAC_K(V || 0x00)
+ // V = HMAC_K(V)
+ var secret ModNScalar
+ overflow := secret.SetByteSlice(v)
+ if !overflow && !secret.IsZero() {
+ generated++
+ if generated > extraIterations {
+ return &secret
+ }
+ }
+
+ // K = HMAC_K(V || 0x00)
+ hasher.Reset()
+ hasher.Write(v)
+ hasher.Write(singleZero[:])
+ k = hasher.Sum()
+
+ // V = HMAC_K(V)
+ hasher.ResetKey(k)
+ hasher.Write(v)
+ v = hasher.Sum()
+ }
+}
diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
new file mode 100644
index 0000000000..3ca5b7c2f3
--- /dev/null
+++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/privkey.go
@@ -0,0 +1,89 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2022 The Decred developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package secp256k1
+
+import (
+ csprng "crypto/rand"
+)
+
+// PrivateKey provides facilities for working with secp256k1 private keys within
+// this package and includes functionality such as serializing and parsing them
+// as well as computing their associated public key.
+type PrivateKey struct {
+ Key ModNScalar
+}
+
+// NewPrivateKey instantiates a new private key from a scalar encoded as a
+// big integer.
+func NewPrivateKey(key *ModNScalar) *PrivateKey {
+ return &PrivateKey{Key: *key}
+}
+
+// PrivKeyFromBytes returns a private key based on the provided byte slice which
+// is interpreted as an unsigned 256-bit big-endian integer in the range [0, N-1],
+// where N is the order of the curve.
+//
+// Note that this means passing a slice with more than 32 bytes is truncated and
+// that truncated value is reduced modulo N. It is up to the caller to either
+// provide a value in the appropriate range or choose to accept the described
+// behavior.
+// +// Typically callers should simply make use of GeneratePrivateKey when creating +// private keys which properly handles generation of appropriate values. +func PrivKeyFromBytes(privKeyBytes []byte) *PrivateKey { + var privKey PrivateKey + privKey.Key.SetByteSlice(privKeyBytes) + return &privKey +} + +// GeneratePrivateKey generates and returns a new cryptographically secure +// private key that is suitable for use with secp256k1. +func GeneratePrivateKey() (*PrivateKey, error) { + // The group order is close enough to 2^256 that there is only roughly a 1 + // in 2^128 chance of generating an invalid private key, so this loop will + // virtually never run more than a single iteration in practice. + var key PrivateKey + var b32 [32]byte + for valid := false; !valid; { + if _, err := csprng.Read(b32[:]); err != nil { + return nil, err + } + + // The private key is only valid when it is in the range [1, N-1], where + // N is the order of the curve. + overflow := key.Key.SetBytes(&b32) + valid = (key.Key.IsZeroBit() | overflow) == 0 + } + zeroArray32(&b32) + + return &key, nil +} + +// PubKey computes and returns the public key corresponding to this private key. +func (p *PrivateKey) PubKey() *PublicKey { + var result JacobianPoint + ScalarBaseMultNonConst(&p.Key, &result) + result.ToAffine() + return NewPublicKey(&result.X, &result.Y) +} + +// Zero manually clears the memory associated with the private key. This can be +// used to explicitly clear key material from memory for enhanced security +// against memory scraping. +func (p *PrivateKey) Zero() { + p.Key.Zero() +} + +// PrivKeyBytesLen defines the length in bytes of a serialized private key. +const PrivKeyBytesLen = 32 + +// Serialize returns the private key as a 256-bit big-endian binary-encoded +// number, padded to a length of 32 bytes. 
+func (p PrivateKey) Serialize() []byte { + var privKeyBytes [PrivKeyBytesLen]byte + p.Key.PutBytes(&privKeyBytes) + return privKeyBytes[:] +} diff --git a/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go new file mode 100644 index 0000000000..54c54be5f1 --- /dev/null +++ b/vendor/github.com/decred/dcrd/dcrec/secp256k1/v4/pubkey.go @@ -0,0 +1,237 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Copyright (c) 2015-2022 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package secp256k1 + +// References: +// [SEC1] Elliptic Curve Cryptography +// https://www.secg.org/sec1-v2.pdf +// +// [SEC2] Recommended Elliptic Curve Domain Parameters +// https://www.secg.org/sec2-v2.pdf +// +// [ANSI X9.62-1998] Public Key Cryptography For The Financial Services +// Industry: The Elliptic Curve Digital Signature Algorithm (ECDSA) + +import ( + "fmt" +) + +const ( + // PubKeyBytesLenCompressed is the number of bytes of a serialized + // compressed public key. + PubKeyBytesLenCompressed = 33 + + // PubKeyBytesLenUncompressed is the number of bytes of a serialized + // uncompressed public key. + PubKeyBytesLenUncompressed = 65 + + // PubKeyFormatCompressedEven is the identifier prefix byte for a public key + // whose Y coordinate is even when serialized in the compressed format per + // section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4). + PubKeyFormatCompressedEven byte = 0x02 + + // PubKeyFormatCompressedOdd is the identifier prefix byte for a public key + // whose Y coordinate is odd when serialized in the compressed format per + // section 2.3.4 of [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.4). 
+ PubKeyFormatCompressedOdd byte = 0x03
+
+ // PubKeyFormatUncompressed is the identifier prefix byte for a public key
+ // when serialized according to the uncompressed format per section 2.3.3 of
+ // [SEC1](https://secg.org/sec1-v2.pdf#subsubsection.2.3.3).
+ PubKeyFormatUncompressed byte = 0x04
+
+ // PubKeyFormatHybridEven is the identifier prefix byte for a public key
+ // whose Y coordinate is even when serialized according to the hybrid format
+ // per section 4.3.6 of [ANSI X9.62-1998].
+ //
+ // NOTE: This format makes little sense in practice and therefore this
+ // package will not produce public keys serialized in this format. However,
+ // it will parse them since they exist in the wild.
+ PubKeyFormatHybridEven byte = 0x06
+
+ // PubKeyFormatHybridOdd is the identifier prefix byte for a public key
+ // whose Y coordinate is odd when serialized according to the hybrid format
+ // per section 4.3.6 of [ANSI X9.62-1998].
+ //
+ // NOTE: This format makes little sense in practice and therefore this
+ // package will not produce public keys serialized in this format. However,
+ // it will parse them since they exist in the wild.
+ PubKeyFormatHybridOdd byte = 0x07
+)
+
+// PublicKey provides facilities for efficiently working with secp256k1 public
+// keys within this package and includes functions to serialize in both
+// uncompressed and compressed SEC (Standards for Efficient Cryptography)
+// formats.
+type PublicKey struct {
+ x FieldVal
+ y FieldVal
+}
+
+// NewPublicKey instantiates a new public key with the given x and y
+// coordinates.
+//
+// It should be noted that, unlike ParsePubKey, since this accepts arbitrary x
+// and y coordinates, it allows creation of public keys that are not valid
+// points on the secp256k1 curve. The IsOnCurve method of the returned instance
+// can be used to determine validity.
+func NewPublicKey(x, y *FieldVal) *PublicKey {
+ var pubKey PublicKey
+ pubKey.x.Set(x)
+ pubKey.y.Set(y)
+ return &pubKey
+}
+
+// ParsePubKey parses a secp256k1 public key encoded according to the format
+// specified by ANSI X9.62-1998, which means it is also compatible with the
+// SEC (Standards for Efficient Cryptography) specification which is a subset of
+// the former. In other words, it supports the uncompressed, compressed, and
+// hybrid formats as follows:
+//
+// Compressed:
+//
+// <32-byte X coordinate>
+//
+// Uncompressed:
+//
+// <32-byte X coordinate><32-byte Y coordinate>
+//
+// Hybrid:
+//
+// <32-byte X coordinate><32-byte Y coordinate>
+//
+// NOTE: The hybrid format makes little sense in practice and therefore this
+// package will not produce public keys serialized in this format. However,
+// this function will properly parse them since they exist in the wild.
+func ParsePubKey(serialized []byte) (key *PublicKey, err error) {
+ var x, y FieldVal
+ switch len(serialized) {
+ case PubKeyBytesLenUncompressed:
+ // Reject unsupported public key formats for the given length.
+ format := serialized[0]
+ switch format {
+ case PubKeyFormatUncompressed:
+ case PubKeyFormatHybridEven, PubKeyFormatHybridOdd:
+ default:
+ str := fmt.Sprintf("invalid public key: unsupported format: %x",
+ format)
+ return nil, makeError(ErrPubKeyInvalidFormat, str)
+ }
+
+ // Parse the x and y coordinates while ensuring that they are in the
+ // allowed range.
+ if overflow := x.SetByteSlice(serialized[1:33]); overflow {
+ str := "invalid public key: x >= field prime"
+ return nil, makeError(ErrPubKeyXTooBig, str)
+ }
+ if overflow := y.SetByteSlice(serialized[33:]); overflow {
+ str := "invalid public key: y >= field prime"
+ return nil, makeError(ErrPubKeyYTooBig, str)
+ }
+
+ // Ensure the oddness of the y coordinate matches the specified format
+ // for hybrid public keys.
+ if format == PubKeyFormatHybridEven || format == PubKeyFormatHybridOdd { + wantOddY := format == PubKeyFormatHybridOdd + if y.IsOdd() != wantOddY { + str := fmt.Sprintf("invalid public key: y oddness does not "+ + "match specified value of %v", wantOddY) + return nil, makeError(ErrPubKeyMismatchedOddness, str) + } + } + + // Reject public keys that are not on the secp256k1 curve. + if !isOnCurve(&x, &y) { + str := fmt.Sprintf("invalid public key: [%v,%v] not on secp256k1 "+ + "curve", x, y) + return nil, makeError(ErrPubKeyNotOnCurve, str) + } + + case PubKeyBytesLenCompressed: + // Reject unsupported public key formats for the given length. + format := serialized[0] + switch format { + case PubKeyFormatCompressedEven, PubKeyFormatCompressedOdd: + default: + str := fmt.Sprintf("invalid public key: unsupported format: %x", + format) + return nil, makeError(ErrPubKeyInvalidFormat, str) + } + + // Parse the x coordinate while ensuring that it is in the allowed + // range. + if overflow := x.SetByteSlice(serialized[1:33]); overflow { + str := "invalid public key: x >= field prime" + return nil, makeError(ErrPubKeyXTooBig, str) + } + + // Attempt to calculate the y coordinate for the given x coordinate such + // that the result pair is a point on the secp256k1 curve and the + // solution with desired oddness is chosen. + wantOddY := format == PubKeyFormatCompressedOdd + if !DecompressY(&x, wantOddY, &y) { + str := fmt.Sprintf("invalid public key: x coordinate %v is not on "+ + "the secp256k1 curve", x) + return nil, makeError(ErrPubKeyNotOnCurve, str) + } + y.Normalize() + + default: + str := fmt.Sprintf("malformed public key: invalid length: %d", + len(serialized)) + return nil, makeError(ErrPubKeyInvalidLen, str) + } + + return NewPublicKey(&x, &y), nil +} + +// SerializeUncompressed serializes a public key in the 65-byte uncompressed +// format. 
+func (p PublicKey) SerializeUncompressed() []byte { + // 0x04 || 32-byte x coordinate || 32-byte y coordinate + var b [PubKeyBytesLenUncompressed]byte + b[0] = PubKeyFormatUncompressed + p.x.PutBytesUnchecked(b[1:33]) + p.y.PutBytesUnchecked(b[33:65]) + return b[:] +} + +// SerializeCompressed serializes a public key in the 33-byte compressed format. +func (p PublicKey) SerializeCompressed() []byte { + // Choose the format byte depending on the oddness of the Y coordinate. + format := PubKeyFormatCompressedEven + if p.y.IsOdd() { + format = PubKeyFormatCompressedOdd + } + + // 0x02 or 0x03 || 32-byte x coordinate + var b [PubKeyBytesLenCompressed]byte + b[0] = format + p.x.PutBytesUnchecked(b[1:33]) + return b[:] +} + +// IsEqual compares this public key instance to the one passed, returning true +// if both public keys are equivalent. A public key is equivalent to another, +// if they both have the same X and Y coordinates. +func (p *PublicKey) IsEqual(otherPubKey *PublicKey) bool { + return p.x.Equals(&otherPubKey.x) && p.y.Equals(&otherPubKey.y) +} + +// AsJacobian converts the public key into a Jacobian point with Z=1 and stores +// the result in the provided result param. This allows the public key to be +// treated a Jacobian point in the secp256k1 group in calculations. +func (p *PublicKey) AsJacobian(result *JacobianPoint) { + result.X.Set(&p.x) + result.Y.Set(&p.y) + result.Z.SetInt(1) +} + +// IsOnCurve returns whether or not the public key represents a point on the +// secp256k1 curve. +func (p *PublicKey) IsOnCurve() bool { + return isOnCurve(&p.x, &p.y) +} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/doc.go b/vendor/github.com/go-kit/kit/tracing/opentracing/doc.go deleted file mode 100644 index c8d32d580a..0000000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package opentracing provides Go kit integration to the OpenTracing project. 
-// OpenTracing implements a general purpose interface that microservices can -// program against, and which adapts to all major distributed tracing systems. -package opentracing diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint.go b/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint.go deleted file mode 100644 index 4df1ef2648..0000000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint.go +++ /dev/null @@ -1,124 +0,0 @@ -package opentracing - -import ( - "context" - "strconv" - - "github.com/opentracing/opentracing-go" - otext "github.com/opentracing/opentracing-go/ext" - otlog "github.com/opentracing/opentracing-go/log" - - "github.com/go-kit/kit/endpoint" - "github.com/go-kit/kit/sd/lb" -) - -// TraceEndpoint returns a Middleware that wraps the `next` Endpoint in an -// OpenTracing Span called `operationName`. -// -// If `ctx` already has a Span, child span is created from it. -// If `ctx` doesn't yet have a Span, the new one is created. -func TraceEndpoint(tracer opentracing.Tracer, operationName string, opts ...EndpointOption) endpoint.Middleware { - cfg := &EndpointOptions{ - Tags: make(opentracing.Tags), - } - - for _, opt := range opts { - opt(cfg) - } - - return func(next endpoint.Endpoint) endpoint.Endpoint { - return func(ctx context.Context, request interface{}) (response interface{}, err error) { - if cfg.GetOperationName != nil { - if newOperationName := cfg.GetOperationName(ctx, operationName); newOperationName != "" { - operationName = newOperationName - } - } - - var span opentracing.Span - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - span = tracer.StartSpan( - operationName, - opentracing.ChildOf(parentSpan.Context()), - ) - } else { - span = tracer.StartSpan(operationName) - } - defer span.Finish() - - applyTags(span, cfg.Tags) - if cfg.GetTags != nil { - extraTags := cfg.GetTags(ctx) - applyTags(span, extraTags) - } - - ctx = opentracing.ContextWithSpan(ctx, span) - - defer 
func() { - if err != nil { - if lbErr, ok := err.(lb.RetryError); ok { - // handle errors originating from lb.Retry - fields := make([]otlog.Field, 0, len(lbErr.RawErrors)) - for idx, rawErr := range lbErr.RawErrors { - fields = append(fields, otlog.String( - "gokit.retry.error."+strconv.Itoa(idx+1), rawErr.Error(), - )) - } - - otext.LogError(span, lbErr, fields...) - - return - } - - // generic error - otext.LogError(span, err) - - return - } - - // test for business error - if res, ok := response.(endpoint.Failer); ok && res.Failed() != nil { - span.LogFields( - otlog.String("gokit.business.error", res.Failed().Error()), - ) - - if cfg.IgnoreBusinessError { - return - } - - // treating business error as real error in span. - otext.LogError(span, res.Failed()) - - return - } - }() - - return next(ctx, request) - } - } -} - -// TraceServer returns a Middleware that wraps the `next` Endpoint in an -// OpenTracing Span called `operationName` with server span.kind tag.. -func TraceServer(tracer opentracing.Tracer, operationName string, opts ...EndpointOption) endpoint.Middleware { - opts = append(opts, WithTags(map[string]interface{}{ - otext.SpanKindRPCServer.Key: otext.SpanKindRPCServer.Value, - })) - - return TraceEndpoint(tracer, operationName, opts...) -} - -// TraceClient returns a Middleware that wraps the `next` Endpoint in an -// OpenTracing Span called `operationName` with client span.kind tag. -func TraceClient(tracer opentracing.Tracer, operationName string, opts ...EndpointOption) endpoint.Middleware { - opts = append(opts, WithTags(map[string]interface{}{ - otext.SpanKindRPCClient.Key: otext.SpanKindRPCClient.Value, - })) - - return TraceEndpoint(tracer, operationName, opts...) 
-} - -func applyTags(span opentracing.Span, tags opentracing.Tags) { - for key, value := range tags { - span.SetTag(key, value) - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint_options.go b/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint_options.go deleted file mode 100644 index 6854271afe..0000000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/endpoint_options.go +++ /dev/null @@ -1,74 +0,0 @@ -package opentracing - -import ( - "context" - - "github.com/opentracing/opentracing-go" -) - -// EndpointOptions holds the options for tracing an endpoint -type EndpointOptions struct { - // IgnoreBusinessError if set to true will not treat a business error - // identified through the endpoint.Failer interface as a span error. - IgnoreBusinessError bool - - // GetOperationName is an optional function that can set the span operation name based on the existing one - // for the endpoint and information in the context. - // - // If the function is nil, or the returned name is empty, the existing name for the endpoint is used. - GetOperationName func(ctx context.Context, name string) string - - // Tags holds the default tags which will be set on span - // creation by our Endpoint middleware. - Tags opentracing.Tags - - // GetTags is an optional function that can extract tags - // from the context and add them to the span. - GetTags func(ctx context.Context) opentracing.Tags -} - -// EndpointOption allows for functional options to endpoint tracing middleware. -type EndpointOption func(*EndpointOptions) - -// WithOptions sets all configuration options at once by use of the EndpointOptions struct. -func WithOptions(options EndpointOptions) EndpointOption { - return func(o *EndpointOptions) { - *o = options - } -} - -// WithIgnoreBusinessError if set to true will not treat a business error -// identified through the endpoint.Failer interface as a span error. 
-func WithIgnoreBusinessError(ignoreBusinessError bool) EndpointOption { - return func(o *EndpointOptions) { - o.IgnoreBusinessError = ignoreBusinessError - } -} - -// WithOperationNameFunc allows to set function that can set the span operation name based on the existing one -// for the endpoint and information in the context. -func WithOperationNameFunc(getOperationName func(ctx context.Context, name string) string) EndpointOption { - return func(o *EndpointOptions) { - o.GetOperationName = getOperationName - } -} - -// WithTags adds default tags for the spans created by the Endpoint tracer. -func WithTags(tags opentracing.Tags) EndpointOption { - return func(o *EndpointOptions) { - if o.Tags == nil { - o.Tags = make(opentracing.Tags) - } - - for key, value := range tags { - o.Tags[key] = value - } - } -} - -// WithTagsFunc set the func to extracts additional tags from the context. -func WithTagsFunc(getTags func(ctx context.Context) opentracing.Tags) EndpointOption { - return func(o *EndpointOptions) { - o.GetTags = getTags - } -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/grpc.go b/vendor/github.com/go-kit/kit/tracing/opentracing/grpc.go deleted file mode 100644 index 3b0a65d83d..0000000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/grpc.go +++ /dev/null @@ -1,70 +0,0 @@ -package opentracing - -import ( - "context" - "encoding/base64" - "strings" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/metadata" - - "github.com/go-kit/log" -) - -// ContextToGRPC returns a grpc RequestFunc that injects an OpenTracing Span -// found in `ctx` into the grpc Metadata. If no such Span can be found, the -// RequestFunc is a noop. 
-func ContextToGRPC(tracer opentracing.Tracer, logger log.Logger) func(ctx context.Context, md *metadata.MD) context.Context { - return func(ctx context.Context, md *metadata.MD) context.Context { - if span := opentracing.SpanFromContext(ctx); span != nil { - // There's nothing we can do with an error here. - if err := tracer.Inject(span.Context(), opentracing.TextMap, metadataReaderWriter{md}); err != nil { - logger.Log("err", err) - } - } - return ctx - } -} - -// GRPCToContext returns a grpc RequestFunc that tries to join with an -// OpenTracing trace found in `req` and starts a new Span called -// `operationName` accordingly. If no trace could be found in `req`, the Span -// will be a trace root. The Span is incorporated in the returned Context and -// can be retrieved with opentracing.SpanFromContext(ctx). -func GRPCToContext(tracer opentracing.Tracer, operationName string, logger log.Logger) func(ctx context.Context, md metadata.MD) context.Context { - return func(ctx context.Context, md metadata.MD) context.Context { - var span opentracing.Span - wireContext, err := tracer.Extract(opentracing.TextMap, metadataReaderWriter{&md}) - if err != nil && err != opentracing.ErrSpanContextNotFound { - logger.Log("err", err) - } - span = tracer.StartSpan(operationName, ext.RPCServerOption(wireContext)) - return opentracing.ContextWithSpan(ctx, span) - } -} - -// A type that conforms to opentracing.TextMapReader and -// opentracing.TextMapWriter. 
-type metadataReaderWriter struct { - *metadata.MD -} - -func (w metadataReaderWriter) Set(key, val string) { - key = strings.ToLower(key) - if strings.HasSuffix(key, "-bin") { - val = base64.StdEncoding.EncodeToString([]byte(val)) - } - (*w.MD)[key] = append((*w.MD)[key], val) -} - -func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { - for k, vals := range *w.MD { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/go-kit/kit/tracing/opentracing/http.go b/vendor/github.com/go-kit/kit/tracing/opentracing/http.go deleted file mode 100644 index 29f04420b7..0000000000 --- a/vendor/github.com/go-kit/kit/tracing/opentracing/http.go +++ /dev/null @@ -1,71 +0,0 @@ -package opentracing - -import ( - "context" - "net" - "net/http" - "strconv" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - - kithttp "github.com/go-kit/kit/transport/http" - "github.com/go-kit/log" -) - -// ContextToHTTP returns an http RequestFunc that injects an OpenTracing Span -// found in `ctx` into the http headers. If no such Span can be found, the -// RequestFunc is a noop. -func ContextToHTTP(tracer opentracing.Tracer, logger log.Logger) kithttp.RequestFunc { - return func(ctx context.Context, req *http.Request) context.Context { - // Try to find a Span in the Context. - if span := opentracing.SpanFromContext(ctx); span != nil { - // Add standard OpenTracing tags. - ext.HTTPMethod.Set(span, req.Method) - ext.HTTPUrl.Set(span, req.URL.String()) - host, portString, err := net.SplitHostPort(req.URL.Host) - if err == nil { - ext.PeerHostname.Set(span, host) - if port, err := strconv.Atoi(portString); err == nil { - ext.PeerPort.Set(span, uint16(port)) - } - } else { - ext.PeerHostname.Set(span, req.URL.Host) - } - - // There's nothing we can do with any errors here. 
- if err = tracer.Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(req.Header), - ); err != nil { - logger.Log("err", err) - } - } - return ctx - } -} - -// HTTPToContext returns an http RequestFunc that tries to join with an -// OpenTracing trace found in `req` and starts a new Span called -// `operationName` accordingly. If no trace could be found in `req`, the Span -// will be a trace root. The Span is incorporated in the returned Context and -// can be retrieved with opentracing.SpanFromContext(ctx). -func HTTPToContext(tracer opentracing.Tracer, operationName string, logger log.Logger) kithttp.RequestFunc { - return func(ctx context.Context, req *http.Request) context.Context { - // Try to join to a trace propagated in `req`. - var span opentracing.Span - wireContext, err := tracer.Extract( - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(req.Header), - ) - if err != nil && err != opentracing.ErrSpanContextNotFound { - logger.Log("err", err) - } - - span = tracer.StartSpan(operationName, ext.RPCServerOption(wireContext)) - ext.HTTPMethod.Set(span, req.Method) - ext.HTTPUrl.Set(span, req.URL.String()) - return opentracing.ContextWithSpan(ctx, span) - } -} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 0000000000..94ff801df1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 
0000000000..c356960046 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 0000000000..5d37e294c5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 0000000000..ab59311813 --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,282 @@ +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. 
+ +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. 
We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. + +## Implementations (non-exhaustive) + +There are implementations for the following logging libraries: + +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- 
**github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + +## FAQ + +### Conceptual + +#### Why structured logging? + +- **Structured logs are more easily queryable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc. + +- **Structured logging makes it easier to have cross-referenceable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. + +- **Structured logs allow better dimensions of filtering**: if you have + structure to your logs, you've got more precise control over how much + information is logged -- you might choose in a particular configuration + to log certain keys but not others, only log lines where a certain key + matches a certain value, etc., instead of just having v-levels and names + to key off of. + +- **Structured logs better represent structured data**: sometimes, the + data that you want to log is inherently structured (think tuple-link + objects.) Structured logs allow you to preserve that structure when + outputting. + +#### Why V-levels? + +**V-levels give operators an easy way to control the chattiness of log +operations**. V-levels provide a way for a given package to distinguish +the relative importance or verbosity of a given log message. Then, if +a particular logger or package is logging too many messages, the user +of the package can simply change the v-levels for that library. 
+ +#### Why not named levels, like Info/Warning/Error? + +Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences +from Dave's ideas](#differences-from-daves-ideas). + +#### Why not allow format strings, too? + +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc. + +- They don't store structured data well, since contents are flattened into + a string. + +- They're not cross-referenceable. + +- They don't compress easily, since the message is not constant. + +(Unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys.) + +### Practical + +#### Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +#### What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` method to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +#### But I really want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. + +2. 
For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair. + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +#### How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack." + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs.) + +#### How do I choose my keys? + +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. + +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. 
+- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 0000000000..9d92a38f1d --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return Logger{ + level: 0, + sink: discardLogSink{}, + } +} + +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} + +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { +} + +func (l discardLogSink) Enabled(int) bool { + return false +} + +func (l discardLogSink) Info(int, string, ...interface{}) { +} + +func (l discardLogSink) Error(error, string, ...interface{}) { +} + +func (l discardLogSink) WithValues(...interface{}) LogSink { + return l +} + +func (l discardLogSink) WithName(string) LogSink { + return l +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000000..7accdb0c40 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. 
Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. 
For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. 
+type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. 
+type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. 
+// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. + buf.WriteByte(' ') + } + } + + if escapeKeys { + buf.WriteString(prettyString(k)) + } else { + // this is faster + buf.WriteByte('"') + buf.WriteString(k) + buf.WriteByte('"') + } + if f.outputFormat == outputJSON { + buf.WriteByte(':') + } else { + buf.WriteByte('=') + } + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) pretty(value interface{}) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `""` + } + + // Handle types that take full control of logging. + if v, ok := value.(logr.Marshaler); ok { + // Replace the value with what the type wants to get logged. + // That then gets handled below via reflection. + value = invokeMarshaler(v) + } + + // Handle types that want to format themselves. 
+ switch v := value.(type) { + case fmt.Stringer: + value = invokeStringer(v) + case error: + value = invokeError(v) + } + + // Handling the most common types without reflect is a small perf win. + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + 
return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. 
+ continue + } + name := "" + omitempty := false + if tag, found := fld.Tag.Lookup("json"); found { + if tag == "-" { + continue + } + if comma := strings.Index(tag, ","); comma != -1 { + if n := tag[:comma]; n != "" { + name = n + } + rest := tag[comma:] + if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { + omitempty = true + } + } else { + name = tag + } + } + if omitempty && isEmpty(v.Field(i)) { + continue + } + if i > 0 { + buf.WriteByte(',') + } + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue + } + if name == "" { + name = fld.Name + } + // field names can't contain characters which need escaping + buf.WriteByte('"') + buf.WriteString(name) + buf.WriteByte('"') + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + case reflect.Slice, reflect.Array: + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + e := v.Index(i) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) + } + buf.WriteByte(']') + return buf.String() + case reflect.Map: + buf.WriteByte('{') + // This does not sort the map keys, for best perf. + it := v.MapRange() + i := 0 + for it.Next() { + if i > 0 { + buf.WriteByte(',') + } + // If a map key supports TextMarshaler, use it. + keystr := "" + if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { + txt, err := m.MarshalText() + if err != nil { + keystr = fmt.Sprintf("", err.Error()) + } else { + keystr = string(txt) + } + keystr = prettyString(keystr) + } else { + // prettyWithFlags will produce already-escaped values + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) + if t.Key().Kind() != reflect.String { + // JSON only does string keys. 
Unlike Go's standard JSON, we'll + // convert just about anything to a string. + keystr = prettyString(keystr) + } + } + buf.WriteString(keystr) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) + i++ + } + buf.WriteByte('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return "null" + } + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) + } + return fmt.Sprintf(`""`, t.Kind().String()) +} + +func prettyString(s string) string { + // Avoid escaping (which does allocations) if we can. + if needsEscape(s) { + return strconv.Quote(s) + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + b.WriteByte('"') + b.WriteString(s) + b.WriteByte('"') + return b.String() +} + +// needsEscape determines whether the input string needs to be escaped or not, +// without doing any allocations. +func needsEscape(s string) bool { + for _, r := range s { + if !strconv.IsPrint(r) || r == '\\' || r == '"' { + return true + } + } + return false +} + +func isEmpty(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func invokeMarshaler(m logr.Marshaler) (ret interface{}) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return m.MarshalLog() +} + +func invokeStringer(s fmt.Stringer) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return s.String() +} + +func 
invokeError(e error) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("", r) + } + }() + return e.Error() +} + +// Caller represents the original call site for a log line, after considering +// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and +// Line fields will always be provided, while the Func field is optional. +// Users can set the render hook fields in Options to examine logged key-value +// pairs, one of which will be {"caller", Caller} if the Options.LogCaller +// field is enabled for the given MessageClass. +type Caller struct { + // File is the basename of the file for this call site. + File string `json:"file"` + // Line is the line number in the file for this call site. + Line int `json:"line"` + // Func is the function name for this call site, or empty if + // Options.LogCallerFunc is not enabled. + Func string `json:"function,omitempty"` +} + +func (f Formatter) caller() Caller { + // +1 for this frame, +1 for Info/Error. + pc, file, line, ok := runtime.Caller(f.depth + 2) + if !ok { + return Caller{"", 0, ""} + } + fn := "" + if f.opts.LogCallerFunc { + if fp := runtime.FuncForPC(pc); fp != nil { + fn = fp.Name() + } + } + + return Caller{filepath.Base(file), line, fn} +} + +const noValue = "" + +func (f Formatter) nonStringKey(v interface{}) string { + return fmt.Sprintf("", f.snippet(v)) +} + +// snippet produces a short snippet string of an arbitrary value. +func (f Formatter) snippet(v interface{}) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). 
+func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. 
+func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. 
+func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 0000000000..c3b56b3d2c --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,510 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging + +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. +// +// Usage +// +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". +// +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) +// +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) +// +// Errors are much the same. 
Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. +// +// Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. +// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: +// +// logger.WithName("compactor").Info("started", "time", time.Now()) +// +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. 
It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). +// +// Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) +// +// Best Practices +// +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. Variable information can then be attached using +// named values. +// +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. +// +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. 
For cases where passing a logger is optional, a pointer to Logger +// should be used. +// +// Key Naming Conventions +// +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// * be human-readable and meaningful (not auto-generated or simple ordinals) +// * be constant (not dependent on input data) +// * contain only printable characters +// * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. +// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: +// * "caller": the calling information (file/line) of a particular log line +// * "error": the underlying error value in the `Error` method +// * "level": the log level +// * "logger": the name of the associated logger +// * "msg": the log message +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// * "ts": the timestamp for a log line +// +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). +// +// Break Glass +// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// // Underlier exposes access to the underlying logging implementation. 
+// //   Since callers only have a logr.Logger, they have to know which
+// //   implementation is in use, so this interface is less of an abstraction
+// //   and more of way to test type conversion.
+//   type Underlier interface {
+//       GetUnderlying() <underlying-type>
+//   }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//   func DoSomethingWithImpl(log logr.Logger) {
+//       if underlier, ok := log.GetSink().(impl.Underlier); ok {
+//           implLogger := underlier.GetUnderlying()
+//           ...
+//       }
+//   }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//   // WithFooBar changes the foobar parameter in the log sink and returns a
+//   // new logger with that modified sink. It does nothing for loggers where
+//   // the sink doesn't support that parameter.
+//   func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+//       if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+//           log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+//       }
+//       return log
+//   }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+import (
+	"context"
+)
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users.
+func New(sink LogSink) Logger {
+	logger := Logger{}
+	logger.setSink(sink)
+	sink.Init(runtimeInfo)
+	return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+	l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). The log message will always be emitted, regardless of +// verbosity level. 
+// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} + +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l +} + +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. 
If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. 
+type contextKey struct{} + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. 
+ // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. 
+ // + // If depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) LogSink +} + +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. +// +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. +// +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. + GetCallStackHelper() func() +} + +// Marshaler is an optional interface that logged values may choose to +// implement. Loggers with structured output, such as JSON, should +// log the object return by the MarshalLog method instead of the +// original value. 
+type Marshaler interface { + // MarshalLog can be used to: + // - ensure that structs are not logged as strings when the original + // value has a String method: return a different type without a + // String method + // - select which fields of a complex type should get logged: + // return a simpler struct with fewer fields + // - log unexported fields: return a different struct + // with exported fields + // + // It may return any value of any type. + MarshalLog() interface{} +} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE similarity index 100% rename from vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/LICENSE rename to vendor/github.com/go-logr/stdr/LICENSE diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 0000000000..5158667890 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 0000000000..93a8aab51b --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. 
+type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. 
+ Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. 
+func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/goccy/go-json/.codecov.yml b/vendor/github.com/goccy/go-json/.codecov.yml new file mode 100644 index 0000000000..e98134570c --- /dev/null +++ b/vendor/github.com/goccy/go-json/.codecov.yml @@ -0,0 +1,32 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + default: + target: 70% + threshold: 2% + patch: off + changes: no + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "header,diff" + behavior: default + require_changes: no + +ignore: + - internal/encoder/vm_color + - internal/encoder/vm_color_indent diff --git a/vendor/github.com/goccy/go-json/.gitignore b/vendor/github.com/goccy/go-json/.gitignore new file mode 100644 index 0000000000..378283829c --- /dev/null +++ b/vendor/github.com/goccy/go-json/.gitignore @@ -0,0 +1,2 @@ +cover.html +cover.out diff --git a/vendor/github.com/goccy/go-json/.golangci.yml b/vendor/github.com/goccy/go-json/.golangci.yml new file mode 100644 index 0000000000..57ae5a528f --- /dev/null +++ b/vendor/github.com/goccy/go-json/.golangci.yml @@ -0,0 +1,83 @@ +run: + skip-files: + - encode_optype.go + - ".*_test\\.go$" + +linters-settings: + govet: + enable-all: true + disable: + - shadow + +linters: + enable-all: true + disable: + - dogsled + - dupl + - exhaustive + - exhaustivestruct + - errorlint + - forbidigo + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofumpt + - gomnd + - gosec + - ifshort + - lll + - makezero + - nakedret + - nestif + - nlreturn + - paralleltest + - testpackage + - thelper + - wrapcheck + - interfacer + - lll + - nakedret + - nestif + - nlreturn + - testpackage + - wsl + - varnamelen + - nilnil + - ireturn + - govet + - forcetypeassert + - cyclop + - containedctx + - revive + +issues: + 
exclude-rules: + # not needed + - path: /*.go + text: "ST1003: should not use underscores in package names" + linters: + - stylecheck + - path: /*.go + text: "don't use an underscore in package name" + linters: + - golint + - path: rtype.go + linters: + - golint + - stylecheck + - path: error.go + linters: + - staticcheck + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md new file mode 100644 index 0000000000..d63009fd7d --- /dev/null +++ b/vendor/github.com/goccy/go-json/CHANGELOG.md @@ -0,0 +1,393 @@ +# v0.9.11 - 2022/08/18 + +### Fix bugs + +* Fix unexpected behavior when buffer ends with backslash ( #383 ) +* Fix stream decoding of escaped character ( #387 ) + +# v0.9.10 - 2022/07/15 + +### Fix bugs + +* Fix boundary exception of type caching ( #382 ) + +# v0.9.9 - 2022/07/15 + +### Fix bugs + +* Fix encoding of directed interface with typed nil ( #377 ) +* Fix embedded primitive type encoding using alias ( #378 ) +* Fix slice/array type encoding with types implementing MarshalJSON ( #379 ) +* Fix unicode decoding when the expected buffer state is not met after reading ( #380 ) + +# v0.9.8 - 2022/06/30 + +### Fix bugs + +* Fix decoding of surrogate-pair ( #365 ) +* Fix handling of embedded primitive type ( #366 ) +* Add validation of escape sequence for decoder ( #367 ) +* Fix stream tokenizing respecting UseNumber ( #369 ) +* Fix encoding when struct pointer type that implements Marshal JSON is embedded ( #375 ) + +### Improve performance + +* Improve performance of linkRecursiveCode ( #368 ) + +# v0.9.7 - 2022/04/22 + +### Fix bugs + +#### Encoder + +* Add filtering process for encoding on slow path ( #355 ) +* Fix encoding of interface{} with pointer type ( #363 ) + +#### Decoder + +* Fix map key 
decoder that implements UnmarshalJSON ( #353 ) +* Fix decoding of []uint8 type ( #361 ) + +### New features + +* Add DebugWith option for encoder ( #356 ) + +# v0.9.6 - 2022/03/22 + +### Fix bugs + +* Correct the handling of the minimum value of int type for decoder ( #344 ) +* Fix bugs of stream decoder's bufferSize ( #349 ) +* Add a guard to use typeptr more safely ( #351 ) + +### Improve decoder performance + +* Improve escapeString's performance ( #345 ) + +### Others + +* Update go version for CI ( #347 ) + +# v0.9.5 - 2022/03/04 + +### Fix bugs + +* Fix panic when decoding time.Time with context ( #328 ) +* Fix reading the next character in buffer to nul consideration ( #338 ) +* Fix incorrect handling on skipValue ( #341 ) + +### Improve decoder performance + +* Improve performance when a payload contains escape sequence ( #334 ) + +# v0.9.4 - 2022/01/21 + +* Fix IsNilForMarshaler for string type with omitempty ( #323 ) +* Fix the case where the embedded field is at the end ( #326 ) + +# v0.9.3 - 2022/01/14 + +* Fix logic of removing struct field for decoder ( #322 ) + +# v0.9.2 - 2022/01/14 + +* Add invalid decoder to delay type error judgment at decode ( #321 ) + +# v0.9.1 - 2022/01/11 + +* Fix encoding of MarshalText/MarshalJSON operation with head offset ( #319 ) + +# v0.9.0 - 2022/01/05 + +### New feature + +* Supports dynamic filtering of struct fields ( #314 ) + +### Improve encoding performance + +* Improve map encoding performance ( #310 ) +* Optimize encoding path for escaped string ( #311 ) +* Add encoding option for performance ( #312 ) + +### Fix bugs + +* Fix panic at encoding map value on 1.18 ( #310 ) +* Fix MarshalIndent for interface type ( #317 ) + +# v0.8.1 - 2021/12/05 + +* Fix operation conversion from PtrHead to Head in Recursive type ( #305 ) + +# v0.8.0 - 2021/12/02 + +* Fix embedded field conflict behavior ( #300 ) +* Refactor compiler for encoder ( #301 #302 ) + +# v0.7.10 - 2021/10/16 + +* Fix conversion from pointer to uint64 ( 
#294 ) + +# v0.7.9 - 2021/09/28 + +* Fix encoding of nil value about interface type that has method ( #291 ) + +# v0.7.8 - 2021/09/01 + +* Fix mapassign_faststr for indirect struct type ( #283 ) +* Fix encoding of not empty interface type ( #284 ) +* Fix encoding of empty struct interface type ( #286 ) + +# v0.7.7 - 2021/08/25 + +* Fix invalid utf8 on stream decoder ( #279 ) +* Fix buffer length bug on string stream decoder ( #280 ) + +Thank you @orisano !! + +# v0.7.6 - 2021/08/13 + +* Fix nil slice assignment ( #276 ) +* Improve error message ( #277 ) + +# v0.7.5 - 2021/08/12 + +* Fix encoding of embedded struct with tags ( #265 ) +* Fix encoding of embedded struct that isn't first field ( #272 ) +* Fix decoding of binary type with escaped char ( #273 ) + +# v0.7.4 - 2021/07/06 + +* Fix encoding of indirect layout structure ( #264 ) + +# v0.7.3 - 2021/06/29 + +* Fix encoding of pointer type in empty interface ( #262 ) + +# v0.7.2 - 2021/06/26 + +### Fix decoder + +* Add decoder for func type to fix decoding of nil function value ( #257 ) +* Fix stream decoding of []byte type ( #258 ) + +### Performance + +* Improve decoding performance of map[string]interface{} type ( use `mapassign_faststr` ) ( #256 ) +* Improve encoding performance of empty interface type ( remove recursive calling of `vm.Run` ) ( #259 ) + +### Benchmark + +* Add bytedance/sonic as benchmark target ( #254 ) + +# v0.7.1 - 2021/06/18 + +### Fix decoder + +* Fix error when unmarshal empty array ( #253 ) + +# v0.7.0 - 2021/06/12 + +### Support context for MarshalJSON and UnmarshalJSON ( #248 ) + +* json.MarshalContext(context.Context, interface{}, ...json.EncodeOption) ([]byte, error) +* json.NewEncoder(io.Writer).EncodeContext(context.Context, interface{}, ...json.EncodeOption) error +* json.UnmarshalContext(context.Context, []byte, interface{}, ...json.DecodeOption) error +* json.NewDecoder(io.Reader).DecodeContext(context.Context, interface{}) error + +```go +type MarshalerContext interface { + 
MarshalJSON(context.Context) ([]byte, error) +} + +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} +``` + +### Add DecodeFieldPriorityFirstWin option ( #242 ) + +In the default behavior, go-json, like encoding/json, will reflect the result of the last evaluation when a field with the same name exists. I've added new options to allow you to change this behavior. `json.DecodeFieldPriorityFirstWin` option reflects the result of the first evaluation if a field with the same name exists. This behavior has a performance advantage as it allows the subsequent strings to be skipped if all fields have been evaluated. + +### Fix encoder + +* Fix indent number contains recursive type ( #249 ) +* Fix encoding of using empty interface as map key ( #244 ) + +### Fix decoder + +* Fix decoding fields containing escaped characters ( #237 ) + +### Refactor + +* Move some tests to subdirectory ( #243 ) +* Refactor package layout for decoder ( #238 ) + +# v0.6.1 - 2021/06/02 + +### Fix encoder + +* Fix value of totalLength for encoding ( #236 ) + +# v0.6.0 - 2021/06/01 + +### Support Colorize option for encoding (#233) + +```go +b, err := json.MarshalWithOption(v, json.Colorize(json.DefaultColorScheme)) +if err != nil { + ... 
+} +fmt.Println(string(b)) // print colored json +``` + +### Refactor + +* Fix opcode layout - Adjust memory layout of the opcode to 128 bytes in a 64-bit environment ( #230 ) +* Refactor encode option ( #231 ) +* Refactor escape string ( #232 ) + +# v0.5.1 - 2021/5/20 + +### Optimization + +* Add type addrShift to enable bigger encoder/decoder cache ( #213 ) + +### Fix decoder + +* Keep original reference of slice element ( #229 ) + +### Refactor + +* Refactor Debug mode for encoding ( #226 ) +* Generate VM sources for encoding ( #227 ) +* Refactor validator for null/true/false for decoding ( #221 ) + +# v0.5.0 - 2021/5/9 + +### Supports using omitempty and string tags at the same time ( #216 ) + +### Fix decoder + +* Fix stream decoder for unicode char ( #215 ) +* Fix decoding of slice element ( #219 ) +* Fix calculating of buffer length for stream decoder ( #220 ) + +### Refactor + +* replace skipWhiteSpace goto by loop ( #212 ) + +# v0.4.14 - 2021/5/4 + +### Benchmark + +* Add valyala/fastjson to benchmark ( #193 ) +* Add benchmark task for CI ( #211 ) + +### Fix decoder + +* Fix decoding of slice with unmarshal json type ( #198 ) +* Fix decoding of null value for interface type that does not implement Unmarshaler ( #205 ) +* Fix decoding of null value to []byte by json.Unmarshal ( #206 ) +* Fix decoding of backslash char at the end of string ( #207 ) +* Fix stream decoder for null/true/false value ( #208 ) +* Fix stream decoder for slow reader ( #211 ) + +### Performance + +* If cap of slice is enough, reuse slice data for compatibility with encoding/json ( #200 ) + +# v0.4.13 - 2021/4/20 + +### Fix json.Compact and json.Indent + +* Support validation the input buffer for json.Compact and json.Indent ( #189 ) +* Optimize json.Compact and json.Indent ( improve memory footprint ) ( #190 ) + +# v0.4.12 - 2021/4/15 + +### Fix encoder + +* Fix unnecessary indent for empty slice type ( #181 ) +* Fix encoding of omitempty feature for the slice or interface type ( 
#183 ) +* Fix encoding custom types zero values with omitempty when marshaller exists ( #187 ) + +### Fix decoder + +* Fix decoder for invalid top level value ( #184 ) +* Fix decoder for invalid number value ( #185 ) + +# v0.4.11 - 2021/4/3 + +* Improve decoder performance for interface type + +# v0.4.10 - 2021/4/2 + +### Fix encoder + +* Fixed a bug when encoding slice and map containing recursive structures +* Fixed a logic to determine if indirect reference + +# v0.4.9 - 2021/3/29 + +### Add debug mode + +If you use `json.MarshalWithOption(v, json.Debug())` and `panic` occurred in `go-json`, produces debug information to console. + +### Support a new feature to compatible with encoding/json + +- invalid UTF-8 is coerced to valid UTF-8 ( without performance down ) + +### Fix encoder + +- Fixed handling of MarshalJSON of function type + +### Fix decoding of slice of pointer type + +If there is a pointer value, go-json will use it. (This behavior is necessary to achieve the ability to prioritize pre-filled values). However, since slices are reused internally, there was a bug that referred to the previous pointer value. Therefore, it is not necessary to refer to the pointer value in advance for the slice element, so we explicitly initialize slice element by `nil`. + +# v0.4.8 - 2021/3/21 + +### Reduce memory usage at compile time + +* go-json have used about 2GB of memory at compile time, but now it can compile with about less than 550MB. 
+ +### Fix any encoder's bug + +* Add many test cases for encoder +* Fix composite type ( slice/array/map ) +* Fix pointer types +* Fix encoding of MarshalJSON or MarshalText or json.Number type + +### Refactor encoder + +* Change package layout for reducing memory usage at compile +* Remove anonymous and only operation +* Remove root property from encodeCompileContext and opcode + +### Fix CI + +* Add Go 1.16 +* Remove Go 1.13 +* Fix `make cover` task + +### Number/Delim/Token/RawMessage use the types defined in encoding/json by type alias + +# v0.4.7 - 2021/02/22 + +### Fix decoder + +* Fix decoding of deep recursive structure +* Fix decoding of embedded unexported pointer field +* Fix invalid test case +* Fix decoding of invalid value +* Fix decoding of prefilled value +* Fix not being able to return UnmarshalTypeError when it should be returned +* Fix decoding of null value +* Fix decoding of type of null string +* Use pre allocated pointer if exists it at decoding + +### Reduce memory usage at compile + +* Integrate int/int8/int16/int32/int64 and uint/uint8/uint16/uint32/uint64 operation to reduce memory usage at compile + +### Remove unnecessary optype diff --git a/vendor/github.com/goccy/go-json/LICENSE b/vendor/github.com/goccy/go-json/LICENSE new file mode 100644 index 0000000000..6449c8bff6 --- /dev/null +++ b/vendor/github.com/goccy/go-json/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Masaaki Goshima + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile new file mode 100644 index 0000000000..363563ab9b --- /dev/null +++ b/vendor/github.com/goccy/go-json/Makefile @@ -0,0 +1,39 @@ +PKG := github.com/goccy/go-json + +BIN_DIR := $(CURDIR)/bin +PKGS := $(shell go list ./... | grep -v internal/cmd|grep -v test) +COVER_PKGS := $(foreach pkg,$(PKGS),$(subst $(PKG),.,$(pkg))) + +COMMA := , +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +COVERPKG_OPT := $(subst $(SPACE),$(COMMA),$(COVER_PKGS)) + +$(BIN_DIR): + @mkdir -p $(BIN_DIR) + +.PHONY: cover +cover: + go test -coverpkg=$(COVERPKG_OPT) -coverprofile=cover.out ./... + +.PHONY: cover-html +cover-html: cover + go tool cover -html=cover.out + +.PHONY: lint +lint: golangci-lint + golangci-lint run + +golangci-lint: | $(BIN_DIR) + @{ \ + set -e; \ + GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \ + cd $$GOLANGCI_LINT_TMP_DIR; \ + go mod init tmp; \ + GOBIN=$(BIN_DIR) go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.36.0; \ + rm -rf $$GOLANGCI_LINT_TMP_DIR; \ + } + +.PHONY: generate +generate: + go generate ./internal/... 
diff --git a/vendor/github.com/goccy/go-json/README.md b/vendor/github.com/goccy/go-json/README.md new file mode 100644 index 0000000000..5686237735 --- /dev/null +++ b/vendor/github.com/goccy/go-json/README.md @@ -0,0 +1,529 @@ +# go-json + +![Go](https://github.com/goccy/go-json/workflows/Go/badge.svg) +[![GoDoc](https://godoc.org/github.com/goccy/go-json?status.svg)](https://pkg.go.dev/github.com/goccy/go-json?tab=doc) +[![codecov](https://codecov.io/gh/goccy/go-json/branch/master/graph/badge.svg)](https://codecov.io/gh/goccy/go-json) + +Fast JSON encoder/decoder compatible with encoding/json for Go + + + +# Roadmap + +``` +* version ( expected release date ) + +* v0.9.0 + | + | while maintaining compatibility with encoding/json, we will add convenient APIs + | + v +* v1.0.0 +``` + +We are accepting requests for features that will be implemented between v0.9.0 and v.1.0.0. +If you have the API you need, please submit your issue [here](https://github.com/goccy/go-json/issues). + +# Features + +- Drop-in replacement of `encoding/json` +- Fast ( See [Benchmark section](https://github.com/goccy/go-json#benchmarks) ) +- Flexible customization with options +- Coloring the encoded string +- Can propagate context.Context to `MarshalJSON` or `UnmarshalJSON` +- Can dynamically filter the fields of the structure type-safely + +# Installation + +``` +go get github.com/goccy/go-json +``` + +# How to use + +Replace import statement from `encoding/json` to `github.com/goccy/go-json` + +``` +-import "encoding/json" ++import "github.com/goccy/go-json" +``` + +# JSON library comparison + +| name | encoder | decoder | compatible with `encoding/json` | +| :----: | :------: | :-----: | :-----------------------------: | +| encoding/json | yes | yes | N/A | +| [json-iterator/go](https://github.com/json-iterator/go) | yes | yes | partial | +| [easyjson](https://github.com/mailru/easyjson) | yes | yes | no | +| [gojay](https://github.com/francoispqt/gojay) | yes | yes | no | +| 
[segmentio/encoding/json](https://github.com/segmentio/encoding/tree/master/json) | yes | yes | partial | +| [jettison](https://github.com/wI2L/jettison) | yes | no | no | +| [simdjson-go](https://github.com/minio/simdjson-go) | no | yes | no | +| goccy/go-json | yes | yes | yes | + +- `json-iterator/go` isn't compatible with `encoding/json` in many ways (e.g. https://github.com/json-iterator/go/issues/229 ), but it hasn't been supported for a long time. +- `segmentio/encoding/json` is well supported for encoders, but some are not supported for decoder APIs such as `Token` ( streaming decode ) + +## Other libraries + +- [jingo](https://github.com/bet365/jingo) + +I tried the benchmark but it didn't work. +Also, it seems to panic when it receives an unexpected value because there is no error handling... + +- [ffjson](https://github.com/pquerna/ffjson) + +Benchmarking gave very slow results. +It seems that it is assumed that the user will use the buffer pool properly. +Also, development seems to have already stopped + +# Benchmarks + +``` +$ cd benchmarks +$ go test -bench . +``` + +## Encode + + + + +## Decode + + + + + + +# Fuzzing + +[go-json-fuzz](https://github.com/goccy/go-json-fuzz) is the repository for fuzzing tests. +If you run the test in this repository and find a bug, please commit to corpus to go-json-fuzz and report the issue to [go-json](https://github.com/goccy/go-json/issues). + +# How it works + +`go-json` is very fast in both encoding and decoding compared to other libraries. +It's easier to implement by using automatic code generation for performance or by using a dedicated interface, but `go-json` dares to stick to compatibility with `encoding/json` and is the simple interface. Despite this, we are developing with the aim of being the fastest library. + +Here, we explain the various speed-up techniques implemented by `go-json`. + +## Basic technique + +The techniques listed here are the ones used by most of the libraries listed above. 
+ +### Buffer reuse + +Since the only value required for the result of `json.Marshal(interface{}) ([]byte, error)` is `[]byte`, the only value that must be allocated during encoding is the return value `[]byte` . + +Also, as the number of allocations increases, the performance will be affected, so the number of allocations should be kept as low as possible when creating `[]byte`. + +Therefore, there is a technique to reduce the number of times a new buffer must be allocated by reusing the buffer used for the previous encoding by using `sync.Pool`. + +Finally, you allocate a buffer that is as long as the resulting buffer and copy the contents into it, you only need to allocate the buffer once in theory. + +```go +type buffer struct { + data []byte +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return &buffer{data: make([]byte, 0, 1024)} + }, +} + +buf := bufPool.Get().(*buffer) +data := encode(buf.data) // reuse buf.data + +newBuf := make([]byte, len(data)) +copy(newBuf, buf) + +buf.data = data +bufPool.Put(buf) +``` + +### Elimination of reflection + +As you know, the reflection operation is very slow. + +Therefore, using the fact that the address position where the type information is stored is fixed for each binary ( we call this `typeptr` ), +we can use the address in the type information to call a pre-built optimized process. + +For example, you can get the address to the type information from `interface{}` as follows and you can use that information to call a process that does not have reflection. + +To process without reflection, pass a pointer (`unsafe.Pointer`) to the value is stored. 
+ +```go + +type emptyInterface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +var typeToEncoder = map[uintptr]func(unsafe.Pointer)([]byte, error){} + +func Marshal(v interface{}) ([]byte, error) { + iface := (*emptyInterface)(unsafe.Pointer(&v) + typeptr := uintptr(iface.typ) + if enc, exists := typeToEncoder[typeptr]; exists { + return enc(iface.ptr) + } + ... +} +``` + +※ In reality, `typeToEncoder` can be referenced by multiple goroutines, so exclusive control is required. + +## Unique speed-up technique + +## Encoder + +### Do not escape arguments of `Marshal` + +`json.Marshal` and `json.Unmarshal` receive `interface{}` value and they perform type determination dynamically to process. +In normal case, you need to use the `reflect` library to determine the type dynamically, but since `reflect.Type` is defined as `interface`, when you call the method of `reflect.Type`, The reflect's argument is escaped. + +Therefore, the arguments for `Marshal` and `Unmarshal` are always escape to the heap. +However, `go-json` can use the feature of `reflect.Type` while avoiding escaping. + +`reflect.Type` is defined as `interface`, but in reality `reflect.Type` is implemented only by the structure `rtype` defined in the `reflect` package. +For this reason, to date `reflect.Type` is the same as `*reflect.rtype`. + +Therefore, by directly handling `*reflect.rtype`, which is an implementation of `reflect.Type`, it is possible to avoid escaping because it changes from `interface` to using `struct`. + +The technique for working with `*reflect.rtype` directly from `go-json` is implemented at [rtype.go](https://github.com/goccy/go-json/blob/master/internal/runtime/rtype.go) + +Also, the same technique is cut out as a library ( https://github.com/goccy/go-reflect ) + +Initially this feature was the default behavior of `go-json`. 
+But after careful testing, I found that I passed a large value to `json.Marshal()` and if the argument could not be assigned to the stack, it could not be properly escaped to the heap (a bug in the Go compiler). + +Therefore, this feature will be provided as an **optional** until this issue is resolved. + +To use it, add `NoEscape` like `MarshalNoEscape()` + +### Encoding using opcode sequence + +I explained that you can use `typeptr` to call a pre-built process from type information. + +In other libraries, this dedicated process is processed by making it an function calling like anonymous function, but function calls are inherently slow processes and should be avoided as much as possible. + +Therefore, `go-json` adopted the Instruction-based execution processing system, which is also used to implement virtual machines for programming language. + +If it is the first type to encode, create the opcode ( instruction ) sequence required for encoding. +From the second time onward, use `typeptr` to get the cached pre-built opcode sequence and encode it based on it. An example of the opcode sequence is shown below. + +```go +json.Marshal(struct{ + X int `json:"x"` + Y string `json:"y"` +}{X: 1, Y: "hello"}) +``` + +When encoding a structure like the one above, create a sequence of opcodes like this: + +``` +- opStructFieldHead ( `{` ) +- opStructFieldInt ( `"x": 1,` ) +- opStructFieldString ( `"y": "hello"` ) +- opStructEnd ( `}` ) +- opEnd +``` + +※ When processing each operation, write the letters on the right. + +In addition, each opcode is managed by the following structure ( +Pseudo code ). + +```go +type opType int +const ( + opStructFieldHead opType = iota + opStructFieldInt + opStructFieldStirng + opStructEnd + opEnd +) +type opcode struct { + op opType + key []byte + next *opcode +} +``` + +The process of encoding using the opcode sequence is roughly implemented as follows. 
+ +```go +func encode(code *opcode, b []byte, p unsafe.Pointer) ([]byte, error) { + for { + switch code.op { + case opStructFieldHead: + b = append(b, '{') + code = code.next + case opStructFieldInt: + b = append(b, code.key...) + b = appendInt((*int)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructFieldString: + b = append(b, code.key...) + b = appendString((*string)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructEnd: + b = append(b, '}') + code = code.next + case opEnd: + goto END + } + } +END: + return b, nil +} +``` + +In this way, the huge `switch-case` is used to encode by manipulating the linked list opcodes to avoid unnecessary function calls. + +### Opcode sequence optimization + +One of the advantages of encoding using the opcode sequence is the ease of optimization. +The opcode sequence mentioned above is actually converted into the following optimized operations and used. + +``` +- opStructFieldHeadInt ( `{"x": 1,` ) +- opStructEndString ( `"y": "hello"}` ) +- opEnd +``` + +It has been reduced from 5 opcodes to 3 opcodes ! +Reducing the number of opcodees means reducing the number of branches with `switch-case`. +In other words, the closer the number of operations is to 1, the faster the processing can be performed. + +In `go-json`, optimization to reduce the number of opcodes itself like the above and it speeds up by preparing opcodes with optimized paths. + +### Change recursive call from CALL to JMP + +Recursive processing is required during encoding if the type is defined recursively as follows: + +```go +type T struct { + X int + U *U +} + +type U struct { + T *T +} + +b, err := json.Marshal(&T{ + X: 1, + U: &U{ + T: &T{ + X: 2, + }, + }, +}) +fmt.Println(string(b)) // {"X":1,"U":{"T":{"X":2,"U":null}}} +``` + +In `go-json`, recursive processing is processed by the operation type of ` opStructFieldRecursive`. 
+ +In this operation, after acquiring the opcode sequence used for recursive processing, the function is **not** called recursively as it is, but the necessary values ​​are saved by itself and implemented by moving to the next operation. + +The technique of implementing recursive processing with the `JMP` operation while avoiding the `CALL` operation is a famous technique for implementing a high-speed virtual machine. + +For more details, please refer to [the article](https://engineering.mercari.com/blog/entry/1599563768-081104c850) ( but Japanese only ). + +### Dispatch by typeptr from map to slice + +When retrieving the data cached from the type information by `typeptr`, we usually use map. +Map requires exclusive control, so use `sync.Map` for a naive implementation. + +However, this is slow, so it's a good idea to use the `atomic` package for exclusive control as implemented by `segmentio/encoding/json` ( https://github.com/segmentio/encoding/blob/master/json/codec.go#L41-L55 ). + +This implementation slows down the set instead of speeding up the get, but it works well because of the nature of the library, it encodes much more for the same type. + +However, as a result of profiling, I noticed that `runtime.mapaccess2` accounts for a significant percentage of the execution time. So I thought if I could change the lookup from map to slice. + +There is an API named `typelinks` defined in the `runtime` package that the `reflect` package uses internally. +This allows you to get all the type information defined in the binary at runtime. + +The fact that all type information can be acquired means that by constructing slices in advance with the acquired total number of type information, it is possible to look up with the value of `typeptr` without worrying about out-of-range access. + +However, if there is too much type information, it will use a lot of memory, so by default we will only use this optimization if the slice size fits within **2Mib** . 
+ +If this approach is not available, it will fall back to the `atomic` based process described above. + +If you want to know more, please refer to the implementation [here](https://github.com/goccy/go-json/blob/master/internal/runtime/type.go#L36-L100) + +## Decoder + +### Dispatch by typeptr from map to slice + +Like the encoder, the decoder also uses typeptr to call the dedicated process. + +### Faster termination character inspection using NUL character + +In order to decode, you have to traverse the input buffer character by position. +At that time, if you check whether the buffer has reached the end, it will be very slow. + +`buf` : `[]byte` type variable. holds the string passed to the decoder +`cursor` : `int64` type variable. holds the current read position + +```go +buflen := len(buf) +for ; cursor < buflen; cursor++ { // compare cursor and buflen at all times, it is so slow. + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + } +} +``` + +Therefore, by adding the `NUL` (`\000`) character to the end of the read buffer as shown below, it is possible to check the termination character at the same time as other characters. + +```go +for { + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Use Boundary Check Elimination + +Due to the `NUL` character optimization, the Go compiler does a boundary check every time, even though `buf[cursor]` does not cause out-of-range access. + +Therefore, `go-json` eliminates boundary check by fetching characters for hotspot by pointer operation. For example, the following code. 
+ +```go +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +p := (*sliceHeader)(&unsafe.Pointer(buf)).data +for { + switch char(p, cursor) { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Checking the existence of fields of struct using Bitmaps + +I found by the profiling result, in the struct decode, lookup process for field was taking a long time. + +For example, consider decoding a string like `{"a":1,"b":2,"c":3}` into the following structure: + +```go +type T struct { + A int `json:"a"` + B int `json:"b"` + C int `json:"c"` +} +``` + +At this time, it was found that it takes a lot of time to acquire the decoding process corresponding to the field from the field name as shown below during the decoding process. + +```go +fieldName := decodeKey(buf, cursor) // "a" or "b" or "c" +decoder, exists := fieldToDecoderMap[fieldName] // so slow +if exists { + decoder(buf, cursor) +} else { + skipValue(buf, cursor) +} +``` + +To improve this process, `json-iterator/go` is optimized so that it can be branched by switch-case when the number of fields in the structure is 10 or less (switch-case is faster than map). However, there is a risk of hash collision because the value hashed by the FNV algorithm is used for conditional branching. Also, `gojay` processes this part at high speed by letting the library user yourself write `switch-case`. + + +`go-json` considers and implements a new approach that is different from these. I call this **bitmap field optimization**. + +The range of values ​​per character can be represented by `[256]byte`. Also, if the number of fields in the structure is 8 or less, `int8` type can represent the state of each field. +In other words, it has the following structure. 
+ +- Base ( 8bit ): `00000000` +- Key "a": `00000001` ( assign key "a" to the first bit ) +- Key "b": `00000010` ( assign key "b" to the second bit ) +- Key "c": `00000100` ( assign key "c" to the third bit ) + +Bitmap structure is the following + +``` + | key index(0) | +------------------------ + 0 | 00000000 | + 1 | 00000000 | +~~ | | +97 (a) | 00000001 | +98 (b) | 00000010 | +99 (c) | 00000100 | +~~ | | +255 | 00000000 | +``` + +You can think of this as a Bitmap with a height of `256` and a width of the maximum string length in the field name. +In other words, it can be represented by the following type . + +```go +[maxFieldKeyLength][256]int8 +``` + +When decoding a field character, check whether the corresponding character exists by referring to the pre-built bitmap like the following. + +```go +var curBit int8 = math.MaxInt8 // 11111111 + +c := char(buf, cursor) +bit := bitmap[keyIdx][c] +curBit &= bit +if curBit == 0 { + // not found field +} +``` + +If `curBit` is not `0` until the end of the field string, then the string is +You may have hit one of the fields. +But the possibility is that if the decoded string is shorter than the field string, you will get a false hit. + +- input: `{"a":1}` +```go +type T struct { + X int `json:"abc"` +} +``` +※ Since `a` is shorter than `abc`, it can decode to the end of the field character without `curBit` being 0. + +Rest assured. In this case, it doesn't matter because you can tell if you hit by comparing the string length of `a` with the string length of `abc`. + +Finally, calculate the position of the bit where `1` is set and get the corresponding value, and you're done. + +Using this technique, field lookups are possible with only bitwise operations and access to slices. + +`go-json` uses a similar technique for fields with 9 or more and 16 or less fields. At this time, Bitmap is constructed as `[maxKeyLen][256]int16` type. 
+ +Currently, this optimization is not performed when the maximum length of the field name is long (specifically, 64 bytes or more) in addition to the limitation of the number of fields from the viewpoint of saving memory usage. + +### Others + +I have done a lot of other optimizations. I will find time to write about them. If you have any questions about what's written here or other optimizations, please visit the `#go-json` channel on `gophers.slack.com` . + +## Reference + +Regarding the story of go-json, there are the following articles in Japanese only. + +- https://speakerdeck.com/goccy/zui-su-falsejsonraiburariwoqiu-mete +- https://engineering.mercari.com/blog/entry/1599563768-081104c850/ + +# Looking for Sponsors + +I'm looking for sponsors this library. This library is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy). I will cooperate as much as possible. Of course, this library is developed as an MIT license, so you can use it freely for free. 
+ +# License + +MIT diff --git a/vendor/github.com/goccy/go-json/color.go b/vendor/github.com/goccy/go-json/color.go new file mode 100644 index 0000000000..e80b22b486 --- /dev/null +++ b/vendor/github.com/goccy/go-json/color.go @@ -0,0 +1,68 @@ +package json + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +type ( + ColorFormat = encoder.ColorFormat + ColorScheme = encoder.ColorScheme +) + +const escape = "\x1b" + +type colorAttr int + +//nolint:deadcode,varcheck +const ( + fgBlackColor colorAttr = iota + 30 + fgRedColor + fgGreenColor + fgYellowColor + fgBlueColor + fgMagentaColor + fgCyanColor + fgWhiteColor +) + +//nolint:deadcode,varcheck +const ( + fgHiBlackColor colorAttr = iota + 90 + fgHiRedColor + fgHiGreenColor + fgHiYellowColor + fgHiBlueColor + fgHiMagentaColor + fgHiCyanColor + fgHiWhiteColor +) + +func createColorFormat(attr colorAttr) ColorFormat { + return ColorFormat{ + Header: wrapColor(attr), + Footer: resetColor(), + } +} + +func wrapColor(attr colorAttr) string { + return fmt.Sprintf("%s[%dm", escape, attr) +} + +func resetColor() string { + return wrapColor(colorAttr(0)) +} + +var ( + DefaultColorScheme = &ColorScheme{ + Int: createColorFormat(fgHiMagentaColor), + Uint: createColorFormat(fgHiMagentaColor), + Float: createColorFormat(fgHiMagentaColor), + Bool: createColorFormat(fgHiYellowColor), + String: createColorFormat(fgHiGreenColor), + Binary: createColorFormat(fgHiRedColor), + ObjectKey: createColorFormat(fgHiCyanColor), + Null: createColorFormat(fgBlueColor), + } +) diff --git a/vendor/github.com/goccy/go-json/decode.go b/vendor/github.com/goccy/go-json/decode.go new file mode 100644 index 0000000000..d99749d05c --- /dev/null +++ b/vendor/github.com/goccy/go-json/decode.go @@ -0,0 +1,232 @@ +package json + +import ( + "context" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/decoder" + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type 
Decoder struct { + s *decoder.Stream +} + +const ( + nul = '\000' +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + rctx := decoder.TakeRuntimeContext() + rctx.Buf = src + rctx.Option.Flags = 0 + rctx.Option.Flags |= decoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + cursor, err := dec.Decode(rctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(rctx) + return err + } + decoder.ReleaseRuntimeContext(rctx) + return validateEndBuf(src, cursor) +} + +func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, 
uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, noescape(header.ptr)) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +//nolint:staticcheck +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func validateType(typ *runtime.Type, p uintptr) error { + if typ == nil || typ.Kind() != reflect.Ptr || p == 0 { + return &InvalidUnmarshalError{Type: runtime.RType2Type(typ)} + } + return nil +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may +// read data from r beyond the JSON values requested. +func NewDecoder(r io.Reader) *Decoder { + s := decoder.NewStream(r) + return &Decoder{ + s: s, + } +} + +// Buffered returns a reader of the data remaining in the Decoder's +// buffer. The reader is valid until the next call to Decode. +func (d *Decoder) Buffered() io.Reader { + return d.s.Buffered() +} + +// Decode reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about +// the conversion of JSON into a Go value. 
+func (d *Decoder) Decode(v interface{}) error { + return d.DecodeWithOption(v) +} + +// DecodeContext reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v with context.Context. +func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { + d.s.Option.Flags |= decoder.ContextOption + d.s.Option.Context = ctx + return d.DecodeWithOption(v) +} + +func (d *Decoder) DecodeWithOption(v interface{}, optFuncs ...DecodeOptionFunc) error { + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + ptr := uintptr(header.ptr) + typeptr := uintptr(unsafe.Pointer(typ)) + // noescape trick for header.typ ( reflect.*rtype ) + copiedType := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + + if err := validateType(copiedType, ptr); err != nil { + return err + } + + dec, err := decoder.CompileToGetDecoder(typ) + if err != nil { + return err + } + if err := d.s.PrepareForDecode(); err != nil { + return err + } + s := d.s + for _, optFunc := range optFuncs { + optFunc(s.Option) + } + if err := dec.DecodeStream(s, 0, header.ptr); err != nil { + return err + } + s.Reset() + return nil +} + +func (d *Decoder) More() bool { + return d.s.More() +} + +func (d *Decoder) Token() (Token, error) { + return d.s.Token() +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields() { + d.s.DisallowUnknownFields = true +} + +func (d *Decoder) InputOffset() int64 { + return d.s.TotalOffset() +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. 
+func (d *Decoder) UseNumber() { + d.s.UseNumber = true +} diff --git a/vendor/github.com/goccy/go-json/docker-compose.yml b/vendor/github.com/goccy/go-json/docker-compose.yml new file mode 100644 index 0000000000..db40c79ad5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/docker-compose.yml @@ -0,0 +1,13 @@ +version: '2' +services: + go-json: + image: golang:1.18 + volumes: + - '.:/go/src/go-json' + deploy: + resources: + limits: + memory: 620M + working_dir: /go/src/go-json + command: | + sh -c "go test -c . && ls go-json.test" diff --git a/vendor/github.com/goccy/go-json/encode.go b/vendor/github.com/goccy/go-json/encode.go new file mode 100644 index 0000000000..4bd899f38b --- /dev/null +++ b/vendor/github.com/goccy/go-json/encode.go @@ -0,0 +1,326 @@ +package json + +import ( + "context" + "io" + "os" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/encoder/vm" + "github.com/goccy/go-json/internal/encoder/vm_color" + "github.com/goccy/go-json/internal/encoder/vm_color_indent" + "github.com/goccy/go-json/internal/encoder/vm_indent" +) + +// An Encoder writes JSON values to an output stream. +type Encoder struct { + w io.Writer + enabledIndent bool + enabledHTMLEscape bool + prefix string + indentStr string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, enabledHTMLEscape: true} +} + +// Encode writes the JSON encoding of v to the stream, followed by a newline character. +// +// See the documentation for Marshal for details about the conversion of Go values to JSON. +func (e *Encoder) Encode(v interface{}) error { + return e.EncodeWithOption(v) +} + +// EncodeWithOption call Encode with EncodeOption. +func (e *Encoder) EncodeWithOption(v interface{}, optFuncs ...EncodeOptionFunc) error { + ctx := encoder.TakeRuntimeContext() + ctx.Option.Flag = 0 + + err := e.encodeWithOption(ctx, v, optFuncs...) 
+ + encoder.ReleaseRuntimeContext(ctx) + return err +} + +// EncodeContext call Encode with context.Context and EncodeOption. +func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag |= encoder.ContextOption + rctx.Option.Context = ctx + + err := e.encodeWithOption(rctx, v, optFuncs...) + + encoder.ReleaseRuntimeContext(rctx) + return err +} + +func (e *Encoder) encodeWithOption(ctx *encoder.RuntimeContext, v interface{}, optFuncs ...EncodeOptionFunc) error { + if e.enabledHTMLEscape { + ctx.Option.Flag |= encoder.HTMLEscapeOption + } + ctx.Option.Flag |= encoder.NormalizeUTF8Option + ctx.Option.DebugOut = os.Stdout + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + var ( + buf []byte + err error + ) + if e.enabledIndent { + buf, err = encodeIndent(ctx, v, e.prefix, e.indentStr) + } else { + buf, err = encode(ctx, v) + } + if err != nil { + return err + } + if e.enabledIndent { + buf = buf[:len(buf)-2] + } else { + buf = buf[:len(buf)-1] + } + buf = append(buf, '\n') + if _, err := e.w.Write(buf); err != nil { + return err + } + return nil +} + +// SetEscapeHTML specifies whether problematic HTML characters should be escaped inside JSON quoted strings. +// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e to avoid certain safety problems that can arise when embedding JSON in HTML. +// +// In non-HTML settings where the escaping interferes with the readability of the output, SetEscapeHTML(false) disables this behavior. +func (e *Encoder) SetEscapeHTML(on bool) { + e.enabledHTMLEscape = on +} + +// SetIndent instructs the encoder to format each subsequent encoded value as if indented by the package-level function Indent(dst, src, prefix, indent). +// Calling SetIndent("", "") disables indentation. 
+func (e *Encoder) SetIndent(prefix, indent string) { + if prefix == "" && indent == "" { + e.enabledIndent = false + return + } + e.prefix = prefix + e.indentStr = indent + e.enabledIndent = true +} + +func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag = encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + + buf, err := encode(rctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(rctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(rctx) + return copied, nil +} + +func marshal(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encode(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. 
+ buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalNoEscape(v interface{}) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + + buf, err := encodeNoEscape(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalIndent(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.IndentOption) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encodeIndent(ctx, v, prefix, indent) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + buf = buf[:len(buf)-2] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func encode(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) + 
+ buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + ctx.Buf = buf + return buf, nil +} + +func encodeNoEscape(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeIndent(ctx *encoder.RuntimeContext, v interface{}, prefix, indent string) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendCommaIndent(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunIndentCode(ctx, b, codeSet, prefix, indent) + + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) + + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeRunCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.DebugRun(ctx, b, codeSet) + } + return vm.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.Run(ctx, b, codeSet) + } + return vm.Run(ctx, b, codeSet) +} + +func encodeRunIndentCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet, prefix, indent string) 
([]byte, error) { + ctx.Prefix = []byte(prefix) + ctx.IndentStr = []byte(indent) + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.DebugRun(ctx, b, codeSet) + } + return vm_indent.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.Run(ctx, b, codeSet) + } + return vm_indent.Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/error.go b/vendor/github.com/goccy/go-json/error.go new file mode 100644 index 0000000000..94c1339a02 --- /dev/null +++ b/vendor/github.com/goccy/go-json/error.go @@ -0,0 +1,39 @@ +package json + +import ( + "github.com/goccy/go-json/internal/errors" +) + +// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when +// attempting to encode a string value with invalid UTF-8 sequences. +// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by +// replacing invalid bytes with the Unicode replacement rune U+FFFD. +// +// Deprecated: No longer used; kept for compatibility. +type InvalidUTF8Error = errors.InvalidUTF8Error + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError = errors.InvalidUnmarshalError + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError = errors.MarshalerError + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError = errors.SyntaxError + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError = errors.UnmarshalFieldError + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError = errors.UnmarshalTypeError + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. +type UnsupportedTypeError = errors.UnsupportedTypeError + +type UnsupportedValueError = errors.UnsupportedValueError diff --git a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go new file mode 100644 index 0000000000..030cb7a974 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go @@ -0,0 +1,37 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type anonymousFieldDecoder struct { + structType *runtime.Type + offset uintptr + dec Decoder +} + +func newAnonymousFieldDecoder(structType *runtime.Type, offset uintptr, dec Decoder) *anonymousFieldDecoder { + return &anonymousFieldDecoder{ + structType: structType, + offset: offset, + dec: dec, + } +} + +func (d *anonymousFieldDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go new file mode 100644 index 0000000000..21f1fd5852 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go @@ -0,0 +1,169 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/errors" + 
"github.com/goccy/go-json/internal/runtime" +) + +type arrayDecoder struct { + elemType *runtime.Type + size uintptr + valueDecoder Decoder + alen int + structName string + fieldName string + zeroValue unsafe.Pointer +} + +func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder { + zeroValue := *(*unsafe.Pointer)(unsafe_New(elemType)) + return &arrayDecoder{ + valueDecoder: dec, + elemType: elemType, + size: elemType.Size(), + alen: alen, + structName: structName, + fieldName: fieldName, + zeroValue: zeroValue, + } +} + +func (d *arrayDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case '[': + idx := 0 + s.cursor++ + if s.skipWhiteSpace() == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + } + for { + if idx < d.alen { + if err := d.valueDecoder.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)); err != nil { + return err + } + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + idx++ + switch s.skipWhiteSpace() { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + case ',': + s.cursor++ + continue + case nul: + if s.read() { + s.cursor++ + continue + } + goto ERROR + default: + goto ERROR + } + } + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + s.cursor++ + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("array", s.totalOffset()) +} + +func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := 
ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '[': + idx := 0 + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + } + for { + if idx < d.alen { + c, err := d.valueDecoder.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + idx++ + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + case ',': + cursor++ + continue + default: + return 0, errors.ErrInvalidCharacter(buf[cursor], "array", cursor) + } + } + default: + return 0, errors.ErrUnexpectedEndOfJSON("array", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bool.go b/vendor/github.com/goccy/go-json/internal/decoder/bool.go new file mode 100644 index 0000000000..455042a534 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bool.go @@ -0,0 +1,78 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type boolDecoder struct { + structName string + fieldName string +} + +func newBoolDecoder(structName, fieldName string) *boolDecoder { + return &boolDecoder{structName: structName, fieldName: fieldName} +} + +func (d *boolDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + 
switch c { + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case nul: + if s.read() { + c = s.char() + continue + } + goto ERROR + } + break + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("bool", s.totalOffset()) +} + +func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**bool)(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + **(**bool)(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + } + return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go new file mode 100644 index 0000000000..92c7dcf64f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go @@ -0,0 +1,113 @@ +package decoder + +import ( + "encoding/base64" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type bytesDecoder struct { + typ *runtime.Type + sliceDecoder Decoder + stringDecoder *stringDecoder + structName string + fieldName string +} + +func byteUnmarshalerSliceDecoder(typ *runtime.Type, structName string, fieldName string) Decoder { + var unmarshalDecoder Decoder + switch { + case runtime.PtrTo(typ).Implements(unmarshalJSONType): + unmarshalDecoder = 
newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName) + case runtime.PtrTo(typ).Implements(unmarshalTextType): + unmarshalDecoder = newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName) + default: + unmarshalDecoder, _ = compileUint8(typ, structName, fieldName) + } + return newSliceDecoder(unmarshalDecoder, typ, 1, structName, fieldName) +} + +func newBytesDecoder(typ *runtime.Type, structName string, fieldName string) *bytesDecoder { + return &bytesDecoder{ + typ: typ, + sliceDecoder: byteUnmarshalerSliceDecoder(typ, structName, fieldName), + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + } +} + +func (d *bytesDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamBinary(s, depth, p) + if err != nil { + return err + } + if bytes == nil { + s.reset() + return nil + } + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + buf := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(buf, bytes) + if err != nil { + return err + } + *(*[]byte)(p) = buf[:n] + s.reset() + return nil +} + +func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeBinary(ctx, cursor, depth, p) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + b := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(b, bytes) + if err != nil { + return 0, err + } + *(*[]byte)(p) = b[:n] + return cursor, nil +} + +func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) { + c := s.skipWhiteSpace() + if c == '[' { + if d.sliceDecoder == nil { + return nil, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + err := d.sliceDecoder.DecodeStream(s, depth, p) + return nil, err + } + return 
d.stringDecoder.decodeStreamByte(s) +} + +func (d *bytesDecoder) decodeBinary(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) ([]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '[' { + if d.sliceDecoder == nil { + return nil, 0, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: cursor, + } + } + c, err := d.sliceDecoder.Decode(ctx, cursor, depth, p) + if err != nil { + return nil, 0, err + } + return nil, c, nil + } + return d.stringDecoder.decodeByte(buf, cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go new file mode 100644 index 0000000000..fab6437647 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -0,0 +1,487 @@ +package decoder + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "sync/atomic" + "unicode" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var ( + jsonNumberType = reflect.TypeOf(json.Number("")) + typeAddr *runtime.TypeAddr + cachedDecoderMap unsafe.Pointer // map[uintptr]decoder + cachedDecoder []Decoder +) + +func init() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +} + +func loadDecoderMap() map[uintptr]Decoder { + p := atomic.LoadPointer(&cachedDecoderMap) + return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) +} + +func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + newDecoderMap := make(map[uintptr]Decoder, len(m)+1) + newDecoderMap[typ] = dec + + for k, v := range m { + newDecoderMap[k] = v + } + + atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap))) +} + +func compileToGetDecoderSlowPath(typeptr uintptr, typ *runtime.Type) (Decoder, error) { + decoderMap := loadDecoderMap() + if dec, exists := decoderMap[typeptr]; 
exists { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + storeDecoder(typeptr, dec, decoderMap) + return dec, nil +} + +func compileHead(typ *runtime.Type, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), "", ""), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), "", ""), nil + } + return compile(typ.Elem(), "", "", structTypeToDecoder) +} + +func compile(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + + switch typ.Kind() { + case reflect.Ptr: + return compilePtr(typ, structName, fieldName, structTypeToDecoder) + case reflect.Struct: + return compileStruct(typ, structName, fieldName, structTypeToDecoder) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return compileBytes(elem, structName, fieldName) + } + return compileSlice(typ, structName, fieldName, structTypeToDecoder) + case reflect.Array: + return compileArray(typ, structName, fieldName, structTypeToDecoder) + case reflect.Map: + return compileMap(typ, structName, fieldName, structTypeToDecoder) + case reflect.Interface: + return compileInterface(typ, structName, fieldName) + case reflect.Uintptr: + return compileUint(typ, structName, fieldName) + case reflect.Int: + return compileInt(typ, structName, fieldName) + case reflect.Int8: + return compileInt8(typ, structName, fieldName) + case reflect.Int16: + return compileInt16(typ, structName, fieldName) + case 
reflect.Int32: + return compileInt32(typ, structName, fieldName) + case reflect.Int64: + return compileInt64(typ, structName, fieldName) + case reflect.Uint: + return compileUint(typ, structName, fieldName) + case reflect.Uint8: + return compileUint8(typ, structName, fieldName) + case reflect.Uint16: + return compileUint16(typ, structName, fieldName) + case reflect.Uint32: + return compileUint32(typ, structName, fieldName) + case reflect.Uint64: + return compileUint64(typ, structName, fieldName) + case reflect.String: + return compileString(typ, structName, fieldName) + case reflect.Bool: + return compileBool(structName, fieldName) + case reflect.Float32: + return compileFloat32(structName, fieldName) + case reflect.Float64: + return compileFloat64(structName, fieldName) + case reflect.Func: + return compileFunc(typ, structName, fieldName) + } + return newInvalidDecoder(typ, structName, fieldName), nil +} + +func isStringTagSupportedType(typ *runtime.Type) bool { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return false + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return false + } + switch typ.Kind() { + case reflect.Map: + return false + case reflect.Slice: + return false + case reflect.Array: + return false + case reflect.Struct: + return false + case reflect.Interface: + return false + } + return true +} + +func compileMapKey(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + if runtime.PtrTo(typ).Implements(unmarshalTextType) { + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + if typ.Kind() == reflect.String { + return newStringDecoder(structName, fieldName), nil + } + dec, err := compile(typ, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + for { + switch t := dec.(type) { + case *stringDecoder, *interfaceDecoder: + return dec, nil + case *boolDecoder, *intDecoder, *uintDecoder, 
*numberDecoder: + return newWrappedStringDecoder(typ, dec, structName, fieldName), nil + case *ptrDecoder: + dec = t.dec + default: + return newInvalidDecoder(typ, structName, fieldName), nil + } + } +} + +func compilePtr(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + dec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newPtrDecoder(dec, typ.Elem(), structName, fieldName), nil +} + +func compileInt(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int)(p) = int(v) + }), nil +} + +func compileInt8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int8)(p) = int8(v) + }), nil +} + +func compileInt16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int16)(p) = int16(v) + }), nil +} + +func compileInt32(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int32)(p) = int32(v) + }), nil +} + +func compileInt64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int64)(p) = v + }), nil +} + +func compileUint(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint)(p) = uint(v) + }), nil +} + +func compileUint8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint8)(p) = uint8(v) + }), nil +} + 
+func compileUint16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint16)(p) = uint16(v) + }), nil +} + +func compileUint32(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint32)(p) = uint32(v) + }), nil +} + +func compileUint64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint64)(p) = v + }), nil +} + +func compileFloat32(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float32)(p) = float32(v) + }), nil +} + +func compileFloat64(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float64)(p) = v + }), nil +} + +func compileString(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + if typ == runtime.Type2RType(jsonNumberType) { + return newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*json.Number)(p) = v + }), nil + } + return newStringDecoder(structName, fieldName), nil +} + +func compileBool(structName, fieldName string) (Decoder, error) { + return newBoolDecoder(structName, fieldName), nil +} + +func compileBytes(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newBytesDecoder(typ, structName, fieldName), nil +} + +func compileSlice(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newSliceDecoder(decoder, elem, elem.Size(), structName, fieldName), nil +} + +func 
compileArray(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newArrayDecoder(decoder, elem, typ.Len(), structName, fieldName), nil +} + +func compileMap(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + keyDec, err := compileMapKey(typ.Key(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + valueDec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newMapDecoder(typ, typ.Key(), keyDec, typ.Elem(), valueDec, structName, fieldName), nil +} + +func compileInterface(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newInterfaceDecoder(typ, structName, fieldName), nil +} + +func compileFunc(typ *runtime.Type, strutName, fieldName string) (Decoder, error) { + return newFuncDecoder(typ, strutName, fieldName), nil +} + +func typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + fieldNum := typ.NumField() + fieldMap := map[string]*structFieldSet{} + typeptr := uintptr(unsafe.Pointer(typ)) + if dec, exists := structTypeToDecoder[typeptr]; exists { + return dec, nil + } + structDec := newStructDecoder(structName, fieldName, fieldMap) + structTypeToDecoder[typeptr] = structDec + structName = typ.Name() + tags := typeToStructTags(typ) + allFields := []*structFieldSet{} + for i := 0; i < fieldNum; i++ 
{ + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + isUnexportedField := unicode.IsLower([]rune(field.Name)[0]) + tag := runtime.StructTagFromField(field) + dec, err := compile(runtime.Type2RType(field.Type), structName, field.Name, structTypeToDecoder) + if err != nil { + return nil, err + } + if field.Anonymous && !tag.IsTaggedKey { + if stDec, ok := dec.(*structDecoder); ok { + if runtime.Type2RType(field.Type) == typ { + // recursive definition + continue + } + for k, v := range stDec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: v.dec, + offset: field.Offset + v.offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + } + allFields = append(allFields, fieldSet) + } + } else if pdec, ok := dec.(*ptrDecoder); ok { + contentDec := pdec.contentDecoder() + if pdec.typ == typ { + // recursive definition + continue + } + var fieldSetErr error + if isUnexportedField { + fieldSetErr = fmt.Errorf( + "json: cannot set embedded pointer to unexported struct: %v", + field.Type.Elem(), + ) + } + if dec, ok := contentDec.(*structDecoder); ok { + for k, v := range dec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec), + offset: field.Offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + err: fieldSetErr, + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: pdec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + if tag.IsString && isStringTagSupportedType(runtime.Type2RType(field.Type)) { + dec = 
newWrappedStringDecoder(runtime.Type2RType(field.Type), dec, structName, field.Name) + } + var key string + if tag.Key != "" { + key = tag.Key + } else { + key = field.Name + } + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: key, + keyLen: int64(len(key)), + } + allFields = append(allFields, fieldSet) + } + } + for _, set := range filterDuplicatedFields(allFields) { + fieldMap[set.key] = set + lower := strings.ToLower(set.key) + if _, exists := fieldMap[lower]; !exists { + // first win + fieldMap[lower] = set + } + } + delete(structTypeToDecoder, typeptr) + structDec.tryOptimize() + return structDec, nil +} + +func filterDuplicatedFields(allFields []*structFieldSet) []*structFieldSet { + fieldMap := map[string][]*structFieldSet{} + for _, field := range allFields { + fieldMap[field.key] = append(fieldMap[field.key], field) + } + duplicatedFieldMap := map[string]struct{}{} + for k, sets := range fieldMap { + sets = filterFieldSets(sets) + if len(sets) != 1 { + duplicatedFieldMap[k] = struct{}{} + } + } + + filtered := make([]*structFieldSet, 0, len(allFields)) + for _, field := range allFields { + if _, exists := duplicatedFieldMap[field.key]; exists { + continue + } + filtered = append(filtered, field) + } + return filtered +} + +func filterFieldSets(sets []*structFieldSet) []*structFieldSet { + if len(sets) == 1 { + return sets + } + filtered := make([]*structFieldSet, 0, len(sets)) + for _, set := range sets { + if set.isTaggedKey { + filtered = append(filtered, set) + } + } + return filtered +} + +func implementsUnmarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(unmarshalJSONType) || typ.Implements(unmarshalJSONContextType) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go new file mode 100644 index 0000000000..eb7e2b1345 --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -0,0 +1,29 @@ +//go:build !race +// +build !race + +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if dec := cachedDecoder[index]; dec != nil { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + cachedDecoder[index] = dec + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go new file mode 100644 index 0000000000..49cdda4a17 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -0,0 +1,37 @@ +//go:build race +// +build race + +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var decMu sync.RWMutex + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + decMu.RLock() + if dec := cachedDecoder[index]; dec != nil { + decMu.RUnlock() + return dec, nil + } + decMu.RUnlock() + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + decMu.Lock() + cachedDecoder[index] = dec + decMu.Unlock() + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/context.go b/vendor/github.com/goccy/go-json/internal/decoder/context.go new file mode 100644 index 0000000000..cb2ffdafd0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/context.go @@ -0,0 
+1,254 @@ +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type RuntimeContext struct { + Buf []byte + Option *Option +} + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Option: &Option{}, + } + }, + } +) + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} + +var ( + isWhiteSpace = [256]bool{} +) + +func init() { + isWhiteSpace[' '] = true + isWhiteSpace['\n'] = true + isWhiteSpace['\t'] = true + isWhiteSpace['\r'] = true +} + +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { + for isWhiteSpace[buf[cursor]] { + cursor++ + } + return cursor +} + +func skipObject(buf []byte, cursor, depth int64) (int64, error) { + braceCount := 1 + for { + switch buf[cursor] { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + braceCount-- + if braceCount == 0 { + return cursor + 1, nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipArray(buf []byte, cursor, depth int64) (int64, error) { + bracketCount := 1 + for { + switch buf[cursor] { + case '[': + bracketCount++ + depth++ + if depth > 
maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + return cursor + 1, nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipValue(buf []byte, cursor, depth int64) (int64, error) { + for { + switch buf[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return skipObject(buf, cursor+1, depth+1) + case '[': + return skipArray(buf, cursor+1, depth+1) + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + return cursor + 1, nil + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + if floatTable[buf[cursor]] { + continue + } + break + } + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + default: + return cursor, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + } +} + +func validateTrue(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) 
{ + return errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if buf[cursor+1] != 'r' { + return errors.ErrInvalidCharacter(buf[cursor+1], "true", cursor) + } + if buf[cursor+2] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+2], "true", cursor) + } + if buf[cursor+3] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+3], "true", cursor) + } + return nil +} + +func validateFalse(buf []byte, cursor int64) error { + if cursor+4 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if buf[cursor+1] != 'a' { + return errors.ErrInvalidCharacter(buf[cursor+1], "false", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "false", cursor) + } + if buf[cursor+3] != 's' { + return errors.ErrInvalidCharacter(buf[cursor+3], "false", cursor) + } + if buf[cursor+4] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+4], "false", cursor) + } + return nil +} + +func validateNull(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if buf[cursor+1] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+1], "null", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "null", cursor) + } + if buf[cursor+3] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+3], "null", cursor) + } + return nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/float.go b/vendor/github.com/goccy/go-json/internal/decoder/float.go new file mode 100644 index 0000000000..dfb7168da5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/float.go @@ -0,0 +1,158 @@ +package decoder + +import ( + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type floatDecoder struct { + op func(unsafe.Pointer, float64) + structName string + fieldName string +} + +func newFloatDecoder(structName, fieldName string, op func(unsafe.Pointer, float64)) *floatDecoder { + 
return &floatDecoder{op: op, structName: structName, fieldName: fieldName} +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } + + validEndNumberChar = [256]bool{ + nul: true, + ' ': true, + '\t': true, + '\r': true, + '\n': true, + ',': true, + ':': true, + '}': true, + ']': true, + } +) + +func floatBytes(s *Stream) []byte { + start := s.cursor + for { + s.cursor++ + if floatTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + return s.buf[start:s.cursor] +} + +func (d *floatDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("float", s.totalOffset()) +} + +func (d *floatDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + } +} + +func (d *floatDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if 
bytes == nil { + return nil + } + str := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, f64) + return nil +} + +func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + if !validEndNumberChar[buf[cursor]] { + return 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + s := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, errors.ErrSyntax(err.Error(), cursor) + } + d.op(p, f64) + return cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/func.go b/vendor/github.com/goccy/go-json/internal/decoder/func.go new file mode 100644 index 0000000000..ee35637115 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/func.go @@ -0,0 +1,141 @@ +package decoder + +import ( + "bytes" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type funcDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newFuncDecoder(typ *runtime.Type, structName, fieldName string) *funcDecoder { + fnDecoder := &funcDecoder{typ, structName, fieldName} + return fnDecoder +} + +func (d *funcDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '"': + return &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 
'{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + case 't': + if err := trueBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + case 'f': + if err := falseBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + } + } + return errors.ErrInvalidBeginningOfValue(s.buf[s.cursor], s.totalOffset()) +} + +func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '"': + return 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + case 't': + if err := validateTrue(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: 
runtime.RType2Type(d.typ), + Offset: start, + } + } + case 'f': + if err := validateFalse(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + } + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/int.go b/vendor/github.com/goccy/go-json/internal/decoder/int.go new file mode 100644 index 0000000000..509b753d64 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/int.go @@ -0,0 +1,242 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type intDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, int64) + structName string + fieldName string +} + +func newIntDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, int64)) *intDecoder { + return &intDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *intDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +var ( + pow10i64 = [...]int64{ + 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, + } + pow10i64Len = len(pow10i64) +) + +func (d *intDecoder) parseInt(b []byte) (int64, error) { + isNegative := false + if b[0] == '-' { + b = b[1:] + isNegative = true + } + maxDigit := len(b) + if maxDigit > pow10i64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := int64(0) + for i := 0; i < maxDigit; i++ { + c := int64(b[i]) - 48 + digitValue := pow10i64[maxDigit-i-1] + sum += c * digitValue + } + if isNegative { 
+ return -1 * sum, nil + } + return sum, nil +} + +var ( + numTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + } +) + +var ( + numZeroBuf = []byte{'0'} +) + +func (d *intDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + if len(num) < 2 { + goto ERROR + } + return num, nil + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("number(integer)", s.totalOffset()) +} + +func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[char(b, cursor)] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, 
d.typeError([]byte{char(b, cursor)}, cursor) + } + } +} + +func (d *intDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + i64, err := d.parseInt(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, i64) + s.reset() + return nil +} + +func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + + i64, err := d.parseInt(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, i64) + return cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/interface.go b/vendor/github.com/goccy/go-json/internal/decoder/interface.go new file mode 100644 index 0000000000..4dbb4be4ac --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/interface.go @@ -0,0 +1,458 @@ +package decoder + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type interfaceDecoder struct { + typ 
*runtime.Type + structName string + fieldName string + sliceDecoder *sliceDecoder + mapDecoder *mapDecoder + floatDecoder *floatDecoder + numberDecoder *numberDecoder + stringDecoder *stringDecoder +} + +func newEmptyInterfaceDecoder(structName, fieldName string) *interfaceDecoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: structName, + fieldName: fieldName, + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder(structName, fieldName), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + structName, + fieldName, + ) + return ifaceDecoder +} + +func newInterfaceDecoder(typ *runtime.Type, structName, fieldName string) *interfaceDecoder { + emptyIfaceDecoder := newEmptyInterfaceDecoder(structName, fieldName) + stringDecoder := newStringDecoder(structName, fieldName) + return &interfaceDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + sliceDecoder: newSliceDecoder( + emptyIfaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ), + mapDecoder: newMapDecoder( + interfaceMapType, + stringType, + stringDecoder, + interfaceMapType.Elem(), + emptyIfaceDecoder, + structName, + fieldName, + ), + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: stringDecoder, + } +} + +func (d *interfaceDecoder) 
numDecoder(s *Stream) Decoder { + if s.UseNumber { + return d.numberDecoder + } + return d.floatDecoder +} + +var ( + emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem()) + interfaceMapType = runtime.Type2RType( + reflect.TypeOf((*map[string]interface{})(nil)).Elem(), + ) + stringType = runtime.Type2RType( + reflect.TypeOf(""), + ) +) + +func decodeStreamUnmarshaler(s *Stream, depth int64, unmarshaler json.Unmarshaler) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return err + } + return nil +} + +func decodeStreamUnmarshalerContext(s *Stream, depth int64, unmarshaler unmarshalerContext) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(s.Option.Context, dst); err != nil { + return err + } + return nil +} + +func decodeUnmarshaler(buf []byte, cursor, depth int64, unmarshaler json.Unmarshaler) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeUnmarshalerContext(ctx *RuntimeContext, buf []byte, cursor, depth int64, unmarshaler unmarshalerContext) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(ctx.Option.Context, dst); err != nil { + return 0, err + } + return end, nil +} + +func 
decodeStreamTextUnmarshaler(s *Stream, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalText(dst); err != nil { + return err + } + return nil +} + +func decodeTextUnmarshaler(buf []byte, cursor, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + if s, ok := unquoteBytes(src); ok { + src = s + } + if err := unmarshaler.UnmarshalText(src); err != nil { + return 0, err + } + return end, nil +} + +func (d *interfaceDecoder) decodeStreamEmptyInterface(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + if err := d.mapDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + if err := d.sliceDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.numDecoder(s).DecodeStream(s, depth, p) + case '"': + s.cursor++ + start := s.cursor + for { + switch s.char() { + case '\\': + if _, err := decodeEscapeString(s, nil); err != nil { + return err + } + case '"': + literal := s.buf[start:s.cursor] + s.cursor++ + *(*interface{})(p) = string(literal) + return nil + case nul: + if s.read() { + continue + } + return errors.ErrUnexpectedEndOfJSON("string", 
s.totalOffset()) + } + s.cursor++ + } + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + case nul: + if s.read() { + c = s.char() + continue + } + } + break + } + return errors.ErrInvalidBeginningOfValue(c, s.totalOffset()) +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func (d *interfaceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeStreamUnmarshalerContext(s, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeStreamUnmarshaler(s, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeStreamTextUnmarshaler(s, depth, u, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + return d.errUnmarshalType(rv.Type(), s.totalOffset()) + } + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeStreamEmptyInterface(s, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeStreamEmptyInterface(s, depth, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + decoder, err := 
CompileToGetDecoder(typ) + if err != nil { + return err + } + return decoder.DecodeStream(s, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) errUnmarshalType(typ reflect.Type, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typ.String(), + Type: typ, + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *interfaceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeUnmarshalerContext(ctx, buf, cursor, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeUnmarshaler(buf, cursor, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeTextUnmarshaler(buf, cursor, depth, u, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return 0, d.errUnmarshalType(rv.Type(), cursor) + } + + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + decoder, err := 
CompileToGetDecoder(typ) + if err != nil { + return 0, err + } + return decoder.Decode(ctx, cursor, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.mapDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.sliceDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.Decode(ctx, cursor, depth, p) + case '"': + var v string + ptr := unsafe.Pointer(&v) + cursor, err := d.stringDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + **(**interface{})(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go new file mode 100644 index 0000000000..1ef50a7d37 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go @@ -0,0 +1,45 @@ 
+package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type invalidDecoder struct { + typ *runtime.Type + kind reflect.Kind + structName string + fieldName string +} + +func newInvalidDecoder(typ *runtime.Type, structName, fieldName string) *invalidDecoder { + return &invalidDecoder{ + typ: typ, + kind: typ.Kind(), + structName: structName, + fieldName: fieldName, + } +} + +func (d *invalidDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go new file mode 100644 index 0000000000..cb55ef006d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go @@ -0,0 +1,187 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type mapDecoder struct { + mapType *runtime.Type + keyType *runtime.Type + valueType *runtime.Type + canUseAssignFaststrType bool + keyDecoder Decoder + valueDecoder Decoder + structName string + fieldName string +} + +func newMapDecoder(mapType *runtime.Type, keyType *runtime.Type, keyDec Decoder, valueType *runtime.Type, valueDec Decoder, structName, fieldName string) *mapDecoder { + return &mapDecoder{ + mapType: mapType, + keyDecoder: keyDec, + keyType: keyType, + canUseAssignFaststrType: canUseAssignFaststrType(keyType, valueType), + valueType: valueType, + 
valueDecoder: valueDec, + structName: structName, + fieldName: fieldName, + } +} + +const ( + mapMaxElemSize = 128 +) + +// See detail: https://github.com/goccy/go-json/pull/283 +func canUseAssignFaststrType(key *runtime.Type, value *runtime.Type) bool { + indirectElem := value.Size() > mapMaxElemSize + if indirectElem { + return false + } + return key.Kind() == reflect.String +} + +//go:linkname makemap reflect.makemap +func makemap(*runtime.Type, int) unsafe.Pointer + +//nolint:golint +//go:linkname mapassign_faststr runtime.mapassign_faststr +//go:noescape +func mapassign_faststr(t *runtime.Type, m unsafe.Pointer, s string) unsafe.Pointer + +//go:linkname mapassign reflect.mapassign +//go:noescape +func mapassign(t *runtime.Type, m unsafe.Pointer, k, v unsafe.Pointer) + +func (d *mapDecoder) mapassign(t *runtime.Type, m, k, v unsafe.Pointer) { + if d.canUseAssignFaststrType { + mapV := mapassign_faststr(t, m, *(*string)(k)) + typedmemmove(d.valueType, mapV, v) + } else { + mapassign(t, m, k, v) + } +} + +func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + switch s.skipWhiteSpace() { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return nil + case '{': + default: + return errors.ErrExpected("{ character for map value", s.totalOffset()) + } + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + s.cursor++ + if s.equalChar('}') { + *(*unsafe.Pointer)(p) = mapValue + s.cursor++ + return nil + } + for { + k := unsafe_New(d.keyType) + if err := d.keyDecoder.DecodeStream(s, depth, k); err != nil { + return err + } + s.skipWhiteSpace() + if !s.equalChar(':') { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + v := unsafe_New(d.valueType) + if err := 
d.valueDecoder.DecodeStream(s, depth, v); err != nil { + return err + } + d.mapassign(d.mapType, mapValue, k, v) + s.skipWhiteSpace() + if s.equalChar('}') { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + s.cursor++ + return nil + } + if !s.equalChar(',') { + return errors.ErrExpected("comma after object value", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return cursor, nil + case '{': + default: + return 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + for { + k := unsafe_New(d.keyType) + keyCursor, err := d.keyDecoder.Decode(ctx, cursor, depth, k) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + v := unsafe_New(d.valueType) + valueCursor, err := d.valueDecoder.Decode(ctx, cursor, depth, v) + if err != nil { + return 0, err + } + d.mapassign(d.mapType, mapValue, k, v) + cursor = skipWhiteSpace(buf, valueCursor) + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + if buf[cursor] != ',' { + return 0, errors.ErrExpected("comma after object 
value", cursor) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/number.go b/vendor/github.com/goccy/go-json/internal/decoder/number.go new file mode 100644 index 0000000000..bf63773e30 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/number.go @@ -0,0 +1,112 @@ +package decoder + +import ( + "encoding/json" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type numberDecoder struct { + stringDecoder *stringDecoder + op func(unsafe.Pointer, json.Number) + structName string + fieldName string +} + +func newNumberDecoder(structName, fieldName string, op func(unsafe.Pointer, json.Number)) *numberDecoder { + return &numberDecoder{ + stringDecoder: newStringDecoder(structName, fieldName), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *numberDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, json.Number(string(bytes))) + s.reset() + return nil +} + +func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return 0, errors.ErrSyntax(err.Error(), c) + } + cursor = c + s := *(*string)(unsafe.Pointer(&bytes)) + d.op(p, json.Number(s)) + return cursor, nil +} + +func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + start := s.cursor + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + 
return nil, nil + case '"': + return d.stringDecoder.decodeStreamByte(s) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + if s.cursor == start { + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + return nil, errors.ErrUnexpectedEndOfJSON("json.Number", s.totalOffset()) +} + +func (d *numberDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + case '"': + return d.stringDecoder.decodeByte(buf, cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("json.Number", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/option.go b/vendor/github.com/goccy/go-json/internal/decoder/option.go new file mode 100644 index 0000000000..e41f876b03 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/option.go @@ -0,0 +1,15 @@ +package decoder + +import "context" + +type OptionFlags uint8 + +const ( + FirstWinOption OptionFlags = 1 << iota + ContextOption +) + +type Option struct { + Flags OptionFlags + Context context.Context +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go new file mode 100644 index 0000000000..2c83b9c445 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go @@ -0,0 +1,87 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type ptrDecoder struct { + dec Decoder + typ *runtime.Type + structName string + fieldName string +} + +func newPtrDecoder(dec Decoder, typ 
*runtime.Type, structName, fieldName string) *ptrDecoder { + return &ptrDecoder{ + dec: dec, + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *ptrDecoder) contentDecoder() Decoder { + dec, ok := d.dec.(*ptrDecoder) + if !ok { + return d.dec + } + return dec.contentDecoder() +} + +//nolint:golint +//go:linkname unsafe_New reflect.unsafe_New +func unsafe_New(*runtime.Type) unsafe.Pointer + +func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if s.skipWhiteSpace() == nul { + s.read() + } + if s.char() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + if err := d.dec.DecodeStream(s, depth, newptr); err != nil { + return err + } + return nil +} + +func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + if p != nil { + *(*unsafe.Pointer)(p) = nil + } + cursor += 4 + return cursor, nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + c, err := d.dec.Decode(ctx, cursor, depth, newptr) + if err != nil { + return 0, err + } + cursor = c + return cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/slice.go b/vendor/github.com/goccy/go-json/internal/decoder/slice.go new file mode 100644 index 0000000000..85b6e1119e --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/slice.go @@ -0,0 +1,301 @@ +package decoder + +import ( + "reflect" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + 
"github.com/goccy/go-json/internal/runtime" +) + +var ( + sliceType = runtime.Type2RType( + reflect.TypeOf((*sliceHeader)(nil)).Elem(), + ) + nilSlice = unsafe.Pointer(&sliceHeader{}) +) + +type sliceDecoder struct { + elemType *runtime.Type + isElemPointerType bool + valueDecoder Decoder + size uintptr + arrayPool sync.Pool + structName string + fieldName string +} + +// If use reflect.SliceHeader, data type is uintptr. +// In this case, Go compiler cannot trace reference created by newArray(). +// So, define using unsafe.Pointer as data type +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +const ( + defaultSliceCapacity = 2 +) + +func newSliceDecoder(dec Decoder, elemType *runtime.Type, size uintptr, structName, fieldName string) *sliceDecoder { + return &sliceDecoder{ + valueDecoder: dec, + elemType: elemType, + isElemPointerType: elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map, + size: size, + arrayPool: sync.Pool{ + New: func() interface{} { + return &sliceHeader{ + data: newArray(elemType, defaultSliceCapacity), + len: 0, + cap: defaultSliceCapacity, + } + }, + }, + structName: structName, + fieldName: fieldName, + } +} + +func (d *sliceDecoder) newSlice(src *sliceHeader) *sliceHeader { + slice := d.arrayPool.Get().(*sliceHeader) + if src.len > 0 { + // copy original elem + if slice.cap < src.cap { + data := newArray(d.elemType, src.cap) + slice = &sliceHeader{data: data, len: src.len, cap: src.cap} + } else { + slice.len = src.len + } + copySlice(d.elemType, *slice, *src) + } else { + slice.len = 0 + } + return slice +} + +func (d *sliceDecoder) releaseSlice(p *sliceHeader) { + d.arrayPool.Put(p) +} + +//go:linkname copySlice reflect.typedslicecopy +func copySlice(elemType *runtime.Type, dst, src sliceHeader) int + +//go:linkname newArray reflect.unsafe_NewArray +func newArray(*runtime.Type, int) unsafe.Pointer + +//go:linkname typedmemmove reflect.typedmemmove +func typedmemmove(t *runtime.Type, dst, src 
unsafe.Pointer) + +func (d *sliceDecoder) errNumber(offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: "number", + Type: reflect.SliceOf(runtime.RType2Type(d.elemType)), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +func (d *sliceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case 'n': + if err := nullBytes(s); err != nil { + return err + } + typedmemmove(sliceType, p, nilSlice) + return nil + case '[': + s.cursor++ + if s.skipWhiteSpace() == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + s.cursor++ + return nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + + if err := d.valueDecoder.DecodeStream(s, depth, ep); err != nil { + return err + } + s.skipWhiteSpace() + RETRY: + switch s.char() { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + 
d.releaseSlice(slice) + s.cursor++ + return nil + case ',': + idx++ + case nul: + if s.read() { + goto RETRY + } + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + } + s.cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.errNumber(s.totalOffset()) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("slice", s.totalOffset()) +} + +func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + typedmemmove(sliceType, p, nilSlice) + return cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + cursor++ + return cursor, nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, 
unsafe_New(d.elemType)) + } + } + c, err := d.valueDecoder.Decode(ctx, cursor, depth, ep) + if err != nil { + return 0, err + } + cursor = c + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + d.releaseSlice(slice) + cursor++ + return cursor, nil + case ',': + idx++ + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + return 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, d.errNumber(cursor) + default: + return 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/stream.go b/vendor/github.com/goccy/go-json/internal/decoder/stream.go new file mode 100644 index 0000000000..a383f72596 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/stream.go @@ -0,0 +1,556 @@ +package decoder + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +const ( + initBufSize = 512 +) + +type Stream struct { + buf []byte + bufSize int64 + length int64 + r io.Reader + offset int64 + cursor int64 + filledBuffer bool + allRead bool + UseNumber bool + DisallowUnknownFields bool + Option *Option +} + +func NewStream(r io.Reader) *Stream { + return &Stream{ + r: r, + bufSize: initBufSize, + buf: make([]byte, initBufSize), + Option: &Option{}, + } +} + +func (s *Stream) TotalOffset() int64 { + return s.totalOffset() +} + +func (s *Stream) Buffered() io.Reader { + buflen := int64(len(s.buf)) + for i := s.cursor; i < buflen; i++ { + if s.buf[i] == nul { + return bytes.NewReader(s.buf[s.cursor:i]) + } + } + return bytes.NewReader(s.buf[s.cursor:]) +} + 
+func (s *Stream) PrepareForDecode() error { + for { + switch s.char() { + case ' ', '\t', '\r', '\n': + s.cursor++ + continue + case ',', ':': + s.cursor++ + return nil + case nul: + if s.read() { + continue + } + return io.EOF + } + break + } + return nil +} + +func (s *Stream) totalOffset() int64 { + return s.offset + s.cursor +} + +func (s *Stream) char() byte { + return s.buf[s.cursor] +} + +func (s *Stream) equalChar(c byte) bool { + cur := s.buf[s.cursor] + if cur == nul { + s.read() + cur = s.buf[s.cursor] + } + return cur == c +} + +func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) { + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) bufptr() unsafe.Pointer { + return (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) { + s.cursor-- // for retry ( because caller progress cursor position in each loop ) + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) Reset() { + s.reset() + s.bufSize = int64(len(s.buf)) +} + +func (s *Stream) More() bool { + for { + switch s.char() { + case ' ', '\n', '\r', '\t': + s.cursor++ + continue + case '}', ']': + return false + case nul: + if s.read() { + continue + } + return false + } + break + } + return true +} + +func (s *Stream) Token() (interface{}, error) { + for { + c := s.char() + switch c { + case ' ', '\n', '\r', '\t': + s.cursor++ + case '{', '[', ']', '}': + s.cursor++ + return json.Delim(c), nil + case ',', ':': + s.cursor++ + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + bytes := floatBytes(s) + str := *(*string)(unsafe.Pointer(&bytes)) + if s.UseNumber { + return json.Number(str), nil + } + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, err + } + return f64, nil + case '"': + bytes, err := stringBytes(s) + if err != nil { + return nil, err + } + return string(bytes), nil + case 't': + if err := trueBytes(s); err != nil { + 
return nil, err + } + return true, nil + case 'f': + if err := falseBytes(s); err != nil { + return nil, err + } + return false, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto END + default: + return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset()) + } + } +END: + return nil, io.EOF +} + +func (s *Stream) reset() { + s.offset += s.cursor + s.buf = s.buf[s.cursor:] + s.length -= s.cursor + s.cursor = 0 +} + +func (s *Stream) readBuf() []byte { + if s.filledBuffer { + s.bufSize *= 2 + remainBuf := s.buf + s.buf = make([]byte, s.bufSize) + copy(s.buf, remainBuf) + } + remainLen := s.length - s.cursor + remainNotNulCharNum := int64(0) + for i := int64(0); i < remainLen; i++ { + if s.buf[s.cursor+i] == nul { + break + } + remainNotNulCharNum++ + } + s.length = s.cursor + remainNotNulCharNum + return s.buf[s.cursor+remainNotNulCharNum:] +} + +func (s *Stream) read() bool { + if s.allRead { + return false + } + buf := s.readBuf() + last := len(buf) - 1 + buf[last] = nul + n, err := s.r.Read(buf[:last]) + s.length += int64(n) + if n == last { + s.filledBuffer = true + } else { + s.filledBuffer = false + } + if err == io.EOF { + s.allRead = true + } else if err != nil { + return false + } + return true +} + +func (s *Stream) skipWhiteSpace() byte { + p := s.bufptr() +LOOP: + c := char(p, s.cursor) + switch c { + case ' ', '\n', '\t', '\r': + s.cursor++ + goto LOOP + case nul: + if s.read() { + p = s.bufptr() + goto LOOP + } + } + return c +} + +func (s *Stream) skipObject(depth int64) error { + braceCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + braceCount-- + depth-- + if braceCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { 
+ return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipArray(depth int64) error { + bracketCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipValue(depth int64) error { + _, cursor, p := s.stat() + for { + switch 
char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset()) + case '{': + s.cursor = cursor + 1 + return s.skipObject(depth + 1) + case '[': + s.cursor = cursor + 1 + return s.skipArray(depth + 1) + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + case '"': + s.cursor = cursor + 1 + return nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + c := char(p, cursor) + if floatTable[c] { + continue + } else if c == nul { + if s.read() { + _, cursor, p = s.stat() + continue + } + } + s.cursor = cursor + return nil + } + case 't': + s.cursor = cursor + if err := trueBytes(s); err != nil { + return err + } + return nil + case 'f': + s.cursor = cursor + if err := falseBytes(s); err != nil { + return err + } + return nil + case 'n': + s.cursor = cursor + if err := nullBytes(s); err != nil { + return err + } + return nil + } + cursor++ + } +} + +func nullBytes(s *Stream) error { + // current cursor's character is 'n' + s.cursor++ + if s.char() != 'u' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadNull(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), 
"null", s.totalOffset()) +} + +func trueBytes(s *Stream) error { + // current cursor's character is 't' + s.cursor++ + if s.char() != 'r' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'u' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadTrue(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset()) +} + +func falseBytes(s *Stream) error { + // current cursor's character is 'f' + s.cursor++ + if s.char() != 'a' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 's' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadFalse(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset()) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go new file mode 100644 index 0000000000..d07ad7101c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go @@ -0,0 +1,441 @@ +package decoder + +import ( + "bytes" + "fmt" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type stringDecoder struct { + structName string + fieldName string +} + +func newStringDecoder(structName, fieldName string) *stringDecoder { + return &stringDecoder{ + structName: structName, + fieldName: fieldName, + } +} + +func (d *stringDecoder) 
errUnmarshalType(typeName string, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typeName, + Type: reflect.TypeOf(""), + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *stringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + s.reset() + return nil +} + +func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + return cursor, nil +} + +var ( + hexToInt = [256]int{ + '0': 0, + '1': 1, + '2': 2, + '3': 3, + '4': 4, + '5': 5, + '6': 6, + '7': 7, + '8': 8, + '9': 9, + 'A': 10, + 'B': 11, + 'C': 12, + 'D': 13, + 'E': 14, + 'F': 15, + 'a': 10, + 'b': 11, + 'c': 12, + 'd': 13, + 'e': 14, + 'f': 15, + } +) + +func unicodeToRune(code []byte) rune { + var r rune + for i := 0; i < len(code); i++ { + r = r*16 + rune(hexToInt[code[i]]) + } + return r +} + +func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool { + for s.cursor+n >= s.length { + if !s.read() { + return false + } + *p = s.bufptr() + } + return true +} + +func decodeUnicodeRune(s *Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) { + const defaultOffset = 5 + const surrogateOffset = 11 + + if !readAtLeast(s, defaultOffset, &p) { + return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + + r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + if !readAtLeast(s, surrogateOffset, &p) { + return unicode.ReplacementChar, defaultOffset, p, nil + } + if s.buf[s.cursor+defaultOffset] != 
'\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { + return unicode.ReplacementChar, defaultOffset, p, nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return r, surrogateOffset, p, nil + } + } + return r, defaultOffset, p, nil +} + +func decodeUnicode(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + const backSlashAndULen = 2 // length of \u + + r, offset, pp, err := decodeUnicodeRune(s, p) + if err != nil { + return nil, err + } + unicode := []byte(string(r)) + unicodeLen := int64(len(unicode)) + s.buf = append(append(s.buf[:s.cursor-1], unicode...), s.buf[s.cursor+offset:]...) + unicodeOrgLen := offset - 1 + s.length = s.length - (backSlashAndULen + (unicodeOrgLen - unicodeLen)) + s.cursor = s.cursor - backSlashAndULen + unicodeLen + return pp, nil +} + +func decodeEscapeString(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + s.cursor++ +RETRY: + switch s.buf[s.cursor] { + case '"': + s.buf[s.cursor] = '"' + case '\\': + s.buf[s.cursor] = '\\' + case '/': + s.buf[s.cursor] = '/' + case 'b': + s.buf[s.cursor] = '\b' + case 'f': + s.buf[s.cursor] = '\f' + case 'n': + s.buf[s.cursor] = '\n' + case 'r': + s.buf[s.cursor] = '\r' + case 't': + s.buf[s.cursor] = '\t' + case 'u': + return decodeUnicode(s, p) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + p = s.bufptr() + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.buf = append(s.buf[:s.cursor-1], s.buf[s.cursor:]...) 
+ s.length-- + s.cursor-- + p = s.bufptr() + return p, nil +} + +var ( + runeErrBytes = []byte(string(utf8.RuneError)) + runeErrBytesLen = int64(len(runeErrBytes)) +) + +func stringBytes(s *Stream) ([]byte, error) { + _, cursor, p := s.stat() + cursor++ // skip double quote char + start := cursor + for { + switch char(p, cursor) { + case '\\': + s.cursor = cursor + pp, err := decodeEscapeString(s, p) + if err != nil { + return nil, err + } + p = pp + cursor = s.cursor + case '"': + literal := s.buf[start:cursor] + cursor++ + s.cursor = cursor + return literal, nil + case + // 0x00 is nul, 0x5c is '\\', 0x22 is '"' . + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, // 0x10-0x1F + 0x20, 0x21 /*0x22,*/, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, // 0x20-0x2F + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, // 0x30-0x3F + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, // 0x40-0x4F + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B /*0x5C,*/, 0x5D, 0x5E, 0x5F, // 0x50-0x5F + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, // 0x60-0x6F + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F: // 0x70-0x7F + // character is ASCII. 
skip to next char + case + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, // 0x80-0x8F + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, // 0x90-0x9F + 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, // 0xA0-0xAF + 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, // 0xB0-0xBF + 0xC0, 0xC1, // 0xC0-0xC1 + 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF: // 0xF5-0xFE + // character is invalid + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) + _, _, p = s.stat() + cursor += runeErrBytesLen + s.length += runeErrBytesLen + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + case 0xEF: + // RuneError is {0xEF, 0xBF, 0xBD} + if s.buf[cursor+1] == 0xBF && s.buf[cursor+2] == 0xBD { + // found RuneError: skip + cursor += 2 + break + } + fallthrough + default: + // multi bytes character + if !utf8.FullRune(s.buf[cursor : len(s.buf)-1]) { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + } + r, size := utf8.DecodeRune(s.buf[cursor:]) + if r == utf8.RuneError { + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) 
+ cursor += runeErrBytesLen + s.length += runeErrBytesLen + _, _, p = s.stat() + } else { + cursor += int64(size) + } + continue + } + cursor++ + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) +} + +func (d *stringDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '[': + return nil, d.errUnmarshalType("array", s.totalOffset()) + case '{': + return nil, d.errUnmarshalType("object", s.totalOffset()) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, d.errUnmarshalType("number", s.totalOffset()) + case '"': + return stringBytes(s) + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + } + break + } + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) +} + +func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + case '[': + return nil, 0, d.errUnmarshalType("array", cursor) + case '{': + return nil, 0, d.errUnmarshalType("object", cursor) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errUnmarshalType("number", cursor) + case '"': + cursor++ + start := cursor + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + escaped := 0 + for { + switch char(b, cursor) { + case '\\': + escaped++ + cursor++ + switch char(b, cursor) { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + cursor++ + case 'u': + buflen := int64(len(buf)) + if cursor+5 >= buflen { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + for i := int64(1); i <= 4; i++ { + c := char(b, cursor+i) + if !(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')) { + return nil, 0, errors.ErrSyntax(fmt.Sprintf("json: invalid character %c in \\u hexadecimal character escape", c), cursor+i) + 
} + } + cursor += 5 + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + continue + case '"': + literal := buf[start:cursor] + if escaped > 0 { + literal = literal[:unescapeString(literal)] + } + cursor++ + return literal, cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + cursor++ + } + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) + } + } +} + +var unescapeMap = [256]byte{ + '"': '"', + '\\': '\\', + '/': '/', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', +} + +func unsafeAdd(ptr unsafe.Pointer, offset int) unsafe.Pointer { + return unsafe.Pointer(uintptr(ptr) + uintptr(offset)) +} + +func unescapeString(buf []byte) int { + p := (*sliceHeader)(unsafe.Pointer(&buf)).data + end := unsafeAdd(p, len(buf)) + src := unsafeAdd(p, bytes.IndexByte(buf, '\\')) + dst := src + for src != end { + c := char(src, 0) + if c == '\\' { + escapeChar := char(src, 1) + if escapeChar != 'u' { + *(*byte)(dst) = unescapeMap[escapeChar] + src = unsafeAdd(src, 2) + dst = unsafeAdd(dst, 1) + } else { + v1 := hexToInt[char(src, 2)] + v2 := hexToInt[char(src, 3)] + v3 := hexToInt[char(src, 4)] + v4 := hexToInt[char(src, 5)] + code := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if code >= 0xd800 && code < 0xdc00 && uintptr(unsafeAdd(src, 11)) < uintptr(end) { + if char(src, 6) == '\\' && char(src, 7) == 'u' { + v1 := hexToInt[char(src, 8)] + v2 := hexToInt[char(src, 9)] + v3 := hexToInt[char(src, 10)] + v4 := hexToInt[char(src, 11)] + lo := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if lo >= 0xdc00 && lo < 0xe000 { + code = (code-0xd800)<<10 | (lo - 0xdc00) + 0x10000 + src = unsafeAdd(src, 6) + } + } + } + var b [utf8.UTFMax]byte + n := utf8.EncodeRune(b[:], code) + switch n { + case 4: + *(*byte)(unsafeAdd(dst, 3)) = b[3] + 
fallthrough + case 3: + *(*byte)(unsafeAdd(dst, 2)) = b[2] + fallthrough + case 2: + *(*byte)(unsafeAdd(dst, 1)) = b[1] + fallthrough + case 1: + *(*byte)(unsafeAdd(dst, 0)) = b[0] + } + src = unsafeAdd(src, 6) + dst = unsafeAdd(dst, n) + } + } else { + *(*byte)(dst) = c + src = unsafeAdd(src, 1) + dst = unsafeAdd(dst, 1) + } + } + return int(uintptr(dst) - uintptr(p)) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go new file mode 100644 index 0000000000..2c64680458 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go @@ -0,0 +1,819 @@ +package decoder + +import ( + "fmt" + "math" + "math/bits" + "sort" + "strings" + "unicode" + "unicode/utf16" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type structFieldSet struct { + dec Decoder + offset uintptr + isTaggedKey bool + fieldIdx int + key string + keyLen int64 + err error +} + +type structDecoder struct { + fieldMap map[string]*structFieldSet + fieldUniqueNameNum int + stringDecoder *stringDecoder + structName string + fieldName string + isTriedOptimize bool + keyBitmapUint8 [][256]uint8 + keyBitmapUint16 [][256]uint16 + sortedFieldSets []*structFieldSet + keyDecoder func(*structDecoder, []byte, int64) (int64, *structFieldSet, error) + keyStreamDecoder func(*structDecoder, *Stream) (*structFieldSet, string, error) +} + +var ( + largeToSmallTable [256]byte +) + +func init() { + for i := 0; i < 256; i++ { + c := i + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + largeToSmallTable[i] = byte(c) + } +} + +func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder { + return &structDecoder{ + fieldMap: fieldMap, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + keyDecoder: decodeKey, + keyStreamDecoder: decodeKeyStream, + } +} + +const ( + allowOptimizeMaxKeyLen = 64 + 
allowOptimizeMaxFieldLen = 16 +) + +func (d *structDecoder) tryOptimize() { + fieldUniqueNameMap := map[string]int{} + fieldIdx := -1 + for k, v := range d.fieldMap { + lower := strings.ToLower(k) + idx, exists := fieldUniqueNameMap[lower] + if exists { + v.fieldIdx = idx + } else { + fieldIdx++ + v.fieldIdx = fieldIdx + } + fieldUniqueNameMap[lower] = fieldIdx + } + d.fieldUniqueNameNum = len(fieldUniqueNameMap) + + if d.isTriedOptimize { + return + } + fieldMap := map[string]*structFieldSet{} + conflicted := map[string]struct{}{} + for k, v := range d.fieldMap { + key := strings.ToLower(k) + if key != k { + // already exists same key (e.g. Hello and HELLO has same lower case key + if _, exists := conflicted[key]; exists { + d.isTriedOptimize = true + return + } + conflicted[key] = struct{}{} + } + if field, exists := fieldMap[key]; exists { + if field != v { + d.isTriedOptimize = true + return + } + } + fieldMap[key] = v + } + + if len(fieldMap) > allowOptimizeMaxFieldLen { + d.isTriedOptimize = true + return + } + + var maxKeyLen int + sortedKeys := []string{} + for key := range fieldMap { + keyLen := len(key) + if keyLen > allowOptimizeMaxKeyLen { + d.isTriedOptimize = true + return + } + if maxKeyLen < keyLen { + maxKeyLen = keyLen + } + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + + // By allocating one extra capacity than `maxKeyLen`, + // it is possible to avoid the process of comparing the index of the key with the length of the bitmap each time. 
+ bitmapLen := maxKeyLen + 1 + if len(sortedKeys) <= 8 { + keyBitmap := make([][256]uint8, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint8 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint8 + d.keyStreamDecoder = decodeKeyByBitmapUint8Stream + } else { + keyBitmap := make([][256]uint16, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint16 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint16 + d.keyStreamDecoder = decodeKeyByBitmapUint16Stream + } +} + +// decode from '\uXXXX' +func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64) { + const defaultOffset = 4 + const surrogateOffset = 6 + + r := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + cursor += defaultOffset + if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' { + return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1 + } + cursor += 2 + r2 := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return []byte(string(r)), cursor + defaultOffset - 1 + } + } + return []byte(string(r)), cursor + defaultOffset - 1 +} + +func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64) { + c := buf[cursor] + cursor++ + switch c { + case '"': + return []byte{'"'}, cursor + case '\\': + return []byte{'\\'}, cursor + case '/': + return []byte{'/'}, cursor + case 'b': + return []byte{'\b'}, cursor + case 'f': + return []byte{'\f'}, cursor + case 'n': + return []byte{'\n'}, cursor + case 'r': + return []byte{'\r'}, cursor + case 't': + return []byte{'\t'}, cursor + case 'u': + return 
decodeKeyCharByUnicodeRune(buf, cursor) + } + return nil, cursor +} + +func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor := decodeKeyCharByEscapedChar(buf, cursor) + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + start 
:= cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor := decodeKeyCharByEscapedChar(buf, cursor) + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyNotFound(b unsafe.Pointer, cursor int64) (int64, *structFieldSet, error) { + for { + cursor++ + switch char(b, cursor) { + case '"': + cursor++ + return cursor, nil, nil + case '\\': + cursor++ + if char(b, cursor) == nul { + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + } +} + +func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + key, c, err := d.stringDecoder.decodeByte(buf, cursor) + if err != nil { + return 0, nil, err + } + cursor = c + k := *(*string)(unsafe.Pointer(&key)) + field, exists := d.fieldMap[k] + if !exists { + return cursor, nil, nil + } + return cursor, field, nil +} + +func decodeKeyByBitmapUint8Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, 
"", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +func decodeKeyByBitmapUint16Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + 
FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +// decode from '\uXXXX' +func decodeKeyCharByUnicodeRuneStream(s *Stream) ([]byte, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if s.cursor+defaultOffset >= s.length { + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped unicode char", s.totalOffset()) + } + } + + r := unicodeToRune(s.buf[s.cursor : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + s.cursor += defaultOffset + if s.cursor+surrogateOffset >= s.length { + s.read() + } + if s.cursor+surrogateOffset >= s.length || 
s.buf[s.cursor] != '\\' || s.buf[s.cursor+1] != 'u' { + s.cursor += defaultOffset - 1 + return []byte(string(unicode.ReplacementChar)), nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil + } + } + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil +} + +func decodeKeyCharByEscapeCharStream(s *Stream) ([]byte, error) { + c := s.buf[s.cursor] + s.cursor++ +RETRY: + switch c { + case '"': + return []byte{'"'}, nil + case '\\': + return []byte{'\\'}, nil + case '/': + return []byte{'/'}, nil + case 'b': + return []byte{'\b'}, nil + case 'f': + return []byte{'\f'}, nil + case 'n': + return []byte{'\n'}, nil + case 'r': + return []byte{'\r'}, nil + case 't': + return []byte{'\t'}, nil + case 'u': + return decodeKeyCharByUnicodeRuneStream(s) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped char", s.totalOffset()) + } + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("struct field", s.totalOffset()) + } +} + +func decodeKeyNotFoundStream(s *Stream, start int64) (*structFieldSet, string, error) { + buf, cursor, p := s.stat() + for { + cursor++ + switch char(p, cursor) { + case '"': + b := buf[start:cursor] + key := *(*string)(unsafe.Pointer(&b)) + cursor++ + s.cursor = cursor + return nil, key, nil + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + case nul: + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + } +} + +func decodeKeyStream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + key, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return 
nil, "", err + } + k := *(*string)(unsafe.Pointer(&key)) + return d.fieldMap[k], k, nil +} + +func (d *structDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + c := s.skipWhiteSpace() + switch c { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + default: + if s.char() != '{' { + return errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + s.cursor++ + return nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (s.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + s.reset() + field, key, err := d.keyStreamDecoder(d, s) + if err != nil { + return err + } + if s.skipWhiteSpace() != ':' { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + if field != nil { + if field.err != nil { + return field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + if err := s.skipValue(depth); err != nil { + return err + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return s.skipObject(depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + } + } else if s.DisallowUnknownFields { + return fmt.Errorf("json: unknown field %q", key) + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + c := s.skipWhiteSpace() + if c == '}' { + s.cursor++ + return nil + } + if c != ',' { + return errors.ErrExpected("comma after object element", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *structDecoder) 
Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + buflen := int64(len(buf)) + cursor = skipWhiteSpace(buf, cursor) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + switch char(b, cursor) { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '{': + default: + return 0, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return cursor, nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (ctx.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + c, field, err := d.keyDecoder(d, buf, cursor) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, c) + if char(b, cursor) != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + if cursor >= buflen { + return 0, errors.ErrExpected("object value after colon", cursor) + } + if field != nil { + if field.err != nil { + return 0, field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return skipObject(buf, cursor, depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + 
cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if char(b, cursor) == '}' { + cursor++ + return cursor, nil + } + if char(b, cursor) != ',' { + return 0, errors.ErrExpected("comma after object element", cursor) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/type.go b/vendor/github.com/goccy/go-json/internal/decoder/type.go new file mode 100644 index 0000000000..70e9907c83 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/type.go @@ -0,0 +1,29 @@ +package decoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "unsafe" +) + +type Decoder interface { + Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error) + DecodeStream(*Stream, int64, unsafe.Pointer) error +} + +const ( + nul = '\000' + maxDecodeNestingDepth = 10000 +) + +type unmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +var ( + unmarshalJSONType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + unmarshalJSONContextType = reflect.TypeOf((*unmarshalerContext)(nil)).Elem() + unmarshalTextType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/uint.go b/vendor/github.com/goccy/go-json/internal/decoder/uint.go new file mode 100644 index 0000000000..a62c514928 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/uint.go @@ -0,0 +1,190 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type uintDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, uint64) + structName string + fieldName string +} + +func newUintDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, uint64)) *uintDecoder { + return &uintDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *uintDecoder) 
typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Offset: offset, + } +} + +var ( + pow10u64 = [...]uint64{ + 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + pow10u64Len = len(pow10u64) +) + +func (d *uintDecoder) parseUint(b []byte) (uint64, error) { + maxDigit := len(b) + if maxDigit > pow10u64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := uint64(0) + for i := 0; i < maxDigit; i++ { + c := uint64(b[i]) - 48 + digitValue := pow10u64[maxDigit-i-1] + sum += c * digitValue + } + return sum, nil +} + +func (d *uintDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + break + } + return nil, errors.ErrUnexpectedEndOfJSON("number(unsigned integer)", s.totalOffset()) +} + +func (d *uintDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := 
validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{buf[cursor]}, cursor) + } + } +} + +func (d *uintDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + u64, err := d.parseUint(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, u64) + return nil +} + +func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + u64, err := d.parseUint(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, u64) + return cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go new file mode 100644 index 0000000000..e9b25c68fc --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go @@ -0,0 +1,99 @@ +package decoder + +import ( + "context" + "encoding/json" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalJSONDecoder struct 
{ + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalJSONDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalJSONDecoder { + return &unmarshalJSONDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalJSONDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +func (d *unmarshalJSONDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + switch v := v.(type) { + case unmarshalerContext: + var ctx context.Context + if (s.Option.Flags & ContextOption) != 0 { + ctx = s.Option.Context + } else { + ctx = context.Background() + } + if err := v.UnmarshalJSON(ctx, dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + case json.Unmarshaler: + if err := v.UnmarshalJSON(dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + } + return nil +} + +func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if (ctx.Option.Flags & ContextOption) != 0 { + if err := v.(unmarshalerContext).UnmarshalJSON(ctx.Option.Context, dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } else { + if err := v.(json.Unmarshaler).UnmarshalJSON(dst); 
err != nil { + d.annotateError(cursor, err) + return 0, err + } + } + return end, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go new file mode 100644 index 0000000000..1ef2877829 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go @@ -0,0 +1,280 @@ +package decoder + +import ( + "bytes" + "encoding" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalTextDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalTextDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalTextDecoder { + return &unmarshalTextDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalTextDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +var ( + nullbytes = []byte(`null`) +) + +func (d *unmarshalTextDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + 
} + } + } + dst := make([]byte, len(src)) + copy(dst, src) + + if b, ok := unquoteBytes(dst); ok { + dst = b + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + return nil +} + +func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + } + } + + if s, ok := unquoteBytes(src); ok { + src = s + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(src); err != nil { + d.annotateError(cursor, err) + return 0, err + } + return end, nil +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + length := len(s) + if length < 2 || s[0] != '"' || s[length-1] != '"' { + return + } + s = s[1 : length-1] + length -= 2 + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. 
+ r := 0 + for r < length { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == length { + return s, true + } + + b := make([]byte, length+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < length { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= length { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} + +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go new file mode 100644 index 0000000000..66227ae021 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go @@ -0,0 +1,68 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type wrappedStringDecoder struct { + typ *runtime.Type + dec Decoder + stringDecoder *stringDecoder + structName string + fieldName string + isPtrType bool +} + +func newWrappedStringDecoder(typ *runtime.Type, dec Decoder, structName, fieldName string) *wrappedStringDecoder { + return &wrappedStringDecoder{ + typ: typ, + dec: dec, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + isPtrType: typ.Kind() == reflect.Ptr, + } +} + +func (d *wrappedStringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return nil + } + b := make([]byte, len(bytes)+1) + copy(b, bytes) + if _, err := d.dec.Decode(&RuntimeContext{Buf: b}, 0, depth, p); err != nil { + return err + } + return nil +} + +func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.stringDecoder.decodeByte(ctx.Buf, cursor) + 
if err != nil { + return 0, err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return c, nil + } + bytes = append(bytes, nul) + oldBuf := ctx.Buf + ctx.Buf = bytes + if _, err := d.dec.Decode(ctx, 0, depth, p); err != nil { + return 0, err + } + ctx.Buf = oldBuf + return c, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go new file mode 100644 index 0000000000..8d62a9cd53 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go @@ -0,0 +1,1017 @@ +package encoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type Code interface { + Kind() CodeKind + ToOpcode(*compileContext) Opcodes + Filter(*FieldQuery) Code +} + +type AnonymousCode interface { + ToAnonymousOpcode(*compileContext) Opcodes +} + +type Opcodes []*Opcode + +func (o Opcodes) First() *Opcode { + if len(o) == 0 { + return nil + } + return o[0] +} + +func (o Opcodes) Last() *Opcode { + if len(o) == 0 { + return nil + } + return o[len(o)-1] +} + +func (o Opcodes) Add(codes ...*Opcode) Opcodes { + return append(o, codes...) 
+} + +type CodeKind int + +const ( + CodeKindInterface CodeKind = iota + CodeKindPtr + CodeKindInt + CodeKindUint + CodeKindFloat + CodeKindString + CodeKindBool + CodeKindStruct + CodeKindMap + CodeKindSlice + CodeKindArray + CodeKindBytes + CodeKindMarshalJSON + CodeKindMarshalText + CodeKindRecursive +) + +type IntCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *IntCode) Kind() CodeKind { + return CodeKindInt +} + +func (c *IntCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpIntPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpIntString) + default: + code = newOpCode(ctx, c.typ, OpInt) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *IntCode) Filter(_ *FieldQuery) Code { + return c +} + +type UintCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *UintCode) Kind() CodeKind { + return CodeKindUint +} + +func (c *UintCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpUintPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpUintString) + default: + code = newOpCode(ctx, c.typ, OpUint) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *UintCode) Filter(_ *FieldQuery) Code { + return c +} + +type FloatCode struct { + typ *runtime.Type + bitSize uint8 + isPtr bool +} + +func (c *FloatCode) Kind() CodeKind { + return CodeKindFloat +} + +func (c *FloatCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32Ptr) + default: + code = newOpCode(ctx, c.typ, OpFloat64Ptr) + } + default: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32) + default: + code = newOpCode(ctx, c.typ, OpFloat64) + } + } + ctx.incIndex() + return 
Opcodes{code} +} + +func (c *FloatCode) Filter(_ *FieldQuery) Code { + return c +} + +type StringCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *StringCode) Kind() CodeKind { + return CodeKindString +} + +func (c *StringCode) ToOpcode(ctx *compileContext) Opcodes { + isJSONNumberType := c.typ == runtime.Type2RType(jsonNumberType) + var code *Opcode + if c.isPtr { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumberPtr) + } else { + code = newOpCode(ctx, c.typ, OpStringPtr) + } + } else { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumber) + } else { + code = newOpCode(ctx, c.typ, OpString) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *StringCode) Filter(_ *FieldQuery) Code { + return c +} + +type BoolCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BoolCode) Kind() CodeKind { + return CodeKindBool +} + +func (c *BoolCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBoolPtr) + default: + code = newOpCode(ctx, c.typ, OpBool) + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *BoolCode) Filter(_ *FieldQuery) Code { + return c +} + +type BytesCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BytesCode) Kind() CodeKind { + return CodeKindBytes +} + +func (c *BytesCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBytesPtr) + default: + code = newOpCode(ctx, c.typ, OpBytes) + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *BytesCode) Filter(_ *FieldQuery) Code { + return c +} + +type SliceCode struct { + typ *runtime.Type + value Code +} + +func (c *SliceCode) Kind() CodeKind { + return CodeKindSlice +} + +func (c *SliceCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + size := c.typ.Elem().Size() + header := newSliceHeaderCode(ctx, c.typ) + ctx.incIndex() + + 
ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + elemCode := newSliceElemCode(ctx, c.typ.Elem(), header, size) + ctx.incIndex() + end := newOpCode(ctx, c.typ, OpSliceEnd) + ctx.incIndex() + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *SliceCode) Filter(_ *FieldQuery) Code { + return c +} + +type ArrayCode struct { + typ *runtime.Type + value Code +} + +func (c *ArrayCode) Kind() CodeKind { + return CodeKindArray +} + +func (c *ArrayCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + elem := c.typ.Elem() + alen := c.typ.Len() + size := elem.Size() + + header := newArrayHeaderCode(ctx, c.typ, alen) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + + elemCode := newArrayElemCode(ctx, elem, header, alen, size) + ctx.incIndex() + + end := newOpCode(ctx, c.typ, OpArrayEnd) + ctx.incIndex() + + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *ArrayCode) Filter(_ *FieldQuery) Code { + return c +} + +type MapCode struct { + typ *runtime.Type + key Code + value Code +} + +func (c *MapCode) Kind() CodeKind { + return CodeKindMap +} + +func (c *MapCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => value => code => key => code => value => code => end + // ^ | + // |_______________________| + header := newMapHeaderCode(ctx, c.typ) + ctx.incIndex() + + keyCodes := c.key.ToOpcode(ctx) + + value := newMapValueCode(ctx, c.typ.Elem(), header) + ctx.incIndex() + + ctx.incIndent() + valueCodes := c.value.ToOpcode(ctx) + ctx.decIndent() + + 
valueCodes.First().Flags |= IndirectFlags + + key := newMapKeyCode(ctx, c.typ.Key(), header) + ctx.incIndex() + + end := newMapEndCode(ctx, c.typ, header) + ctx.incIndex() + + header.Next = keyCodes.First() + keyCodes.Last().Next = value + value.Next = valueCodes.First() + valueCodes.Last().Next = key + key.Next = keyCodes.First() + + header.End = end + key.End = end + value.End = end + return Opcodes{header}.Add(keyCodes...).Add(value).Add(valueCodes...).Add(key).Add(end) +} + +func (c *MapCode) Filter(_ *FieldQuery) Code { + return c +} + +type StructCode struct { + typ *runtime.Type + fields []*StructFieldCode + isPtr bool + disableIndirectConversion bool + isIndirect bool + isRecursive bool +} + +func (c *StructCode) Kind() CodeKind { + return CodeKindStruct +} + +func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) *Opcode { + if isEmbeddedStruct(field) { + return c.lastAnonymousFieldCode(firstField) + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) *Opcode { + // firstField is special StructHead operation for anonymous structure. + // So, StructHead's next operation is truly struct head operation. 
+ lastField := firstField.Next + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + ctx.incIndent() + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + endField := fieldCodes.Last() + if isEmbeddedStruct(field) { + firstField.End = endField + lastField := c.lastAnonymousFieldCode(firstField) + lastField.NextField = endField + } + if len(codes) > 0 { + codes.First().End = endField + } else { + firstField.End = endField + } + codes = codes.Add(fieldCodes...) + break + } + prevField = c.lastFieldCode(field, firstField) + codes = codes.Add(fieldCodes...) 
+ } + if len(codes) == 0 { + head := &Opcode{ + Op: OpStructHead, + Idx: opcodeOffset(ctx.ptrIndex), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + ctx.incOpcodeIndex() + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + head.NextField = end + head.Next = end + head.End = end + codes = codes.Add(head, end) + ctx.incIndex() + } + ctx.decIndent() + ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes + return codes +} + +func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToAnonymousOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + lastField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = lastField + } else { + firstField.End = lastField + } + } + prevField = firstField + codes = codes.Add(fieldCodes...) 
+ } + return codes +} + +func (c *StructCode) removeFieldsByTags(tags runtime.StructTags) { + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.removeFieldsByTags(tags) + if len(structCode.fields) > 0 { + fields = append(fields, field) + } + continue + } + } + if tags.ExistsKey(field.key) { + continue + } + fields = append(fields, field) + } + c.fields = fields +} + +func (c *StructCode) enableIndirect() { + if c.isIndirect { + return + } + c.isIndirect = true + if len(c.fields) == 0 { + return + } + structCode := c.fields[0].getStruct() + if structCode == nil { + return + } + structCode.enableIndirect() +} + +func (c *StructCode) Filter(query *FieldQuery) Code { + fieldMap := map[string]*FieldQuery{} + for _, field := range query.Fields { + fieldMap[field.Name] = field + } + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + query, exists := fieldMap[field.key] + if !exists { + continue + } + fieldCode := &StructFieldCode{ + typ: field.typ, + key: field.key, + tag: field.tag, + value: field.value, + offset: field.offset, + isAnonymous: field.isAnonymous, + isTaggedKey: field.isTaggedKey, + isNilableType: field.isNilableType, + isNilCheck: field.isNilCheck, + isAddrForMarshaler: field.isAddrForMarshaler, + isNextOpPtrType: field.isNextOpPtrType, + } + if len(query.Fields) > 0 { + fieldCode.value = fieldCode.value.Filter(query) + } + fields = append(fields, fieldCode) + } + return &StructCode{ + typ: c.typ, + fields: fields, + isPtr: c.isPtr, + disableIndirectConversion: c.disableIndirectConversion, + isIndirect: c.isIndirect, + isRecursive: c.isRecursive, + } +} + +type StructFieldCode struct { + typ *runtime.Type + key string + tag *runtime.StructTag + value Code + offset uintptr + isAnonymous bool + isTaggedKey bool + isNilableType bool + isNilCheck bool + 
isAddrForMarshaler bool + isNextOpPtrType bool + isMarshalerContext bool +} + +func (c *StructFieldCode) getStruct() *StructCode { + value := c.value + ptr, ok := value.(*PtrCode) + if ok { + value = ptr.value + } + structCode, ok := value.(*StructCode) + if ok { + return structCode + } + return nil +} + +func (c *StructFieldCode) getAnonymousStruct() *StructCode { + if !c.isAnonymous { + return nil + } + return c.getStruct() +} + +func optimizeStructHeader(code *Opcode, tag *runtime.StructTag) OpType { + headType := code.ToHeaderType(tag.IsString) + if tag.IsOmitEmpty { + headType = headType.HeadToOmitEmptyHead() + } + return headType +} + +func optimizeStructField(code *Opcode, tag *runtime.StructTag) OpType { + fieldType := code.ToFieldType(tag.IsString) + if tag.IsOmitEmpty { + fieldType = fieldType.FieldToOmitEmptyField() + } + return fieldType +} + +func (c *StructFieldCode) headerOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructHeader(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + fieldCodes := Opcodes{field} + if op.IsMultipleOpHead() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) + } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) fieldOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructField(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + + fieldCodes := Opcodes{field} + if op.IsMultipleOpField() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) 
+ } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) Opcodes { + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + codes.Last().Next = end + codes.First().NextField = end + codes = codes.Add(end) + ctx.incOpcodeIndex() + return codes +} + +func (c *StructFieldCode) structKey(ctx *compileContext) string { + if ctx.escapeKey { + rctx := &RuntimeContext{Option: &Option{Flag: HTMLEscapeOption}} + return fmt.Sprintf(`%s:`, string(AppendString(rctx, []byte{}, c.key))) + } + return fmt.Sprintf(`"%s":`, c.key) +} + +func (c *StructFieldCode) flags() OpFlags { + var flags OpFlags + if c.isTaggedKey { + flags |= IsTaggedKeyFlags + } + if c.isNilableType { + flags |= IsNilableTypeFlags + } + if c.isNilCheck { + flags |= NilCheckFlags + } + if c.isAddrForMarshaler { + flags |= AddrForMarshalerFlags + } + if c.isNextOpPtrType { + flags |= IsNextOpPtrTypeFlags + } + if c.isAnonymous { + flags |= AnonymousKeyFlags + } + if c.isMarshalerContext { + flags |= MarshalerContextFlags + } + return flags +} + +func (c *StructFieldCode) toValueOpcodes(ctx *compileContext) Opcodes { + if c.isAnonymous { + anonymCode, ok := c.value.(AnonymousCode) + if ok { + return anonymCode.ToAnonymousOpcode(ctx) + } + } + return c.value.ToOpcode(ctx) +} + +func (c *StructFieldCode) ToOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags(), + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + codes := c.headerOpcodes(ctx, field, valueCodes) + if isEndField { + codes = c.addStructEndCode(ctx, codes) + } + return codes + } + codes := c.fieldOpcodes(ctx, field, valueCodes) + if isEndField { + if 
isEnableStructEndOptimization(c.value) { + field.Op = field.Op.FieldToEnd() + } else { + codes = c.addStructEndCode(ctx, codes) + } + } + return codes +} + +func (c *StructFieldCode) ToAnonymousOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags() | AnonymousHeadFlags, + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + return c.headerOpcodes(ctx, field, valueCodes) + } + return c.fieldOpcodes(ctx, field, valueCodes) +} + +func isEnableStructEndOptimization(value Code) bool { + switch value.Kind() { + case CodeKindInt, + CodeKindUint, + CodeKindFloat, + CodeKindString, + CodeKindBool, + CodeKindBytes: + return true + case CodeKindPtr: + return isEnableStructEndOptimization(value.(*PtrCode).value) + default: + return false + } +} + +type InterfaceCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isPtr bool +} + +func (c *InterfaceCode) Kind() CodeKind { + return CodeKindInterface +} + +func (c *InterfaceCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpInterfacePtr) + default: + code = newOpCode(ctx, c.typ, OpInterface) + } + code.FieldQuery = c.fieldQuery + if c.typ.NumMethod() > 0 { + code.Flags |= NonEmptyInterfaceFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *InterfaceCode) Filter(query *FieldQuery) Code { + return &InterfaceCode{ + typ: c.typ, + fieldQuery: query, + isPtr: c.isPtr, + } +} + +type MarshalJSONCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool + isMarshalerContext bool +} + +func (c *MarshalJSONCode) Kind() CodeKind { + return CodeKindMarshalJSON +} + +func (c *MarshalJSONCode) ToOpcode(ctx *compileContext) Opcodes { + code := 
newOpCode(ctx, c.typ, OpMarshalJSON) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isMarshalerContext { + code.Flags |= MarshalerContextFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalJSONCode) Filter(query *FieldQuery) Code { + return &MarshalJSONCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + isMarshalerContext: c.isMarshalerContext, + } +} + +type MarshalTextCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool +} + +func (c *MarshalTextCode) Kind() CodeKind { + return CodeKindMarshalText +} + +func (c *MarshalTextCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalText) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalTextCode) Filter(query *FieldQuery) Code { + return &MarshalTextCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + } +} + +type PtrCode struct { + typ *runtime.Type + value Code + ptrNum uint8 +} + +func (c *PtrCode) Kind() CodeKind { + return CodeKindPtr +} + +func (c *PtrCode) ToOpcode(ctx *compileContext) Opcodes { + codes := c.value.ToOpcode(ctx) + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + var codes Opcodes + anonymCode, ok := c.value.(AnonymousCode) + if ok { + codes = anonymCode.ToAnonymousOpcode(ctx) + } else { + codes = c.value.ToOpcode(ctx) + } + codes.First().Op = 
convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) Filter(query *FieldQuery) Code { + return &PtrCode{ + typ: c.typ, + value: c.value.Filter(query), + ptrNum: c.ptrNum, + } +} + +func convertPtrOp(code *Opcode) OpType { + ptrHeadOp := code.Op.HeadToPtrHead() + if code.Op != ptrHeadOp { + if code.PtrNum > 0 { + // ptr field and ptr head + code.PtrNum-- + } + return ptrHeadOp + } + switch code.Op { + case OpInt: + return OpIntPtr + case OpUint: + return OpUintPtr + case OpFloat32: + return OpFloat32Ptr + case OpFloat64: + return OpFloat64Ptr + case OpString: + return OpStringPtr + case OpBool: + return OpBoolPtr + case OpBytes: + return OpBytesPtr + case OpNumber: + return OpNumberPtr + case OpArray: + return OpArrayPtr + case OpSlice: + return OpSlicePtr + case OpMap: + return OpMapPtr + case OpMarshalJSON: + return OpMarshalJSONPtr + case OpMarshalText: + return OpMarshalTextPtr + case OpInterface: + return OpInterfacePtr + case OpRecursive: + return OpRecursivePtr + } + return code.Op +} + +func isEmbeddedStruct(field *StructFieldCode) bool { + if !field.isAnonymous { + return false + } + t := field.typ + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Kind() == reflect.Struct +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compact.go b/vendor/github.com/goccy/go-json/internal/encoder/compact.go new file mode 100644 index 0000000000..0eb9545d89 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compact.go @@ -0,0 +1,286 @@ +package encoder + +import ( + "bytes" + "fmt" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +var ( + isWhiteSpace = [256]bool{ + ' ': true, + '\n': true, + '\t': true, + '\r': true, + } + isHTMLEscapeChar = [256]bool{ + '<': true, + '>': true, + '&': true, + } + nul = byte('\000') +) + +func Compact(buf *bytes.Buffer, src []byte, escape bool) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + 
buf.Grow(len(src)) + dst := buf.Bytes() + + ctx := TakeRuntimeContext() + ctxBuf := ctx.Buf[:0] + ctxBuf = append(append(ctxBuf, src...), nul) + ctx.Buf = ctxBuf + + if err := compactAndWrite(buf, dst, ctxBuf, escape); err != nil { + ReleaseRuntimeContext(ctx) + return err + } + ReleaseRuntimeContext(ctx) + return nil +} + +func compactAndWrite(buf *bytes.Buffer, dst []byte, src []byte, escape bool) error { + dst, err := compact(dst, src, escape) + if err != nil { + return err + } + if _, err := buf.Write(dst); err != nil { + return err + } + return nil +} + +func compact(dst, src []byte, escape bool) ([]byte, error) { + buf, cursor, err := compactValue(dst, src, 0, escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { +LOOP: + if isWhiteSpace[buf[cursor]] { + cursor++ + goto LOOP + } + return cursor +} + +func compactValue(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return compactObject(dst, src, cursor, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return compactArray(dst, src, cursor, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 'f': + return compactFalse(dst, src, cursor) + case 'n': + 
return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func compactObject(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + var err error + for { + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + dst = append(dst, ':') + dst, cursor, err = compactValue(dst, src, cursor+1, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func compactArray(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + var err error + for { + dst, cursor, err = compactValue(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after array value", cursor) + } + 
cursor++ + } +} + +func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] != '"' { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "string", cursor) + } + start := cursor + for { + cursor++ + c := src[cursor] + if escape { + if isHTMLEscapeChar[c] { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = cursor + 1 + } else if c == 0xE2 && cursor+2 < int64(len(src)) && src[cursor+1] == 0x80 && src[cursor+2]&^1 == 0xA8 { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[cursor+2]&0xF]) + cursor += 2 + start = cursor + 3 + } + } + switch c { + case '\\': + cursor++ + if src[cursor] == nul { + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + case '"': + cursor++ + return append(dst, src[start:cursor]...), cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + } +} + +func compactNumber(dst, src []byte, cursor int64) ([]byte, int64, error) { + start := cursor + for { + cursor++ + if floatTable[src[cursor]] { + continue + } + break + } + num := src[start:cursor] + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&num)), 64); err != nil { + return nil, 0, err + } + dst = append(dst, num...) + return dst, cursor, nil +} + +func compactTrue(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`true`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "true", cursor) + } + dst = append(dst, "true"...) 
+ cursor += 4 + return dst, cursor, nil +} + +func compactFalse(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+4 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if !bytes.Equal(src[cursor:cursor+5], []byte(`false`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "false", cursor) + } + dst = append(dst, "false"...) + cursor += 5 + return dst, cursor, nil +} + +func compactNull(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`null`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "null", cursor) + } + dst = append(dst, "null"...) + cursor += 4 + return dst, cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go new file mode 100644 index 0000000000..bf5e0f9475 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -0,0 +1,930 @@ +package encoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "sync/atomic" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type marshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +var ( + marshalJSONType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + marshalJSONContextType = reflect.TypeOf((*marshalerContext)(nil)).Elem() + marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + jsonNumberType = reflect.TypeOf(json.Number("")) + cachedOpcodeSets []*OpcodeSet + cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet + typeAddr *runtime.TypeAddr +) + +func init() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) 
+} + +func loadOpcodeMap() map[uintptr]*OpcodeSet { + p := atomic.LoadPointer(&cachedOpcodeMap) + return *(*map[uintptr]*OpcodeSet)(unsafe.Pointer(&p)) +} + +func storeOpcodeSet(typ uintptr, set *OpcodeSet, m map[uintptr]*OpcodeSet) { + newOpcodeMap := make(map[uintptr]*OpcodeSet, len(m)+1) + newOpcodeMap[typ] = set + + for k, v := range m { + newOpcodeMap[k] = v + } + + atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap))) +} + +func compileToGetCodeSetSlowPath(typeptr uintptr) (*OpcodeSet, error) { + opcodeMap := loadOpcodeMap() + if codeSet, exists := opcodeMap[typeptr]; exists { + return codeSet, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + storeOpcodeSet(typeptr, codeSet, opcodeMap) + return codeSet, nil +} + +func getFilteredCodeSetIfNeeded(ctx *RuntimeContext, codeSet *OpcodeSet) (*OpcodeSet, error) { + if (ctx.Option.Flag & ContextOption) == 0 { + return codeSet, nil + } + query := FieldQueryFromContext(ctx.Option.Context) + if query == nil { + return codeSet, nil + } + ctx.Option.Flag |= FieldQueryOption + cacheCodeSet := codeSet.getQueryCache(query.Hash()) + if cacheCodeSet != nil { + return cacheCodeSet, nil + } + queryCodeSet, err := newCompiler().codeToOpcodeSet(codeSet.Type, codeSet.Code.Filter(query)) + if err != nil { + return nil, err + } + codeSet.setQueryCache(query.Hash(), queryCodeSet) + return queryCodeSet, nil +} + +type Compiler struct { + structTypeToCode map[uintptr]*StructCode +} + +func newCompiler() *Compiler { + return &Compiler{ + structTypeToCode: map[uintptr]*StructCode{}, + } +} + +func (c *Compiler) compile(typeptr uintptr) (*OpcodeSet, error) { + // noescape trick for header.typ ( reflect.*rtype ) + typ := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + code, err := c.typeToCode(typ) + if err != nil { + return nil, err + } + return c.codeToOpcodeSet(typ, code) +} + +func (c *Compiler) codeToOpcodeSet(typ *runtime.Type, code Code) (*OpcodeSet, 
error) { + noescapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + }, typ, code) + if err := noescapeKeyCode.Validate(); err != nil { + return nil, err + } + escapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + escapeKey: true, + }, typ, code) + noescapeKeyCode = copyOpcode(noescapeKeyCode) + escapeKeyCode = copyOpcode(escapeKeyCode) + setTotalLengthToInterfaceOp(noescapeKeyCode) + setTotalLengthToInterfaceOp(escapeKeyCode) + interfaceNoescapeKeyCode := copyToInterfaceOpcode(noescapeKeyCode) + interfaceEscapeKeyCode := copyToInterfaceOpcode(escapeKeyCode) + codeLength := noescapeKeyCode.TotalLength() + return &OpcodeSet{ + Type: typ, + NoescapeKeyCode: noescapeKeyCode, + EscapeKeyCode: escapeKeyCode, + InterfaceNoescapeKeyCode: interfaceNoescapeKeyCode, + InterfaceEscapeKeyCode: interfaceEscapeKeyCode, + CodeLength: codeLength, + EndCode: ToEndCode(interfaceNoescapeKeyCode), + Code: code, + QueryCache: map[string]*OpcodeSet{}, + }, nil +} + +func (c *Compiler) typeToCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + + isPtr := false + orgType := typ + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + isPtr = true + } + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(orgType) + case c.implementsMarshalText(typ): + return c.marshalTextCode(orgType) + } + switch typ.Kind() { + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, isPtr) + } + } + return c.sliceCode(typ) + case reflect.Map: + if isPtr { + return c.ptrCode(runtime.PtrTo(typ)) + } + return c.mapCode(typ) + case reflect.Struct: + return 
c.structCode(typ, isPtr) + case reflect.Int: + return c.intCode(typ, isPtr) + case reflect.Int8: + return c.int8Code(typ, isPtr) + case reflect.Int16: + return c.int16Code(typ, isPtr) + case reflect.Int32: + return c.int32Code(typ, isPtr) + case reflect.Int64: + return c.int64Code(typ, isPtr) + case reflect.Uint, reflect.Uintptr: + return c.uintCode(typ, isPtr) + case reflect.Uint8: + return c.uint8Code(typ, isPtr) + case reflect.Uint16: + return c.uint16Code(typ, isPtr) + case reflect.Uint32: + return c.uint32Code(typ, isPtr) + case reflect.Uint64: + return c.uint64Code(typ, isPtr) + case reflect.Float32: + return c.float32Code(typ, isPtr) + case reflect.Float64: + return c.float64Code(typ, isPtr) + case reflect.String: + return c.stringCode(typ, isPtr) + case reflect.Bool: + return c.boolCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, isPtr) + default: + if isPtr && typ.Implements(marshalTextType) { + typ = orgType + } + return c.typeToCodeWithPtr(typ, isPtr) + } +} + +func (c *Compiler) typeToCodeWithPtr(typ *runtime.Type, isPtr bool) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, false) + } + } + return c.sliceCode(typ) + case reflect.Array: + return c.arrayCode(typ) + case reflect.Map: + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, false) + case reflect.Int: + return c.intCode(typ, false) + case reflect.Int8: + return c.int8Code(typ, false) + case reflect.Int16: + return c.int16Code(typ, false) + case reflect.Int32: + return c.int32Code(typ, false) + case 
reflect.Int64: + return c.int64Code(typ, false) + case reflect.Uint: + return c.uintCode(typ, false) + case reflect.Uint8: + return c.uint8Code(typ, false) + case reflect.Uint16: + return c.uint16Code(typ, false) + case reflect.Uint32: + return c.uint32Code(typ, false) + case reflect.Uint64: + return c.uint64Code(typ, false) + case reflect.Uintptr: + return c.uintCode(typ, false) + case reflect.Float32: + return c.float32Code(typ, false) + case reflect.Float64: + return c.float64Code(typ, false) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Bool: + return c.boolCode(typ, false) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +const intSize = 32 << (^uint(0) >> 63) + +//nolint:unparam +func (c *Compiler) intCode(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int8Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int16Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int32Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int64Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uintCode(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint8Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint16Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + 
return &UintCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint32Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint64Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float32Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float64Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) stringCode(typ *runtime.Type, isPtr bool) (*StringCode, error) { + return &StringCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) boolCode(typ *runtime.Type, isPtr bool) (*BoolCode, error) { + return &BoolCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) intStringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int8StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int16StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int32StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int64StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uintStringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isString: 
true}, nil +} + +//nolint:unparam +func (c *Compiler) uint8StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint16StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint32StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint64StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) bytesCode(typ *runtime.Type, isPtr bool) (*BytesCode, error) { + return &BytesCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) interfaceCode(typ *runtime.Type, isPtr bool) (*InterfaceCode, error) { + return &InterfaceCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) marshalJSONCode(typ *runtime.Type) (*MarshalJSONCode, error) { + return &MarshalJSONCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalJSONType(typ), + isNilableType: c.isNilableType(typ), + isMarshalerContext: typ.Implements(marshalJSONContextType) || runtime.PtrTo(typ).Implements(marshalJSONContextType), + }, nil +} + +//nolint:unparam +func (c *Compiler) marshalTextCode(typ *runtime.Type) (*MarshalTextCode, error) { + return &MarshalTextCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalTextType(typ), + isNilableType: c.isNilableType(typ), + }, nil +} + +func (c *Compiler) ptrCode(typ *runtime.Type) (*PtrCode, error) { + code, err := c.typeToCodeWithPtr(typ.Elem(), true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + return &PtrCode{typ: typ, value: ptr.value, ptrNum: ptr.ptrNum + 1}, nil + } + return &PtrCode{typ: typ, value: code, ptrNum: 1}, nil +} + +func (c *Compiler) sliceCode(typ *runtime.Type) (*SliceCode, 
error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &SliceCode{typ: typ, value: code}, nil +} + +func (c *Compiler) arrayCode(typ *runtime.Type) (*ArrayCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &ArrayCode{typ: typ, value: code}, nil +} + +func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) { + keyCode, err := c.mapKeyCode(typ.Key()) + if err != nil { + return nil, err + } + valueCode, err := c.mapValueCode(typ.Elem()) + if err != nil { + return nil, err + } + if valueCode.Kind() == CodeKindStruct { + structCode := valueCode.(*StructCode) + structCode.enableIndirect() + } + return &MapCode{typ: typ, key: keyCode, value: valueCode}, nil +} + +func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { + switch { + case c.isPtrMarshalJSONType(typ): + return c.marshalJSONCode(typ) + case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType): + return c.marshalTextCode(typ) + case typ.Kind() == reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + // isPtr was originally used to indicate whether the type of top level is pointer. + // However, since the slice/array element is a specification that can get the pointer address, explicitly set isPtr to true. 
+ // See here for related issues: https://github.com/goccy/go-json/issues/370 + code, err := c.typeToCodeWithPtr(typ, true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Int: + return c.intStringCode(typ) + case reflect.Int8: + return c.int8StringCode(typ) + case reflect.Int16: + return c.int16StringCode(typ) + case reflect.Int32: + return c.int32StringCode(typ) + case reflect.Int64: + return c.int64StringCode(typ) + case reflect.Uint: + return c.uintStringCode(typ) + case reflect.Uint8: + return c.uint8StringCode(typ) + case reflect.Uint16: + return c.uint16StringCode(typ) + case reflect.Uint32: + return c.uint32StringCode(typ) + case reflect.Uint64: + return c.uint64StringCode(typ) + case reflect.Uintptr: + return c.uintStringCode(typ) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +func (c *Compiler) mapValueCode(typ *runtime.Type) (Code, error) { + switch typ.Kind() { + case reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + code, err := c.typeToCodeWithPtr(typ, false) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if code, exists := c.structTypeToCode[typeptr]; exists { + derefCode := *code + derefCode.isRecursive = true + return &derefCode, nil + } + indirect := runtime.IfaceIndir(typ) + code 
:= &StructCode{typ: typ, isPtr: isPtr, isIndirect: indirect} + c.structTypeToCode[typeptr] = code + + fieldNum := typ.NumField() + tags := c.typeToStructTags(typ) + fields := []*StructFieldCode{} + for i, tag := range tags { + isOnlyOneFirstField := i == 0 && fieldNum == 1 + field, err := c.structFieldCode(code, tag, isPtr, isOnlyOneFirstField) + if err != nil { + return nil, err + } + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil { + structCode.removeFieldsByTags(tags) + if c.isAssignableIndirect(field, isPtr) { + if indirect { + structCode.isIndirect = true + } else { + structCode.isIndirect = false + } + } + } + } else { + structCode := field.getStruct() + if structCode != nil { + if indirect { + // if parent is indirect type, set child indirect property to true + structCode.isIndirect = true + } else { + // if parent is not indirect type, set child indirect property to false. + // but if parent's indirect is false and isPtr is true, then indirect must be true. + // Do this only if indirectConversion is enabled at the end of compileStruct. 
+ structCode.isIndirect = false + } + } + } + fields = append(fields, field) + } + fieldMap := c.getFieldMap(fields) + duplicatedFieldMap := c.getDuplicatedFieldMap(fieldMap) + code.fields = c.filteredDuplicatedFields(fields, duplicatedFieldMap) + if !code.disableIndirectConversion && !indirect && isPtr { + code.enableIndirect() + } + delete(c.structTypeToCode, typeptr) + return code, nil +} + +func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) { + field := tag.Field + fieldType := runtime.Type2RType(field.Type) + isIndirectSpecialCase := isPtr && isOnlyOneFirstField + fieldCode := &StructFieldCode{ + typ: fieldType, + key: tag.Key, + tag: tag, + offset: field.Offset, + isAnonymous: field.Anonymous && !tag.IsTaggedKey, + isTaggedKey: tag.IsTaggedKey, + isNilableType: c.isNilableType(fieldType), + isNilCheck: true, + } + switch { + case c.isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case c.isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case isPtr && c.isPtrMarshalJSONType(fieldType): + // *struct{ field T } + // func (*T) MarshalJSON() ([]byte, error) + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + case isPtr && c.isPtrMarshalTextType(fieldType): + // 
*struct{ field T } + // func (*T) MarshalText() ([]byte, error) + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + default: + code, err := c.typeToCodeWithPtr(fieldType, isPtr) + if err != nil { + return nil, err + } + switch code.Kind() { + case CodeKindPtr, CodeKindInterface: + fieldCode.isNextOpPtrType = true + } + fieldCode.value = code + } + return fieldCode, nil +} + +func (c *Compiler) isAssignableIndirect(fieldCode *StructFieldCode, isPtr bool) bool { + if isPtr { + return false + } + codeType := fieldCode.value.Kind() + if codeType == CodeKindMarshalJSON { + return false + } + if codeType == CodeKindMarshalText { + return false + } + return true +} + +func (c *Compiler) getFieldMap(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + fieldMap[k] = append(fieldMap[k], v...) + } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getAnonymousFieldMap(field *StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + structCode := field.getAnonymousStruct() + if structCode == nil || structCode.isRecursive { + fieldMap[field.key] = append(fieldMap[field.key], field) + return fieldMap + } + for k, v := range c.getFieldMapFromAnonymousParent(structCode.fields) { + fieldMap[k] = append(fieldMap[k], v...) 
+ } + return fieldMap +} + +func (c *Compiler) getFieldMapFromAnonymousParent(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + // Do not handle tagged key when embedding more than once + for _, vv := range v { + vv.isTaggedKey = false + } + fieldMap[k] = append(fieldMap[k], v...) + } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getDuplicatedFieldMap(fieldMap map[string][]*StructFieldCode) map[*StructFieldCode]struct{} { + duplicatedFieldMap := map[*StructFieldCode]struct{}{} + for _, fields := range fieldMap { + if len(fields) == 1 { + continue + } + if c.isTaggedKeyOnly(fields) { + for _, field := range fields { + if field.isTaggedKey { + continue + } + duplicatedFieldMap[field] = struct{}{} + } + } else { + for _, field := range fields { + duplicatedFieldMap[field] = struct{}{} + } + } + } + return duplicatedFieldMap +} + +func (c *Compiler) filteredDuplicatedFields(fields []*StructFieldCode, duplicatedFieldMap map[*StructFieldCode]struct{}) []*StructFieldCode { + filteredFields := make([]*StructFieldCode, 0, len(fields)) + for _, field := range fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.fields = c.filteredDuplicatedFields(structCode.fields, duplicatedFieldMap) + if len(structCode.fields) > 0 { + filteredFields = append(filteredFields, field) + } + continue + } + } + if _, exists := duplicatedFieldMap[field]; exists { + continue + } + filteredFields = append(filteredFields, field) + } + return filteredFields +} + +func (c *Compiler) isTaggedKeyOnly(fields []*StructFieldCode) bool { + var taggedKeyFieldCount int + for _, field := range fields { + if field.isTaggedKey { + taggedKeyFieldCount++ + } + } + return 
taggedKeyFieldCount == 1 +} + +func (c *Compiler) typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalJSON() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalJSONType(typ) +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalText() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalTextType(typ) +} + +func (c *Compiler) implementsMarshalJSON(typ *runtime.Type) bool { + if !c.implementsMarshalJSONType(typ) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !c.implementsMarshalJSONType(typ.Elem()) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) implementsMarshalText(typ *runtime.Type) bool { + if !typ.Implements(marshalTextType) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !typ.Elem().Implements(marshalTextType) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) isNilableType(typ *runtime.Type) bool { + if !runtime.IfaceIndir(typ) { + return true + } + switch typ.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Func: + return true + default: + return false + } +} + +func (c *Compiler) implementsMarshalJSONType(typ *runtime.Type) bool { + return 
typ.Implements(marshalJSONType) || typ.Implements(marshalJSONContextType) +} + +func (c *Compiler) isPtrMarshalJSONType(typ *runtime.Type) bool { + return !c.implementsMarshalJSONType(typ) && c.implementsMarshalJSONType(runtime.PtrTo(typ)) +} + +func (c *Compiler) isPtrMarshalTextType(typ *runtime.Type) bool { + return !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType) +} + +func (c *Compiler) codeToOpcode(ctx *compileContext, typ *runtime.Type, code Code) *Opcode { + codes := code.ToOpcode(ctx) + codes.Last().Next = newEndOp(ctx, typ) + c.linkRecursiveCode(ctx) + return codes.First() +} + +func (c *Compiler) linkRecursiveCode(ctx *compileContext) { + recursiveCodes := map[uintptr]*CompiledCode{} + for _, recursive := range *ctx.recursiveCodes { + typeptr := uintptr(unsafe.Pointer(recursive.Type)) + codes := ctx.structTypeToCodes[typeptr] + if recursiveCode, ok := recursiveCodes[typeptr]; ok { + *recursive.Jmp = *recursiveCode + continue + } + + code := copyOpcode(codes.First()) + code.Op = code.Op.PtrHeadToHead() + lastCode := newEndOp(&compileContext{}, recursive.Type) + lastCode.Op = OpRecursiveEnd + + // OpRecursiveEnd must set before call TotalLength + code.End.Next = lastCode + + totalLength := code.TotalLength() + + // Idx, ElemIdx, Length must set after call TotalLength + lastCode.Idx = uint32((totalLength + 1) * uintptrSize) + lastCode.ElemIdx = lastCode.Idx + uintptrSize + lastCode.Length = lastCode.Idx + 2*uintptrSize + + // extend length to alloc slot for elemIdx + length + curTotalLength := uintptr(recursive.TotalLength()) + 3 + nextTotalLength := uintptr(totalLength) + 3 + + compiled := recursive.Jmp + compiled.Code = code + compiled.CurLen = curTotalLength + compiled.NextLen = nextTotalLength + compiled.Linked = true + + recursiveCodes[typeptr] = compiled + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go new file 
mode 100644 index 0000000000..20c93cbf70 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -0,0 +1,32 @@ +//go:build !race +// +build !race + +package encoder + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + return filtered, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + cachedOpcodeSets[index] = codeSet + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go new file mode 100644 index 0000000000..13ba23fdff --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -0,0 +1,45 @@ +//go:build race +// +build race + +package encoder + +import ( + "sync" +) + +var setsMu sync.RWMutex + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + setsMu.RLock() + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + setsMu.RUnlock() + return nil, err + } + 
setsMu.RUnlock() + return filtered, nil + } + setsMu.RUnlock() + + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + setsMu.Lock() + cachedOpcodeSets[index] = codeSet + setsMu.Unlock() + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/context.go b/vendor/github.com/goccy/go-json/internal/encoder/context.go new file mode 100644 index 0000000000..3833d0c86d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/context.go @@ -0,0 +1,105 @@ +package encoder + +import ( + "context" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type compileContext struct { + opcodeIndex uint32 + ptrIndex int + indent uint32 + escapeKey bool + structTypeToCodes map[uintptr]Opcodes + recursiveCodes *Opcodes +} + +func (c *compileContext) incIndent() { + c.indent++ +} + +func (c *compileContext) decIndent() { + c.indent-- +} + +func (c *compileContext) incIndex() { + c.incOpcodeIndex() + c.incPtrIndex() +} + +func (c *compileContext) decIndex() { + c.decOpcodeIndex() + c.decPtrIndex() +} + +func (c *compileContext) incOpcodeIndex() { + c.opcodeIndex++ +} + +func (c *compileContext) decOpcodeIndex() { + c.opcodeIndex-- +} + +func (c *compileContext) incPtrIndex() { + c.ptrIndex++ +} + +func (c *compileContext) decPtrIndex() { + c.ptrIndex-- +} + +const ( + bufSize = 1024 +) + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Buf: make([]byte, 0, bufSize), + Ptrs: make([]uintptr, 128), + KeepRefs: make([]unsafe.Pointer, 0, 8), + Option: &Option{}, + } + }, + } +) + +type RuntimeContext struct { + Context context.Context + Buf []byte + MarshalBuf []byte + Ptrs []uintptr + KeepRefs []unsafe.Pointer + SeenPtr []uintptr + BaseIndent uint32 + Prefix []byte + IndentStr []byte + Option *Option +} + +func (c *RuntimeContext) Init(p 
uintptr, codelen int) { + if len(c.Ptrs) < codelen { + c.Ptrs = make([]uintptr, codelen) + } + c.Ptrs[0] = p + c.KeepRefs = c.KeepRefs[:0] + c.SeenPtr = c.SeenPtr[:0] + c.BaseIndent = 0 +} + +func (c *RuntimeContext) Ptr() uintptr { + header := (*runtime.SliceHeader)(unsafe.Pointer(&c.Ptrs)) + return uintptr(header.Data) +} + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go new file mode 100644 index 0000000000..35c959d481 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go @@ -0,0 +1,126 @@ +package encoder + +import "unicode/utf8" + +const ( + // The default lowest and highest continuation byte. + locb = 128 //0b10000000 + hicb = 191 //0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) + +// first is information about the first byte in a UTF-8 sequence. 
+var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + lineSep = byte(168) //'\u2028' + paragraphSep = byte(169) //'\u2029' +) + +type decodeRuneState int + +const ( + validUTF8State decodeRuneState = iota + runeErrorState + lineSepState + paragraphSepState +) + +func decodeRuneInString(s string) (decodeRuneState, int) { + n := len(s) + s0 := s[0] + x := first[s0] + if x >= as { + // The following code simulates an additional check for x == xx and + // handling the ASCII and invalid cases accordingly. This mask-and-or + // approach prevents an additional branch. + mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF. 
+ if rune(s[0])&^mask|utf8.RuneError&mask == utf8.RuneError { + return runeErrorState, 1 + } + return validUTF8State, 1 + } + sz := int(x & 7) + if n < sz { + return runeErrorState, 1 + } + s1 := s[1] + switch x >> 4 { + case 0: + if s1 < locb || hicb < s1 { + return runeErrorState, 1 + } + case 1: + if s1 < 0xA0 || hicb < s1 { + return runeErrorState, 1 + } + case 2: + if s1 < locb || 0x9F < s1 { + return runeErrorState, 1 + } + case 3: + if s1 < 0x90 || hicb < s1 { + return runeErrorState, 1 + } + case 4: + if s1 < locb || 0x8F < s1 { + return runeErrorState, 1 + } + } + if sz <= 2 { + return validUTF8State, 2 + } + s2 := s[2] + if s2 < locb || hicb < s2 { + return runeErrorState, 1 + } + if sz <= 3 { + // separator character prefixes: [2]byte{226, 128} + if s0 == 226 && s1 == 128 { + switch s2 { + case lineSep: + return lineSepState, 3 + case paragraphSep: + return paragraphSepState, 3 + } + } + return validUTF8State, 3 + } + s3 := s[3] + if s3 < locb || hicb < s3 { + return runeErrorState, 1 + } + return validUTF8State, 4 +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go new file mode 100644 index 0000000000..14eb6a0d64 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -0,0 +1,596 @@ +package encoder + +import ( + "bytes" + "encoding" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +func (t OpType) IsMultipleOpHead() bool { + switch t { + case OpStructHead: + return true + case OpStructHeadSlice: + return true + case OpStructHeadArray: + return true + case OpStructHeadMap: + return true + case OpStructHeadStruct: + return true + case OpStructHeadOmitEmpty: + return true + case OpStructHeadOmitEmptySlice: + return true + case OpStructHeadOmitEmptyArray: + return true + case 
OpStructHeadOmitEmptyMap: + return true + case OpStructHeadOmitEmptyStruct: + return true + case OpStructHeadSlicePtr: + return true + case OpStructHeadOmitEmptySlicePtr: + return true + case OpStructHeadArrayPtr: + return true + case OpStructHeadOmitEmptyArrayPtr: + return true + case OpStructHeadMapPtr: + return true + case OpStructHeadOmitEmptyMapPtr: + return true + } + return false +} + +func (t OpType) IsMultipleOpField() bool { + switch t { + case OpStructField: + return true + case OpStructFieldSlice: + return true + case OpStructFieldArray: + return true + case OpStructFieldMap: + return true + case OpStructFieldStruct: + return true + case OpStructFieldOmitEmpty: + return true + case OpStructFieldOmitEmptySlice: + return true + case OpStructFieldOmitEmptyArray: + return true + case OpStructFieldOmitEmptyMap: + return true + case OpStructFieldOmitEmptyStruct: + return true + case OpStructFieldSlicePtr: + return true + case OpStructFieldOmitEmptySlicePtr: + return true + case OpStructFieldArrayPtr: + return true + case OpStructFieldOmitEmptyArrayPtr: + return true + case OpStructFieldMapPtr: + return true + case OpStructFieldOmitEmptyMapPtr: + return true + } + return false +} + +type OpcodeSet struct { + Type *runtime.Type + NoescapeKeyCode *Opcode + EscapeKeyCode *Opcode + InterfaceNoescapeKeyCode *Opcode + InterfaceEscapeKeyCode *Opcode + CodeLength int + EndCode *Opcode + Code Code + QueryCache map[string]*OpcodeSet + cacheMu sync.RWMutex +} + +func (s *OpcodeSet) getQueryCache(hash string) *OpcodeSet { + s.cacheMu.RLock() + codeSet := s.QueryCache[hash] + s.cacheMu.RUnlock() + return codeSet +} + +func (s *OpcodeSet) setQueryCache(hash string, codeSet *OpcodeSet) { + s.cacheMu.Lock() + s.QueryCache[hash] = codeSet + s.cacheMu.Unlock() +} + +type CompiledCode struct { + Code *Opcode + Linked bool // whether recursive code already have linked + CurLen uintptr + NextLen uintptr +} + +const StartDetectingCyclesAfter = 1000 + +func Load(base uintptr, idx 
uintptr) uintptr { + addr := base + idx + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func Store(base uintptr, idx uintptr, p uintptr) { + addr := base + idx + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func LoadNPtr(base uintptr, idx uintptr, ptrNum int) uintptr { + addr := base + idx + p := **(**uintptr)(unsafe.Pointer(&addr)) + if p == 0 { + return 0 + } + return PtrToPtr(p) + /* + for i := 0; i < ptrNum; i++ { + if p == 0 { + return p + } + p = PtrToPtr(p) + } + return p + */ +} + +func PtrToUint64(p uintptr) uint64 { return **(**uint64)(unsafe.Pointer(&p)) } +func PtrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func PtrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func PtrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func PtrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func PtrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func PtrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func PtrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func PtrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func PtrToNPtr(p uintptr, ptrNum int) uintptr { + for i := 0; i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = PtrToPtr(p) + } + return p +} + +func PtrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func PtrToInterface(code *Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func ErrUnsupportedValue(code *Opcode, ptr uintptr) *errors.UnsupportedValueError { + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&ptr)), + })) + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + 
Str: fmt.Sprintf("encountered a cycle via %s", code.Type), + } +} + +func ErrUnsupportedFloat(v float64) *errors.UnsupportedValueError { + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: strconv.FormatFloat(v, 'g', -1, 64), + } +} + +func ErrMarshalerWithCode(code *Opcode, err error) *errors.MarshalerError { + return &errors.MarshalerError{ + Type: runtime.RType2Type(code.Type), + Err: err, + } +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type MapItem struct { + Key []byte + Value []byte +} + +type Mapslice struct { + Items []MapItem +} + +func (m *Mapslice) Len() int { + return len(m.Items) +} + +func (m *Mapslice) Less(i, j int) bool { + return bytes.Compare(m.Items[i].Key, m.Items[j].Key) < 0 +} + +func (m *Mapslice) Swap(i, j int) { + m.Items[i], m.Items[j] = m.Items[j], m.Items[i] +} + +//nolint:structcheck,unused +type mapIter struct { + key unsafe.Pointer + elem unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow unsafe.Pointer + oldoverflow unsafe.Pointer + startBucket uintptr + offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +type MapContext struct { + Start int + First int + Idx int + Slice *Mapslice + Buf []byte + Len int + Iter mapIter +} + +var mapContextPool = sync.Pool{ + New: func() interface{} { + return &MapContext{ + Slice: &Mapslice{}, + } + }, +} + +func NewMapContext(mapLen int, unorderedMap bool) *MapContext { + ctx := mapContextPool.Get().(*MapContext) + if !unorderedMap { + if len(ctx.Slice.Items) < mapLen { + ctx.Slice.Items = make([]MapItem, mapLen) + } else { + ctx.Slice.Items = ctx.Slice.Items[:mapLen] + } + } + ctx.Buf = ctx.Buf[:0] + ctx.Iter = mapIter{} + ctx.Idx = 0 + ctx.Len = mapLen + return ctx +} + +func ReleaseMapContext(c *MapContext) { + mapContextPool.Put(c) +} + +//go:linkname MapIterInit runtime.mapiterinit +//go:noescape +func MapIterInit(mapType 
*runtime.Type, m unsafe.Pointer, it *mapIter) + +//go:linkname MapIterKey reflect.mapiterkey +//go:noescape +func MapIterKey(it *mapIter) unsafe.Pointer + +//go:linkname MapIterNext reflect.mapiternext +//go:noescape +func MapIterNext(it *mapIter) + +//go:linkname MapLen reflect.maplen +//go:noescape +func MapLen(m unsafe.Pointer) int + +func AppendByteSlice(_ *RuntimeContext, b []byte, src []byte) []byte { + if src == nil { + return append(b, `null`...) + } + encodedLen := base64.StdEncoding.EncodedLen(len(src)) + b = append(b, '"') + pos := len(b) + remainLen := cap(b[pos:]) + var buf []byte + if remainLen > encodedLen { + buf = b[pos : pos+encodedLen] + } else { + buf = make([]byte, encodedLen) + } + base64.StdEncoding.Encode(buf, src) + return append(append(b, buf...), '"') +} + +func AppendFloat32(_ *RuntimeContext, b []byte, v float32) []byte { + f64 := float64(v) + abs := math.Abs(f64) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + f32 := float32(abs) + if f32 < 1e-6 || f32 >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, f64, fmt, -1, 32) +} + +func AppendFloat64(_ *RuntimeContext, b []byte, v float64) []byte { + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, v, fmt, -1, 64) +} + +func AppendBool(_ *RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) 
+} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } +) + +func AppendNumber(_ *RuntimeContext, b []byte, n json.Number) ([]byte, error) { + if len(n) == 0 { + return append(b, '0'), nil + } + for i := 0; i < len(n); i++ { + if !floatTable[n[i]] { + return nil, fmt.Errorf("json: invalid number literal %q", n) + } + } + b = append(b, n...) + return b, nil +} + +func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + stdctx := ctx.Option.Context + if ctx.Option.Flag&FieldQueryOption != 0 { + stdctx = SetFieldQueryToContext(stdctx, code.FieldQuery) + } + b, err := marshaler.MarshalJSON(stdctx) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + compactedBuf, err := compact(b, marshalBuf, (ctx.Option.Flag&HTMLEscapeOption) != 0) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return compactedBuf, nil +} + +func AppendMarshalJSONIndent(ctx *RuntimeContext, code *Opcode, b []byte, v 
interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON(ctx.Option.Context) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + indentedBuf, err := doIndent( + b, + marshalBuf, + string(ctx.Prefix)+strings.Repeat(string(ctx.IndentStr), int(ctx.BaseIndent+code.Indent)), + string(ctx.IndentStr), + (ctx.Option.Flag&HTMLEscapeOption) != 0, + ) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return indentedBuf, nil +} + +func AppendMarshalText(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendMarshalTextIndent(ctx 
*RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendNull(_ *RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func AppendComma(_ *RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func AppendCommaIndent(_ *RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func AppendStructEnd(_ *RuntimeContext, b []byte) []byte { + return append(b, '}', ',') +} + +func AppendStructEndIndent(ctx *RuntimeContext, code *Opcode, b []byte) []byte { + b = append(b, '\n') + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + code.Indent - 1 + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return append(b, '}', ',', '\n') +} + +func AppendIndent(ctx *RuntimeContext, b []byte, indent uint32) []byte { + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + indent + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) 
+ } + return b +} + +func IsNilForMarshaler(v interface{}) bool { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Bool: + return !rv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(rv.Float()) == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Func: + return rv.IsNil() + case reflect.Slice: + return rv.IsNil() || rv.Len() == 0 + case reflect.String: + return rv.Len() == 0 + } + return false +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/indent.go b/vendor/github.com/goccy/go-json/internal/encoder/indent.go new file mode 100644 index 0000000000..dfe04b5e3c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/indent.go @@ -0,0 +1,211 @@ +package encoder + +import ( + "bytes" + "fmt" + + "github.com/goccy/go-json/internal/errors" +) + +func takeIndentSrcRuntimeContext(src []byte) (*RuntimeContext, []byte) { + ctx := TakeRuntimeContext() + buf := ctx.Buf[:0] + buf = append(append(buf, src...), nul) + ctx.Buf = buf + return ctx, buf +} + +func Indent(buf *bytes.Buffer, src []byte, prefix, indentStr string) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + + srcCtx, srcBuf := takeIndentSrcRuntimeContext(src) + dstCtx := TakeRuntimeContext() + dst := dstCtx.Buf[:0] + + dst, err := indentAndWrite(buf, dst, srcBuf, prefix, indentStr) + if err != nil { + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return err + } + dstCtx.Buf = dst + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return nil +} + +func indentAndWrite(buf *bytes.Buffer, dst []byte, src []byte, prefix, indentStr string) ([]byte, error) { + dst, err := doIndent(dst, src, prefix, indentStr, false) + if err != nil { + return nil, err + } 
+ if _, err := buf.Write(dst); err != nil { + return nil, err + } + return dst, nil +} + +func doIndent(dst, src []byte, prefix, indentStr string, escape bool) ([]byte, error) { + buf, cursor, err := indentValue(dst, src, 0, 0, []byte(prefix), []byte(indentStr), escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func indentValue( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return indentObject(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return indentArray(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 'f': + return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func indentObject( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) 
+ for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) + } + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key", src[cursor]), + cursor+1, + ) + } + dst = append(dst, ':', ' ') + dst, cursor, err = indentValue(dst, src, indentNum, cursor+1, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key:value pair", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} + +func indentArray( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) + } + dst, cursor, err = indentValue(dst, src, indentNum, cursor, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) 
+ } + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after array value", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/int.go b/vendor/github.com/goccy/go-json/internal/encoder/int.go new file mode 100644 index 0000000000..85f0796098 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/int.go @@ -0,0 +1,152 @@ +package encoder + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 
0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func numMask(numBitSize uint8) uint64 { + return 1<>(code.NumBitSize-1))&1 == 1 + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n & mask + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...) 
+} + +func AppendUint(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte { + var u64 uint64 + switch code.NumBitSize { + case 8: + u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + u64 = **(**uint64)(unsafe.Pointer(&p)) + } + mask := numMask(code.NumBitSize) + n := u64 & mask + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + return append(out, b[i:]...) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map112.go b/vendor/github.com/goccy/go-json/internal/encoder/map112.go new file mode 100644 index 0000000000..e96ffadf7a --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map112.go @@ -0,0 +1,9 @@ +//go:build !go1.13 +// +build !go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapitervalue +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map113.go b/vendor/github.com/goccy/go-json/internal/encoder/map113.go new file mode 100644 index 0000000000..9b69dcc360 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map113.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapiterelem +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go new file mode 100644 index 0000000000..05fc3ce049 --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go @@ -0,0 +1,669 @@ +package encoder + +import ( + "fmt" + "strings" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +type OpFlags uint16 + +const ( + AnonymousHeadFlags OpFlags = 1 << 0 + AnonymousKeyFlags OpFlags = 1 << 1 + IndirectFlags OpFlags = 1 << 2 + IsTaggedKeyFlags OpFlags = 1 << 3 + NilCheckFlags OpFlags = 1 << 4 + AddrForMarshalerFlags OpFlags = 1 << 5 + IsNextOpPtrTypeFlags OpFlags = 1 << 6 + IsNilableTypeFlags OpFlags = 1 << 7 + MarshalerContextFlags OpFlags = 1 << 8 + NonEmptyInterfaceFlags OpFlags = 1 << 9 +) + +type Opcode struct { + Op OpType // operation type + Idx uint32 // offset to access ptr + Next *Opcode // next opcode + End *Opcode // array/slice/struct/map end + NextField *Opcode // next struct field + Key string // struct field key + Offset uint32 // offset size from struct header + PtrNum uint8 // pointer number: e.g. double pointer is 2. + NumBitSize uint8 + Flags OpFlags + + Type *runtime.Type // go type + Jmp *CompiledCode // for recursive call + FieldQuery *FieldQuery // field query for Interface / MarshalJSON / MarshalText + ElemIdx uint32 // offset to access array/slice elem + Length uint32 // offset to access slice length or array length + Indent uint32 // indent number + Size uint32 // array/slice elem size + DisplayIdx uint32 // opcode index + DisplayKey string // key text to display +} + +func (c *Opcode) Validate() error { + var prevIdx uint32 + for code := c; !code.IsEnd(); { + if prevIdx != 0 { + if code.DisplayIdx != prevIdx+1 { + return fmt.Errorf( + "invalid index. previous display index is %d but next is %d. 
dump = %s", + prevIdx, code.DisplayIdx, c.Dump(), + ) + } + } + prevIdx = code.DisplayIdx + code = code.IterNext() + } + return nil +} + +func (c *Opcode) IterNext() *Opcode { + if c == nil { + return nil + } + switch c.Op.CodeType() { + case CodeArrayElem, CodeSliceElem, CodeMapKey: + return c.End + default: + return c.Next + } +} + +func (c *Opcode) IsEnd() bool { + if c == nil { + return true + } + return c.Op == OpEnd || c.Op == OpInterfaceEnd || c.Op == OpRecursiveEnd +} + +func (c *Opcode) MaxIdx() uint32 { + max := uint32(0) + for _, value := range []uint32{ + c.Idx, + c.ElemIdx, + c.Length, + c.Size, + } { + if max < value { + max = value + } + } + return max +} + +func (c *Opcode) ToHeaderType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructHeadIntString + } + return OpStructHeadInt + case OpIntPtr: + if isString { + return OpStructHeadIntPtrString + } + return OpStructHeadIntPtr + case OpUint: + if isString { + return OpStructHeadUintString + } + return OpStructHeadUint + case OpUintPtr: + if isString { + return OpStructHeadUintPtrString + } + return OpStructHeadUintPtr + case OpFloat32: + if isString { + return OpStructHeadFloat32String + } + return OpStructHeadFloat32 + case OpFloat32Ptr: + if isString { + return OpStructHeadFloat32PtrString + } + return OpStructHeadFloat32Ptr + case OpFloat64: + if isString { + return OpStructHeadFloat64String + } + return OpStructHeadFloat64 + case OpFloat64Ptr: + if isString { + return OpStructHeadFloat64PtrString + } + return OpStructHeadFloat64Ptr + case OpString: + if isString { + return OpStructHeadStringString + } + return OpStructHeadString + case OpStringPtr: + if isString { + return OpStructHeadStringPtrString + } + return OpStructHeadStringPtr + case OpNumber: + if isString { + return OpStructHeadNumberString + } + return OpStructHeadNumber + case OpNumberPtr: + if isString { + return OpStructHeadNumberPtrString + } + return OpStructHeadNumberPtr + case OpBool: + if 
isString { + return OpStructHeadBoolString + } + return OpStructHeadBool + case OpBoolPtr: + if isString { + return OpStructHeadBoolPtrString + } + return OpStructHeadBoolPtr + case OpBytes: + return OpStructHeadBytes + case OpBytesPtr: + return OpStructHeadBytesPtr + case OpMap: + return OpStructHeadMap + case OpMapPtr: + c.Op = OpMap + return OpStructHeadMapPtr + case OpArray: + return OpStructHeadArray + case OpArrayPtr: + c.Op = OpArray + return OpStructHeadArrayPtr + case OpSlice: + return OpStructHeadSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructHeadSlicePtr + case OpMarshalJSON: + return OpStructHeadMarshalJSON + case OpMarshalJSONPtr: + return OpStructHeadMarshalJSONPtr + case OpMarshalText: + return OpStructHeadMarshalText + case OpMarshalTextPtr: + return OpStructHeadMarshalTextPtr + } + return OpStructHead +} + +func (c *Opcode) ToFieldType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructFieldIntString + } + return OpStructFieldInt + case OpIntPtr: + if isString { + return OpStructFieldIntPtrString + } + return OpStructFieldIntPtr + case OpUint: + if isString { + return OpStructFieldUintString + } + return OpStructFieldUint + case OpUintPtr: + if isString { + return OpStructFieldUintPtrString + } + return OpStructFieldUintPtr + case OpFloat32: + if isString { + return OpStructFieldFloat32String + } + return OpStructFieldFloat32 + case OpFloat32Ptr: + if isString { + return OpStructFieldFloat32PtrString + } + return OpStructFieldFloat32Ptr + case OpFloat64: + if isString { + return OpStructFieldFloat64String + } + return OpStructFieldFloat64 + case OpFloat64Ptr: + if isString { + return OpStructFieldFloat64PtrString + } + return OpStructFieldFloat64Ptr + case OpString: + if isString { + return OpStructFieldStringString + } + return OpStructFieldString + case OpStringPtr: + if isString { + return OpStructFieldStringPtrString + } + return OpStructFieldStringPtr + case OpNumber: + if isString { + return 
OpStructFieldNumberString + } + return OpStructFieldNumber + case OpNumberPtr: + if isString { + return OpStructFieldNumberPtrString + } + return OpStructFieldNumberPtr + case OpBool: + if isString { + return OpStructFieldBoolString + } + return OpStructFieldBool + case OpBoolPtr: + if isString { + return OpStructFieldBoolPtrString + } + return OpStructFieldBoolPtr + case OpBytes: + return OpStructFieldBytes + case OpBytesPtr: + return OpStructFieldBytesPtr + case OpMap: + return OpStructFieldMap + case OpMapPtr: + c.Op = OpMap + return OpStructFieldMapPtr + case OpArray: + return OpStructFieldArray + case OpArrayPtr: + c.Op = OpArray + return OpStructFieldArrayPtr + case OpSlice: + return OpStructFieldSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructFieldSlicePtr + case OpMarshalJSON: + return OpStructFieldMarshalJSON + case OpMarshalJSONPtr: + return OpStructFieldMarshalJSONPtr + case OpMarshalText: + return OpStructFieldMarshalText + case OpMarshalTextPtr: + return OpStructFieldMarshalTextPtr + } + return OpStructField +} + +func newOpCode(ctx *compileContext, typ *runtime.Type, op OpType) *Opcode { + return newOpCodeWithNext(ctx, typ, op, newEndOp(ctx, typ)) +} + +func opcodeOffset(idx int) uint32 { + return uint32(idx) * uintptrSize +} + +func getCodeAddrByIdx(head *Opcode, idx uint32) *Opcode { + addr := uintptr(unsafe.Pointer(head)) + uintptr(idx)*unsafe.Sizeof(Opcode{}) + return *(**Opcode)(unsafe.Pointer(&addr)) +} + +func copyOpcode(code *Opcode) *Opcode { + codeNum := ToEndCode(code).DisplayIdx + 1 + codeSlice := make([]Opcode, codeNum) + head := (*Opcode)((*runtime.SliceHeader)(unsafe.Pointer(&codeSlice)).Data) + ptr := head + c := code + for { + *ptr = Opcode{ + Op: c.Op, + Key: c.Key, + PtrNum: c.PtrNum, + NumBitSize: c.NumBitSize, + Flags: c.Flags, + Idx: c.Idx, + Offset: c.Offset, + Type: c.Type, + FieldQuery: c.FieldQuery, + DisplayIdx: c.DisplayIdx, + DisplayKey: c.DisplayKey, + ElemIdx: c.ElemIdx, + Length: c.Length, + Size: c.Size, + 
Indent: c.Indent, + Jmp: c.Jmp, + } + if c.End != nil { + ptr.End = getCodeAddrByIdx(head, c.End.DisplayIdx) + } + if c.NextField != nil { + ptr.NextField = getCodeAddrByIdx(head, c.NextField.DisplayIdx) + } + if c.Next != nil { + ptr.Next = getCodeAddrByIdx(head, c.Next.DisplayIdx) + } + if c.IsEnd() { + break + } + ptr = getCodeAddrByIdx(head, c.DisplayIdx+1) + c = c.IterNext() + } + return head +} + +func setTotalLengthToInterfaceOp(code *Opcode) { + for c := code; !c.IsEnd(); { + if c.Op == OpInterface || c.Op == OpInterfacePtr { + c.Length = uint32(code.TotalLength()) + } + c = c.IterNext() + } +} + +func ToEndCode(code *Opcode) *Opcode { + c := code + for !c.IsEnd() { + c = c.IterNext() + } + return c +} + +func copyToInterfaceOpcode(code *Opcode) *Opcode { + copied := copyOpcode(code) + c := copied + c = ToEndCode(c) + c.Idx += uintptrSize + c.ElemIdx = c.Idx + uintptrSize + c.Length = c.Idx + 2*uintptrSize + c.Op = OpInterfaceEnd + return copied +} + +func newOpCodeWithNext(ctx *compileContext, typ *runtime.Type, op OpType, next *Opcode) *Opcode { + return &Opcode{ + Op: op, + Idx: opcodeOffset(ctx.ptrIndex), + Next: next, + Type: typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newEndOp(ctx *compileContext, typ *runtime.Type) *Opcode { + return newOpCodeWithNext(ctx, typ, OpEnd, nil) +} + +func (c *Opcode) TotalLength() int { + var idx int + code := c + for !code.IsEnd() { + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + if code.Op == OpRecursiveEnd { + break + } + code = code.IterNext() + } + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + return idx + 1 +} + +func (c *Opcode) dumpHead(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayHead { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d])`, + code.DisplayIdx, + strings.Repeat("-", 
int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + ) +} + +func (c *Opcode) dumpMapHead(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpMapEnd(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpElem(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayElem { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d][size:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + code.Size, + ) +} + +func (c *Opcode) dumpField(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][key:%s][offset:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.DisplayKey, + code.Offset, + ) +} + +func (c *Opcode) dumpKey(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpValue(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) Dump() string { + codes := []string{} + for code := c; !code.IsEnd(); { + switch code.Op.CodeType() { + case CodeSliceHead: + codes = append(codes, c.dumpHead(code)) + code = code.Next + case CodeMapHead: + codes = append(codes, c.dumpMapHead(code)) + code = code.Next + case CodeArrayElem, CodeSliceElem: + codes = append(codes, c.dumpElem(code)) + code = code.End + case 
CodeMapKey: + codes = append(codes, c.dumpKey(code)) + code = code.End + case CodeMapValue: + codes = append(codes, c.dumpValue(code)) + code = code.Next + case CodeMapEnd: + codes = append(codes, c.dumpMapEnd(code)) + code = code.Next + case CodeStructField: + codes = append(codes, c.dumpField(code)) + code = code.Next + case CodeStructEnd: + codes = append(codes, c.dumpField(code)) + code = code.Next + default: + codes = append(codes, fmt.Sprintf( + "[%03d]%s%s ([idx:%d])", + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + )) + code = code.Next + } + } + return strings.Join(codes, "\n") +} + +func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + length := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpSlice, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Length: length, + Indent: ctx.indent, + } +} + +func newSliceElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, size uintptr) *Opcode { + return &Opcode{ + Op: OpSliceElem, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: head.Length, + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newArrayHeaderCode(ctx *compileContext, typ *runtime.Type, alen int) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpArray, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Indent: ctx.indent, + Length: uint32(alen), + } +} + +func newArrayElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, length int, size uintptr) *Opcode { + return &Opcode{ + Op: OpArrayElem, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: uint32(length), + Indent: ctx.indent, + Size: uint32(size), + } +} + 
+func newMapHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + return &Opcode{ + Op: OpMap, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapKeyCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapKey, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapValueCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapValue, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapEndCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapEnd, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Next: newEndOp(ctx, typ), + } +} + +func newRecursiveCode(ctx *compileContext, typ *runtime.Type, jmp *CompiledCode) *Opcode { + return &Opcode{ + Op: OpRecursive, + Type: typ, + Idx: opcodeOffset(ctx.ptrIndex), + Next: newEndOp(ctx, typ), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Jmp: jmp, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/option.go b/vendor/github.com/goccy/go-json/internal/encoder/option.go new file mode 100644 index 0000000000..82d5ce3e7b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/option.go @@ -0,0 +1,47 @@ +package encoder + +import ( + "context" + "io" +) + +type OptionFlag uint8 + +const ( + HTMLEscapeOption OptionFlag = 1 << iota + IndentOption + UnorderedMapOption + DebugOption + ColorizeOption + ContextOption + NormalizeUTF8Option + FieldQueryOption +) + +type Option struct { + Flag OptionFlag + ColorScheme *ColorScheme + Context context.Context + DebugOut io.Writer +} + +type EncodeFormat struct { + Header string + Footer string +} + +type EncodeFormatScheme struct { + Int EncodeFormat + Uint EncodeFormat + Float 
EncodeFormat + Bool EncodeFormat + String EncodeFormat + Binary EncodeFormat + ObjectKey EncodeFormat + Null EncodeFormat +} + +type ( + ColorScheme = EncodeFormatScheme + ColorFormat = EncodeFormat +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/optype.go b/vendor/github.com/goccy/go-json/internal/encoder/optype.go new file mode 100644 index 0000000000..5c1241b47d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/optype.go @@ -0,0 +1,932 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package encoder + +import ( + "strings" +) + +type CodeType int + +const ( + CodeOp CodeType = 0 + CodeArrayHead CodeType = 1 + CodeArrayElem CodeType = 2 + CodeSliceHead CodeType = 3 + CodeSliceElem CodeType = 4 + CodeMapHead CodeType = 5 + CodeMapKey CodeType = 6 + CodeMapValue CodeType = 7 + CodeMapEnd CodeType = 8 + CodeRecursive CodeType = 9 + CodeStructField CodeType = 10 + CodeStructEnd CodeType = 11 +) + +var opTypeStrings = [400]string{ + "End", + "Interface", + "Ptr", + "SliceElem", + "SliceEnd", + "ArrayElem", + "ArrayEnd", + "MapKey", + "MapValue", + "MapEnd", + "Recursive", + "RecursivePtr", + "RecursiveEnd", + "InterfaceEnd", + "Int", + "Uint", + "Float32", + "Float64", + "Bool", + "String", + "Bytes", + "Number", + "Array", + "Map", + "Slice", + "Struct", + "MarshalJSON", + "MarshalText", + "IntString", + "UintString", + "Float32String", + "Float64String", + "BoolString", + "StringString", + "NumberString", + "IntPtr", + "UintPtr", + "Float32Ptr", + "Float64Ptr", + "BoolPtr", + "StringPtr", + "BytesPtr", + "NumberPtr", + "ArrayPtr", + "MapPtr", + "SlicePtr", + "MarshalJSONPtr", + "MarshalTextPtr", + "InterfacePtr", + "IntPtrString", + "UintPtrString", + "Float32PtrString", + "Float64PtrString", + "BoolPtrString", + "StringPtrString", + "NumberPtrString", + "StructHeadInt", + "StructHeadOmitEmptyInt", + "StructPtrHeadInt", + "StructPtrHeadOmitEmptyInt", + "StructHeadUint", + "StructHeadOmitEmptyUint", + 
"StructPtrHeadUint", + "StructPtrHeadOmitEmptyUint", + "StructHeadFloat32", + "StructHeadOmitEmptyFloat32", + "StructPtrHeadFloat32", + "StructPtrHeadOmitEmptyFloat32", + "StructHeadFloat64", + "StructHeadOmitEmptyFloat64", + "StructPtrHeadFloat64", + "StructPtrHeadOmitEmptyFloat64", + "StructHeadBool", + "StructHeadOmitEmptyBool", + "StructPtrHeadBool", + "StructPtrHeadOmitEmptyBool", + "StructHeadString", + "StructHeadOmitEmptyString", + "StructPtrHeadString", + "StructPtrHeadOmitEmptyString", + "StructHeadBytes", + "StructHeadOmitEmptyBytes", + "StructPtrHeadBytes", + "StructPtrHeadOmitEmptyBytes", + "StructHeadNumber", + "StructHeadOmitEmptyNumber", + "StructPtrHeadNumber", + "StructPtrHeadOmitEmptyNumber", + "StructHeadArray", + "StructHeadOmitEmptyArray", + "StructPtrHeadArray", + "StructPtrHeadOmitEmptyArray", + "StructHeadMap", + "StructHeadOmitEmptyMap", + "StructPtrHeadMap", + "StructPtrHeadOmitEmptyMap", + "StructHeadSlice", + "StructHeadOmitEmptySlice", + "StructPtrHeadSlice", + "StructPtrHeadOmitEmptySlice", + "StructHeadStruct", + "StructHeadOmitEmptyStruct", + "StructPtrHeadStruct", + "StructPtrHeadOmitEmptyStruct", + "StructHeadMarshalJSON", + "StructHeadOmitEmptyMarshalJSON", + "StructPtrHeadMarshalJSON", + "StructPtrHeadOmitEmptyMarshalJSON", + "StructHeadMarshalText", + "StructHeadOmitEmptyMarshalText", + "StructPtrHeadMarshalText", + "StructPtrHeadOmitEmptyMarshalText", + "StructHeadIntString", + "StructHeadOmitEmptyIntString", + "StructPtrHeadIntString", + "StructPtrHeadOmitEmptyIntString", + "StructHeadUintString", + "StructHeadOmitEmptyUintString", + "StructPtrHeadUintString", + "StructPtrHeadOmitEmptyUintString", + "StructHeadFloat32String", + "StructHeadOmitEmptyFloat32String", + "StructPtrHeadFloat32String", + "StructPtrHeadOmitEmptyFloat32String", + "StructHeadFloat64String", + "StructHeadOmitEmptyFloat64String", + "StructPtrHeadFloat64String", + "StructPtrHeadOmitEmptyFloat64String", + "StructHeadBoolString", + 
"StructHeadOmitEmptyBoolString", + "StructPtrHeadBoolString", + "StructPtrHeadOmitEmptyBoolString", + "StructHeadStringString", + "StructHeadOmitEmptyStringString", + "StructPtrHeadStringString", + "StructPtrHeadOmitEmptyStringString", + "StructHeadNumberString", + "StructHeadOmitEmptyNumberString", + "StructPtrHeadNumberString", + "StructPtrHeadOmitEmptyNumberString", + "StructHeadIntPtr", + "StructHeadOmitEmptyIntPtr", + "StructPtrHeadIntPtr", + "StructPtrHeadOmitEmptyIntPtr", + "StructHeadUintPtr", + "StructHeadOmitEmptyUintPtr", + "StructPtrHeadUintPtr", + "StructPtrHeadOmitEmptyUintPtr", + "StructHeadFloat32Ptr", + "StructHeadOmitEmptyFloat32Ptr", + "StructPtrHeadFloat32Ptr", + "StructPtrHeadOmitEmptyFloat32Ptr", + "StructHeadFloat64Ptr", + "StructHeadOmitEmptyFloat64Ptr", + "StructPtrHeadFloat64Ptr", + "StructPtrHeadOmitEmptyFloat64Ptr", + "StructHeadBoolPtr", + "StructHeadOmitEmptyBoolPtr", + "StructPtrHeadBoolPtr", + "StructPtrHeadOmitEmptyBoolPtr", + "StructHeadStringPtr", + "StructHeadOmitEmptyStringPtr", + "StructPtrHeadStringPtr", + "StructPtrHeadOmitEmptyStringPtr", + "StructHeadBytesPtr", + "StructHeadOmitEmptyBytesPtr", + "StructPtrHeadBytesPtr", + "StructPtrHeadOmitEmptyBytesPtr", + "StructHeadNumberPtr", + "StructHeadOmitEmptyNumberPtr", + "StructPtrHeadNumberPtr", + "StructPtrHeadOmitEmptyNumberPtr", + "StructHeadArrayPtr", + "StructHeadOmitEmptyArrayPtr", + "StructPtrHeadArrayPtr", + "StructPtrHeadOmitEmptyArrayPtr", + "StructHeadMapPtr", + "StructHeadOmitEmptyMapPtr", + "StructPtrHeadMapPtr", + "StructPtrHeadOmitEmptyMapPtr", + "StructHeadSlicePtr", + "StructHeadOmitEmptySlicePtr", + "StructPtrHeadSlicePtr", + "StructPtrHeadOmitEmptySlicePtr", + "StructHeadMarshalJSONPtr", + "StructHeadOmitEmptyMarshalJSONPtr", + "StructPtrHeadMarshalJSONPtr", + "StructPtrHeadOmitEmptyMarshalJSONPtr", + "StructHeadMarshalTextPtr", + "StructHeadOmitEmptyMarshalTextPtr", + "StructPtrHeadMarshalTextPtr", + "StructPtrHeadOmitEmptyMarshalTextPtr", + 
"StructHeadInterfacePtr", + "StructHeadOmitEmptyInterfacePtr", + "StructPtrHeadInterfacePtr", + "StructPtrHeadOmitEmptyInterfacePtr", + "StructHeadIntPtrString", + "StructHeadOmitEmptyIntPtrString", + "StructPtrHeadIntPtrString", + "StructPtrHeadOmitEmptyIntPtrString", + "StructHeadUintPtrString", + "StructHeadOmitEmptyUintPtrString", + "StructPtrHeadUintPtrString", + "StructPtrHeadOmitEmptyUintPtrString", + "StructHeadFloat32PtrString", + "StructHeadOmitEmptyFloat32PtrString", + "StructPtrHeadFloat32PtrString", + "StructPtrHeadOmitEmptyFloat32PtrString", + "StructHeadFloat64PtrString", + "StructHeadOmitEmptyFloat64PtrString", + "StructPtrHeadFloat64PtrString", + "StructPtrHeadOmitEmptyFloat64PtrString", + "StructHeadBoolPtrString", + "StructHeadOmitEmptyBoolPtrString", + "StructPtrHeadBoolPtrString", + "StructPtrHeadOmitEmptyBoolPtrString", + "StructHeadStringPtrString", + "StructHeadOmitEmptyStringPtrString", + "StructPtrHeadStringPtrString", + "StructPtrHeadOmitEmptyStringPtrString", + "StructHeadNumberPtrString", + "StructHeadOmitEmptyNumberPtrString", + "StructPtrHeadNumberPtrString", + "StructPtrHeadOmitEmptyNumberPtrString", + "StructHead", + "StructHeadOmitEmpty", + "StructPtrHead", + "StructPtrHeadOmitEmpty", + "StructFieldInt", + "StructFieldOmitEmptyInt", + "StructEndInt", + "StructEndOmitEmptyInt", + "StructFieldUint", + "StructFieldOmitEmptyUint", + "StructEndUint", + "StructEndOmitEmptyUint", + "StructFieldFloat32", + "StructFieldOmitEmptyFloat32", + "StructEndFloat32", + "StructEndOmitEmptyFloat32", + "StructFieldFloat64", + "StructFieldOmitEmptyFloat64", + "StructEndFloat64", + "StructEndOmitEmptyFloat64", + "StructFieldBool", + "StructFieldOmitEmptyBool", + "StructEndBool", + "StructEndOmitEmptyBool", + "StructFieldString", + "StructFieldOmitEmptyString", + "StructEndString", + "StructEndOmitEmptyString", + "StructFieldBytes", + "StructFieldOmitEmptyBytes", + "StructEndBytes", + "StructEndOmitEmptyBytes", + "StructFieldNumber", + 
"StructFieldOmitEmptyNumber", + "StructEndNumber", + "StructEndOmitEmptyNumber", + "StructFieldArray", + "StructFieldOmitEmptyArray", + "StructEndArray", + "StructEndOmitEmptyArray", + "StructFieldMap", + "StructFieldOmitEmptyMap", + "StructEndMap", + "StructEndOmitEmptyMap", + "StructFieldSlice", + "StructFieldOmitEmptySlice", + "StructEndSlice", + "StructEndOmitEmptySlice", + "StructFieldStruct", + "StructFieldOmitEmptyStruct", + "StructEndStruct", + "StructEndOmitEmptyStruct", + "StructFieldMarshalJSON", + "StructFieldOmitEmptyMarshalJSON", + "StructEndMarshalJSON", + "StructEndOmitEmptyMarshalJSON", + "StructFieldMarshalText", + "StructFieldOmitEmptyMarshalText", + "StructEndMarshalText", + "StructEndOmitEmptyMarshalText", + "StructFieldIntString", + "StructFieldOmitEmptyIntString", + "StructEndIntString", + "StructEndOmitEmptyIntString", + "StructFieldUintString", + "StructFieldOmitEmptyUintString", + "StructEndUintString", + "StructEndOmitEmptyUintString", + "StructFieldFloat32String", + "StructFieldOmitEmptyFloat32String", + "StructEndFloat32String", + "StructEndOmitEmptyFloat32String", + "StructFieldFloat64String", + "StructFieldOmitEmptyFloat64String", + "StructEndFloat64String", + "StructEndOmitEmptyFloat64String", + "StructFieldBoolString", + "StructFieldOmitEmptyBoolString", + "StructEndBoolString", + "StructEndOmitEmptyBoolString", + "StructFieldStringString", + "StructFieldOmitEmptyStringString", + "StructEndStringString", + "StructEndOmitEmptyStringString", + "StructFieldNumberString", + "StructFieldOmitEmptyNumberString", + "StructEndNumberString", + "StructEndOmitEmptyNumberString", + "StructFieldIntPtr", + "StructFieldOmitEmptyIntPtr", + "StructEndIntPtr", + "StructEndOmitEmptyIntPtr", + "StructFieldUintPtr", + "StructFieldOmitEmptyUintPtr", + "StructEndUintPtr", + "StructEndOmitEmptyUintPtr", + "StructFieldFloat32Ptr", + "StructFieldOmitEmptyFloat32Ptr", + "StructEndFloat32Ptr", + "StructEndOmitEmptyFloat32Ptr", + "StructFieldFloat64Ptr", + 
"StructFieldOmitEmptyFloat64Ptr", + "StructEndFloat64Ptr", + "StructEndOmitEmptyFloat64Ptr", + "StructFieldBoolPtr", + "StructFieldOmitEmptyBoolPtr", + "StructEndBoolPtr", + "StructEndOmitEmptyBoolPtr", + "StructFieldStringPtr", + "StructFieldOmitEmptyStringPtr", + "StructEndStringPtr", + "StructEndOmitEmptyStringPtr", + "StructFieldBytesPtr", + "StructFieldOmitEmptyBytesPtr", + "StructEndBytesPtr", + "StructEndOmitEmptyBytesPtr", + "StructFieldNumberPtr", + "StructFieldOmitEmptyNumberPtr", + "StructEndNumberPtr", + "StructEndOmitEmptyNumberPtr", + "StructFieldArrayPtr", + "StructFieldOmitEmptyArrayPtr", + "StructEndArrayPtr", + "StructEndOmitEmptyArrayPtr", + "StructFieldMapPtr", + "StructFieldOmitEmptyMapPtr", + "StructEndMapPtr", + "StructEndOmitEmptyMapPtr", + "StructFieldSlicePtr", + "StructFieldOmitEmptySlicePtr", + "StructEndSlicePtr", + "StructEndOmitEmptySlicePtr", + "StructFieldMarshalJSONPtr", + "StructFieldOmitEmptyMarshalJSONPtr", + "StructEndMarshalJSONPtr", + "StructEndOmitEmptyMarshalJSONPtr", + "StructFieldMarshalTextPtr", + "StructFieldOmitEmptyMarshalTextPtr", + "StructEndMarshalTextPtr", + "StructEndOmitEmptyMarshalTextPtr", + "StructFieldInterfacePtr", + "StructFieldOmitEmptyInterfacePtr", + "StructEndInterfacePtr", + "StructEndOmitEmptyInterfacePtr", + "StructFieldIntPtrString", + "StructFieldOmitEmptyIntPtrString", + "StructEndIntPtrString", + "StructEndOmitEmptyIntPtrString", + "StructFieldUintPtrString", + "StructFieldOmitEmptyUintPtrString", + "StructEndUintPtrString", + "StructEndOmitEmptyUintPtrString", + "StructFieldFloat32PtrString", + "StructFieldOmitEmptyFloat32PtrString", + "StructEndFloat32PtrString", + "StructEndOmitEmptyFloat32PtrString", + "StructFieldFloat64PtrString", + "StructFieldOmitEmptyFloat64PtrString", + "StructEndFloat64PtrString", + "StructEndOmitEmptyFloat64PtrString", + "StructFieldBoolPtrString", + "StructFieldOmitEmptyBoolPtrString", + "StructEndBoolPtrString", + "StructEndOmitEmptyBoolPtrString", + 
"StructFieldStringPtrString", + "StructFieldOmitEmptyStringPtrString", + "StructEndStringPtrString", + "StructEndOmitEmptyStringPtrString", + "StructFieldNumberPtrString", + "StructFieldOmitEmptyNumberPtrString", + "StructEndNumberPtrString", + "StructEndOmitEmptyNumberPtrString", + "StructField", + "StructFieldOmitEmpty", + "StructEnd", + "StructEndOmitEmpty", +} + +type OpType uint16 + +const ( + OpEnd OpType = 0 + OpInterface OpType = 1 + OpPtr OpType = 2 + OpSliceElem OpType = 3 + OpSliceEnd OpType = 4 + OpArrayElem OpType = 5 + OpArrayEnd OpType = 6 + OpMapKey OpType = 7 + OpMapValue OpType = 8 + OpMapEnd OpType = 9 + OpRecursive OpType = 10 + OpRecursivePtr OpType = 11 + OpRecursiveEnd OpType = 12 + OpInterfaceEnd OpType = 13 + OpInt OpType = 14 + OpUint OpType = 15 + OpFloat32 OpType = 16 + OpFloat64 OpType = 17 + OpBool OpType = 18 + OpString OpType = 19 + OpBytes OpType = 20 + OpNumber OpType = 21 + OpArray OpType = 22 + OpMap OpType = 23 + OpSlice OpType = 24 + OpStruct OpType = 25 + OpMarshalJSON OpType = 26 + OpMarshalText OpType = 27 + OpIntString OpType = 28 + OpUintString OpType = 29 + OpFloat32String OpType = 30 + OpFloat64String OpType = 31 + OpBoolString OpType = 32 + OpStringString OpType = 33 + OpNumberString OpType = 34 + OpIntPtr OpType = 35 + OpUintPtr OpType = 36 + OpFloat32Ptr OpType = 37 + OpFloat64Ptr OpType = 38 + OpBoolPtr OpType = 39 + OpStringPtr OpType = 40 + OpBytesPtr OpType = 41 + OpNumberPtr OpType = 42 + OpArrayPtr OpType = 43 + OpMapPtr OpType = 44 + OpSlicePtr OpType = 45 + OpMarshalJSONPtr OpType = 46 + OpMarshalTextPtr OpType = 47 + OpInterfacePtr OpType = 48 + OpIntPtrString OpType = 49 + OpUintPtrString OpType = 50 + OpFloat32PtrString OpType = 51 + OpFloat64PtrString OpType = 52 + OpBoolPtrString OpType = 53 + OpStringPtrString OpType = 54 + OpNumberPtrString OpType = 55 + OpStructHeadInt OpType = 56 + OpStructHeadOmitEmptyInt OpType = 57 + OpStructPtrHeadInt OpType = 58 + OpStructPtrHeadOmitEmptyInt OpType = 59 + 
OpStructHeadUint OpType = 60 + OpStructHeadOmitEmptyUint OpType = 61 + OpStructPtrHeadUint OpType = 62 + OpStructPtrHeadOmitEmptyUint OpType = 63 + OpStructHeadFloat32 OpType = 64 + OpStructHeadOmitEmptyFloat32 OpType = 65 + OpStructPtrHeadFloat32 OpType = 66 + OpStructPtrHeadOmitEmptyFloat32 OpType = 67 + OpStructHeadFloat64 OpType = 68 + OpStructHeadOmitEmptyFloat64 OpType = 69 + OpStructPtrHeadFloat64 OpType = 70 + OpStructPtrHeadOmitEmptyFloat64 OpType = 71 + OpStructHeadBool OpType = 72 + OpStructHeadOmitEmptyBool OpType = 73 + OpStructPtrHeadBool OpType = 74 + OpStructPtrHeadOmitEmptyBool OpType = 75 + OpStructHeadString OpType = 76 + OpStructHeadOmitEmptyString OpType = 77 + OpStructPtrHeadString OpType = 78 + OpStructPtrHeadOmitEmptyString OpType = 79 + OpStructHeadBytes OpType = 80 + OpStructHeadOmitEmptyBytes OpType = 81 + OpStructPtrHeadBytes OpType = 82 + OpStructPtrHeadOmitEmptyBytes OpType = 83 + OpStructHeadNumber OpType = 84 + OpStructHeadOmitEmptyNumber OpType = 85 + OpStructPtrHeadNumber OpType = 86 + OpStructPtrHeadOmitEmptyNumber OpType = 87 + OpStructHeadArray OpType = 88 + OpStructHeadOmitEmptyArray OpType = 89 + OpStructPtrHeadArray OpType = 90 + OpStructPtrHeadOmitEmptyArray OpType = 91 + OpStructHeadMap OpType = 92 + OpStructHeadOmitEmptyMap OpType = 93 + OpStructPtrHeadMap OpType = 94 + OpStructPtrHeadOmitEmptyMap OpType = 95 + OpStructHeadSlice OpType = 96 + OpStructHeadOmitEmptySlice OpType = 97 + OpStructPtrHeadSlice OpType = 98 + OpStructPtrHeadOmitEmptySlice OpType = 99 + OpStructHeadStruct OpType = 100 + OpStructHeadOmitEmptyStruct OpType = 101 + OpStructPtrHeadStruct OpType = 102 + OpStructPtrHeadOmitEmptyStruct OpType = 103 + OpStructHeadMarshalJSON OpType = 104 + OpStructHeadOmitEmptyMarshalJSON OpType = 105 + OpStructPtrHeadMarshalJSON OpType = 106 + OpStructPtrHeadOmitEmptyMarshalJSON OpType = 107 + OpStructHeadMarshalText OpType = 108 + OpStructHeadOmitEmptyMarshalText OpType = 109 + OpStructPtrHeadMarshalText OpType = 110 + 
OpStructPtrHeadOmitEmptyMarshalText OpType = 111 + OpStructHeadIntString OpType = 112 + OpStructHeadOmitEmptyIntString OpType = 113 + OpStructPtrHeadIntString OpType = 114 + OpStructPtrHeadOmitEmptyIntString OpType = 115 + OpStructHeadUintString OpType = 116 + OpStructHeadOmitEmptyUintString OpType = 117 + OpStructPtrHeadUintString OpType = 118 + OpStructPtrHeadOmitEmptyUintString OpType = 119 + OpStructHeadFloat32String OpType = 120 + OpStructHeadOmitEmptyFloat32String OpType = 121 + OpStructPtrHeadFloat32String OpType = 122 + OpStructPtrHeadOmitEmptyFloat32String OpType = 123 + OpStructHeadFloat64String OpType = 124 + OpStructHeadOmitEmptyFloat64String OpType = 125 + OpStructPtrHeadFloat64String OpType = 126 + OpStructPtrHeadOmitEmptyFloat64String OpType = 127 + OpStructHeadBoolString OpType = 128 + OpStructHeadOmitEmptyBoolString OpType = 129 + OpStructPtrHeadBoolString OpType = 130 + OpStructPtrHeadOmitEmptyBoolString OpType = 131 + OpStructHeadStringString OpType = 132 + OpStructHeadOmitEmptyStringString OpType = 133 + OpStructPtrHeadStringString OpType = 134 + OpStructPtrHeadOmitEmptyStringString OpType = 135 + OpStructHeadNumberString OpType = 136 + OpStructHeadOmitEmptyNumberString OpType = 137 + OpStructPtrHeadNumberString OpType = 138 + OpStructPtrHeadOmitEmptyNumberString OpType = 139 + OpStructHeadIntPtr OpType = 140 + OpStructHeadOmitEmptyIntPtr OpType = 141 + OpStructPtrHeadIntPtr OpType = 142 + OpStructPtrHeadOmitEmptyIntPtr OpType = 143 + OpStructHeadUintPtr OpType = 144 + OpStructHeadOmitEmptyUintPtr OpType = 145 + OpStructPtrHeadUintPtr OpType = 146 + OpStructPtrHeadOmitEmptyUintPtr OpType = 147 + OpStructHeadFloat32Ptr OpType = 148 + OpStructHeadOmitEmptyFloat32Ptr OpType = 149 + OpStructPtrHeadFloat32Ptr OpType = 150 + OpStructPtrHeadOmitEmptyFloat32Ptr OpType = 151 + OpStructHeadFloat64Ptr OpType = 152 + OpStructHeadOmitEmptyFloat64Ptr OpType = 153 + OpStructPtrHeadFloat64Ptr OpType = 154 + OpStructPtrHeadOmitEmptyFloat64Ptr OpType = 155 + 
OpStructHeadBoolPtr OpType = 156 + OpStructHeadOmitEmptyBoolPtr OpType = 157 + OpStructPtrHeadBoolPtr OpType = 158 + OpStructPtrHeadOmitEmptyBoolPtr OpType = 159 + OpStructHeadStringPtr OpType = 160 + OpStructHeadOmitEmptyStringPtr OpType = 161 + OpStructPtrHeadStringPtr OpType = 162 + OpStructPtrHeadOmitEmptyStringPtr OpType = 163 + OpStructHeadBytesPtr OpType = 164 + OpStructHeadOmitEmptyBytesPtr OpType = 165 + OpStructPtrHeadBytesPtr OpType = 166 + OpStructPtrHeadOmitEmptyBytesPtr OpType = 167 + OpStructHeadNumberPtr OpType = 168 + OpStructHeadOmitEmptyNumberPtr OpType = 169 + OpStructPtrHeadNumberPtr OpType = 170 + OpStructPtrHeadOmitEmptyNumberPtr OpType = 171 + OpStructHeadArrayPtr OpType = 172 + OpStructHeadOmitEmptyArrayPtr OpType = 173 + OpStructPtrHeadArrayPtr OpType = 174 + OpStructPtrHeadOmitEmptyArrayPtr OpType = 175 + OpStructHeadMapPtr OpType = 176 + OpStructHeadOmitEmptyMapPtr OpType = 177 + OpStructPtrHeadMapPtr OpType = 178 + OpStructPtrHeadOmitEmptyMapPtr OpType = 179 + OpStructHeadSlicePtr OpType = 180 + OpStructHeadOmitEmptySlicePtr OpType = 181 + OpStructPtrHeadSlicePtr OpType = 182 + OpStructPtrHeadOmitEmptySlicePtr OpType = 183 + OpStructHeadMarshalJSONPtr OpType = 184 + OpStructHeadOmitEmptyMarshalJSONPtr OpType = 185 + OpStructPtrHeadMarshalJSONPtr OpType = 186 + OpStructPtrHeadOmitEmptyMarshalJSONPtr OpType = 187 + OpStructHeadMarshalTextPtr OpType = 188 + OpStructHeadOmitEmptyMarshalTextPtr OpType = 189 + OpStructPtrHeadMarshalTextPtr OpType = 190 + OpStructPtrHeadOmitEmptyMarshalTextPtr OpType = 191 + OpStructHeadInterfacePtr OpType = 192 + OpStructHeadOmitEmptyInterfacePtr OpType = 193 + OpStructPtrHeadInterfacePtr OpType = 194 + OpStructPtrHeadOmitEmptyInterfacePtr OpType = 195 + OpStructHeadIntPtrString OpType = 196 + OpStructHeadOmitEmptyIntPtrString OpType = 197 + OpStructPtrHeadIntPtrString OpType = 198 + OpStructPtrHeadOmitEmptyIntPtrString OpType = 199 + OpStructHeadUintPtrString OpType = 200 + OpStructHeadOmitEmptyUintPtrString 
OpType = 201 + OpStructPtrHeadUintPtrString OpType = 202 + OpStructPtrHeadOmitEmptyUintPtrString OpType = 203 + OpStructHeadFloat32PtrString OpType = 204 + OpStructHeadOmitEmptyFloat32PtrString OpType = 205 + OpStructPtrHeadFloat32PtrString OpType = 206 + OpStructPtrHeadOmitEmptyFloat32PtrString OpType = 207 + OpStructHeadFloat64PtrString OpType = 208 + OpStructHeadOmitEmptyFloat64PtrString OpType = 209 + OpStructPtrHeadFloat64PtrString OpType = 210 + OpStructPtrHeadOmitEmptyFloat64PtrString OpType = 211 + OpStructHeadBoolPtrString OpType = 212 + OpStructHeadOmitEmptyBoolPtrString OpType = 213 + OpStructPtrHeadBoolPtrString OpType = 214 + OpStructPtrHeadOmitEmptyBoolPtrString OpType = 215 + OpStructHeadStringPtrString OpType = 216 + OpStructHeadOmitEmptyStringPtrString OpType = 217 + OpStructPtrHeadStringPtrString OpType = 218 + OpStructPtrHeadOmitEmptyStringPtrString OpType = 219 + OpStructHeadNumberPtrString OpType = 220 + OpStructHeadOmitEmptyNumberPtrString OpType = 221 + OpStructPtrHeadNumberPtrString OpType = 222 + OpStructPtrHeadOmitEmptyNumberPtrString OpType = 223 + OpStructHead OpType = 224 + OpStructHeadOmitEmpty OpType = 225 + OpStructPtrHead OpType = 226 + OpStructPtrHeadOmitEmpty OpType = 227 + OpStructFieldInt OpType = 228 + OpStructFieldOmitEmptyInt OpType = 229 + OpStructEndInt OpType = 230 + OpStructEndOmitEmptyInt OpType = 231 + OpStructFieldUint OpType = 232 + OpStructFieldOmitEmptyUint OpType = 233 + OpStructEndUint OpType = 234 + OpStructEndOmitEmptyUint OpType = 235 + OpStructFieldFloat32 OpType = 236 + OpStructFieldOmitEmptyFloat32 OpType = 237 + OpStructEndFloat32 OpType = 238 + OpStructEndOmitEmptyFloat32 OpType = 239 + OpStructFieldFloat64 OpType = 240 + OpStructFieldOmitEmptyFloat64 OpType = 241 + OpStructEndFloat64 OpType = 242 + OpStructEndOmitEmptyFloat64 OpType = 243 + OpStructFieldBool OpType = 244 + OpStructFieldOmitEmptyBool OpType = 245 + OpStructEndBool OpType = 246 + OpStructEndOmitEmptyBool OpType = 247 + OpStructFieldString 
OpType = 248 + OpStructFieldOmitEmptyString OpType = 249 + OpStructEndString OpType = 250 + OpStructEndOmitEmptyString OpType = 251 + OpStructFieldBytes OpType = 252 + OpStructFieldOmitEmptyBytes OpType = 253 + OpStructEndBytes OpType = 254 + OpStructEndOmitEmptyBytes OpType = 255 + OpStructFieldNumber OpType = 256 + OpStructFieldOmitEmptyNumber OpType = 257 + OpStructEndNumber OpType = 258 + OpStructEndOmitEmptyNumber OpType = 259 + OpStructFieldArray OpType = 260 + OpStructFieldOmitEmptyArray OpType = 261 + OpStructEndArray OpType = 262 + OpStructEndOmitEmptyArray OpType = 263 + OpStructFieldMap OpType = 264 + OpStructFieldOmitEmptyMap OpType = 265 + OpStructEndMap OpType = 266 + OpStructEndOmitEmptyMap OpType = 267 + OpStructFieldSlice OpType = 268 + OpStructFieldOmitEmptySlice OpType = 269 + OpStructEndSlice OpType = 270 + OpStructEndOmitEmptySlice OpType = 271 + OpStructFieldStruct OpType = 272 + OpStructFieldOmitEmptyStruct OpType = 273 + OpStructEndStruct OpType = 274 + OpStructEndOmitEmptyStruct OpType = 275 + OpStructFieldMarshalJSON OpType = 276 + OpStructFieldOmitEmptyMarshalJSON OpType = 277 + OpStructEndMarshalJSON OpType = 278 + OpStructEndOmitEmptyMarshalJSON OpType = 279 + OpStructFieldMarshalText OpType = 280 + OpStructFieldOmitEmptyMarshalText OpType = 281 + OpStructEndMarshalText OpType = 282 + OpStructEndOmitEmptyMarshalText OpType = 283 + OpStructFieldIntString OpType = 284 + OpStructFieldOmitEmptyIntString OpType = 285 + OpStructEndIntString OpType = 286 + OpStructEndOmitEmptyIntString OpType = 287 + OpStructFieldUintString OpType = 288 + OpStructFieldOmitEmptyUintString OpType = 289 + OpStructEndUintString OpType = 290 + OpStructEndOmitEmptyUintString OpType = 291 + OpStructFieldFloat32String OpType = 292 + OpStructFieldOmitEmptyFloat32String OpType = 293 + OpStructEndFloat32String OpType = 294 + OpStructEndOmitEmptyFloat32String OpType = 295 + OpStructFieldFloat64String OpType = 296 + OpStructFieldOmitEmptyFloat64String OpType = 297 + 
OpStructEndFloat64String OpType = 298 + OpStructEndOmitEmptyFloat64String OpType = 299 + OpStructFieldBoolString OpType = 300 + OpStructFieldOmitEmptyBoolString OpType = 301 + OpStructEndBoolString OpType = 302 + OpStructEndOmitEmptyBoolString OpType = 303 + OpStructFieldStringString OpType = 304 + OpStructFieldOmitEmptyStringString OpType = 305 + OpStructEndStringString OpType = 306 + OpStructEndOmitEmptyStringString OpType = 307 + OpStructFieldNumberString OpType = 308 + OpStructFieldOmitEmptyNumberString OpType = 309 + OpStructEndNumberString OpType = 310 + OpStructEndOmitEmptyNumberString OpType = 311 + OpStructFieldIntPtr OpType = 312 + OpStructFieldOmitEmptyIntPtr OpType = 313 + OpStructEndIntPtr OpType = 314 + OpStructEndOmitEmptyIntPtr OpType = 315 + OpStructFieldUintPtr OpType = 316 + OpStructFieldOmitEmptyUintPtr OpType = 317 + OpStructEndUintPtr OpType = 318 + OpStructEndOmitEmptyUintPtr OpType = 319 + OpStructFieldFloat32Ptr OpType = 320 + OpStructFieldOmitEmptyFloat32Ptr OpType = 321 + OpStructEndFloat32Ptr OpType = 322 + OpStructEndOmitEmptyFloat32Ptr OpType = 323 + OpStructFieldFloat64Ptr OpType = 324 + OpStructFieldOmitEmptyFloat64Ptr OpType = 325 + OpStructEndFloat64Ptr OpType = 326 + OpStructEndOmitEmptyFloat64Ptr OpType = 327 + OpStructFieldBoolPtr OpType = 328 + OpStructFieldOmitEmptyBoolPtr OpType = 329 + OpStructEndBoolPtr OpType = 330 + OpStructEndOmitEmptyBoolPtr OpType = 331 + OpStructFieldStringPtr OpType = 332 + OpStructFieldOmitEmptyStringPtr OpType = 333 + OpStructEndStringPtr OpType = 334 + OpStructEndOmitEmptyStringPtr OpType = 335 + OpStructFieldBytesPtr OpType = 336 + OpStructFieldOmitEmptyBytesPtr OpType = 337 + OpStructEndBytesPtr OpType = 338 + OpStructEndOmitEmptyBytesPtr OpType = 339 + OpStructFieldNumberPtr OpType = 340 + OpStructFieldOmitEmptyNumberPtr OpType = 341 + OpStructEndNumberPtr OpType = 342 + OpStructEndOmitEmptyNumberPtr OpType = 343 + OpStructFieldArrayPtr OpType = 344 + OpStructFieldOmitEmptyArrayPtr OpType = 345 
+ OpStructEndArrayPtr OpType = 346 + OpStructEndOmitEmptyArrayPtr OpType = 347 + OpStructFieldMapPtr OpType = 348 + OpStructFieldOmitEmptyMapPtr OpType = 349 + OpStructEndMapPtr OpType = 350 + OpStructEndOmitEmptyMapPtr OpType = 351 + OpStructFieldSlicePtr OpType = 352 + OpStructFieldOmitEmptySlicePtr OpType = 353 + OpStructEndSlicePtr OpType = 354 + OpStructEndOmitEmptySlicePtr OpType = 355 + OpStructFieldMarshalJSONPtr OpType = 356 + OpStructFieldOmitEmptyMarshalJSONPtr OpType = 357 + OpStructEndMarshalJSONPtr OpType = 358 + OpStructEndOmitEmptyMarshalJSONPtr OpType = 359 + OpStructFieldMarshalTextPtr OpType = 360 + OpStructFieldOmitEmptyMarshalTextPtr OpType = 361 + OpStructEndMarshalTextPtr OpType = 362 + OpStructEndOmitEmptyMarshalTextPtr OpType = 363 + OpStructFieldInterfacePtr OpType = 364 + OpStructFieldOmitEmptyInterfacePtr OpType = 365 + OpStructEndInterfacePtr OpType = 366 + OpStructEndOmitEmptyInterfacePtr OpType = 367 + OpStructFieldIntPtrString OpType = 368 + OpStructFieldOmitEmptyIntPtrString OpType = 369 + OpStructEndIntPtrString OpType = 370 + OpStructEndOmitEmptyIntPtrString OpType = 371 + OpStructFieldUintPtrString OpType = 372 + OpStructFieldOmitEmptyUintPtrString OpType = 373 + OpStructEndUintPtrString OpType = 374 + OpStructEndOmitEmptyUintPtrString OpType = 375 + OpStructFieldFloat32PtrString OpType = 376 + OpStructFieldOmitEmptyFloat32PtrString OpType = 377 + OpStructEndFloat32PtrString OpType = 378 + OpStructEndOmitEmptyFloat32PtrString OpType = 379 + OpStructFieldFloat64PtrString OpType = 380 + OpStructFieldOmitEmptyFloat64PtrString OpType = 381 + OpStructEndFloat64PtrString OpType = 382 + OpStructEndOmitEmptyFloat64PtrString OpType = 383 + OpStructFieldBoolPtrString OpType = 384 + OpStructFieldOmitEmptyBoolPtrString OpType = 385 + OpStructEndBoolPtrString OpType = 386 + OpStructEndOmitEmptyBoolPtrString OpType = 387 + OpStructFieldStringPtrString OpType = 388 + OpStructFieldOmitEmptyStringPtrString OpType = 389 + 
OpStructEndStringPtrString OpType = 390 + OpStructEndOmitEmptyStringPtrString OpType = 391 + OpStructFieldNumberPtrString OpType = 392 + OpStructFieldOmitEmptyNumberPtrString OpType = 393 + OpStructEndNumberPtrString OpType = 394 + OpStructEndOmitEmptyNumberPtrString OpType = 395 + OpStructField OpType = 396 + OpStructFieldOmitEmpty OpType = 397 + OpStructEnd OpType = 398 + OpStructEndOmitEmpty OpType = 399 +) + +func (t OpType) String() string { + if int(t) >= 400 { + return "" + } + return opTypeStrings[int(t)] +} + +func (t OpType) CodeType() CodeType { + if strings.Contains(t.String(), "Struct") { + if strings.Contains(t.String(), "End") { + return CodeStructEnd + } + return CodeStructField + } + switch t { + case OpArray, OpArrayPtr: + return CodeArrayHead + case OpArrayElem: + return CodeArrayElem + case OpSlice, OpSlicePtr: + return CodeSliceHead + case OpSliceElem: + return CodeSliceElem + case OpMap, OpMapPtr: + return CodeMapHead + case OpMapKey: + return CodeMapKey + case OpMapValue: + return CodeMapValue + case OpMapEnd: + return CodeMapEnd + } + + return CodeOp +} + +func (t OpType) HeadToPtrHead() OpType { + if strings.Index(t.String(), "PtrHead") > 0 { + return t + } + + idx := strings.Index(t.String(), "Head") + if idx == -1 { + return t + } + suffix := "PtrHead" + t.String()[idx+len("Head"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)+toPtrOffset).String(), suffix) { + return OpType(int(t) + toPtrOffset) + } + return t +} + +func (t OpType) HeadToOmitEmptyHead() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + + return t +} + +func (t OpType) PtrHeadToHead() OpType { + idx := strings.Index(t.String(), "PtrHead") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Ptr"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)-toPtrOffset).String(), suffix) { + return OpType(int(t) - 
toPtrOffset) + } + return t +} + +func (t OpType) FieldToEnd() OpType { + idx := strings.Index(t.String(), "Field") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Field"):] + if suffix == "" || suffix == "OmitEmpty" { + return t + } + const toEndOffset = 2 + if strings.Contains(OpType(int(t)+toEndOffset).String(), "End"+suffix) { + return OpType(int(t) + toEndOffset) + } + return t +} + +func (t OpType) FieldToOmitEmptyField() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + return t +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/query.go b/vendor/github.com/goccy/go-json/internal/encoder/query.go new file mode 100644 index 0000000000..1e1850cc15 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/query.go @@ -0,0 +1,135 @@ +package encoder + +import ( + "context" + "fmt" + "reflect" +) + +var ( + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +) + +type FieldQuery struct { + Name string + Fields []*FieldQuery + hash string +} + +func (q *FieldQuery) Hash() string { + if q.hash != "" { + return q.hash + } + b, _ := Marshal(q) + q.hash = string(b) + return q.hash +} + +func (q *FieldQuery) MarshalJSON() ([]byte, error) { + if q.Name != "" { + if len(q.Fields) > 0 { + return Marshal(map[string][]*FieldQuery{q.Name: q.Fields}) + } + return Marshal(q.Name) + } + return Marshal(q.Fields) +} + +func (q *FieldQuery) QueryString() (FieldQueryString, error) { + b, err := Marshal(q) + if err != nil { + return "", err + } + return FieldQueryString(b), nil +} + +type FieldQueryString string + +func (s FieldQueryString) Build() (*FieldQuery, error) { + var query interface{} + if err := Unmarshal([]byte(s), &query); err != nil { + return nil, err + } + return s.build(reflect.ValueOf(query)) +} + +func (s FieldQueryString) build(v reflect.Value) (*FieldQuery, error) { + 
switch v.Type().Kind() { + case reflect.String: + return s.buildString(v) + case reflect.Map: + return s.buildMap(v) + case reflect.Slice: + return s.buildSlice(v) + case reflect.Interface: + return s.build(reflect.ValueOf(v.Interface())) + } + return nil, fmt.Errorf("failed to build field query") +} + +func (s FieldQueryString) buildString(v reflect.Value) (*FieldQuery, error) { + b := []byte(v.String()) + switch b[0] { + case '[', '{': + var query interface{} + if err := Unmarshal(b, &query); err != nil { + return nil, err + } + if str, ok := query.(string); ok { + return &FieldQuery{Name: str}, nil + } + return s.build(reflect.ValueOf(query)) + } + return &FieldQuery{Name: string(b)}, nil +} + +func (s FieldQueryString) buildSlice(v reflect.Value) (*FieldQuery, error) { + fields := make([]*FieldQuery, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + def, err := s.build(v.Index(i)) + if err != nil { + return nil, err + } + fields = append(fields, def) + } + return &FieldQuery{Fields: fields}, nil +} + +func (s FieldQueryString) buildMap(v reflect.Value) (*FieldQuery, error) { + keys := v.MapKeys() + if len(keys) != 1 { + return nil, fmt.Errorf("failed to build field query object") + } + key := keys[0] + if key.Type().Kind() != reflect.String { + return nil, fmt.Errorf("failed to build field query. 
invalid object key type") + } + name := key.String() + def, err := s.build(v.MapIndex(key)) + if err != nil { + return nil, err + } + return &FieldQuery{ + Name: name, + Fields: def.Fields, + }, nil +} + +type queryKey struct{} + +func FieldQueryFromContext(ctx context.Context) *FieldQuery { + query := ctx.Value(queryKey{}) + if query == nil { + return nil + } + q, ok := query.(*FieldQuery) + if !ok { + return nil + } + return q +} + +func SetFieldQueryToContext(ctx context.Context, query *FieldQuery) context.Context { + return context.WithValue(ctx, queryKey{}, query) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string.go b/vendor/github.com/goccy/go-json/internal/encoder/string.go new file mode 100644 index 0000000000..e4152b27c7 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string.go @@ -0,0 +1,459 @@ +package encoder + +import ( + "math/bits" + "reflect" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +var hex = "0123456789abcdef" + +//nolint:govet +func stringToUint64Slice(s string) []uint64 { + return *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{ + Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + Len: len(s) / 8, + Cap: len(s) / 8, + })) +} + +func AppendString(ctx *RuntimeContext, buf []byte, s string) []byte { + if ctx.Option.Flag&HTMLEscapeOption != 0 { + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedHTMLString(buf, s) + } + return appendHTMLString(buf, s) + } + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedString(buf, s) + } + return appendString(buf, s) +} + +func appendNormalizedHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. 
We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTMLNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTMLNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. 
+ // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) + i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTML[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTML[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) 
+ buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} + +func appendNormalizedString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) 
+ buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) + i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. 
We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscape[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscape[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) 
+ buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string_table.go b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go new file mode 100644 index 0000000000..ebe42c92df --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go @@ -0,0 +1,415 @@ +package encoder + +var needEscapeHTMLNormalizeUTF8 = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: 
true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeNormalizeUTF8 = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: 
true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeHTML = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} + +var needEscape = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 
0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go new file mode 100644 index 0000000000..fbbc0de44c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go @@ -0,0 +1,35 @@ +package vm + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + defer func() { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go new file mode 100644 index 0000000000..65252b4a5c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go @@ -0,0 +1,9 @@ +package vm + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go new file mode 100644 index 0000000000..86291d7bb3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go @@ -0,0 +1,207 @@ +package vm + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) 
interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key...) + b[len(b)-1] = ':' + return append(b, value...) +} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + b[len(b)-1] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalText(ctx, code, b, v) +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return 
append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(_ *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return append(b, code.Key...) +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go new file mode 100644 index 0000000000..645d20f9fb --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package vm + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = 
code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case 
encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) 
+ b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx 
:= load(ctxptr, code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := 
mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) + mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, 
b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next 
+ break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, 
p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) 
+ } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && 
(code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 
{ + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { 
+ p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + 
fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v 
== "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := 
load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, 
b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case 
encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := 
ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + 
if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case 
encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags 
& encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += 
uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := 
load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + 
b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 
0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case 
encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { 
+ b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + 
} + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := 
ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + 
b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if 
err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if 
err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := 
load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = 
appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + 
uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = 
appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + 
uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = 
appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + 
case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go new file mode 100644 index 0000000000..925f61ed8e --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go @@ -0,0 +1,35 @@ +package vm_color + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go new file mode 100644 index 0000000000..12ec56c5bb --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go @@ -0,0 +1,9 @@ +package vm_color + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_color_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go new file mode 100644 index 0000000000..33f29aee44 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go @@ -0,0 +1,274 @@ +package vm_color + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) 
interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + b = encoder.AppendInt(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Uint + b = append(b, format.Header...) + b = encoder.AppendUint(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat32(ctx, b, v) + return append(b, format.Footer...) +} + +func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat64(ctx, b, v) + return append(b, format.Footer...) +} + +func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + b = encoder.AppendString(ctx, b, v) + return append(b, format.Footer...) +} + +func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { + format := ctx.Option.ColorScheme.Binary + b = append(b, format.Header...) + b = encoder.AppendByteSlice(ctx, b, src) + return append(b, format.Footer...) +} + +func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) 
+ bb, err := encoder.AppendNumber(ctx, b, n) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { + format := ctx.Option.ColorScheme.Bool + b = append(b, format.Header...) + if v { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + return append(b, format.Footer...) +} + +func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(b, format.Footer...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(append(b, format.Footer...), ',') +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key[:len(key)-1]...) + b = append(b, ':') + return append(b, value...) +} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) 
+ bb, err := encoder.AppendMarshalText(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) 
+ + return append(b, ':') +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go new file mode 100644 index 0000000000..a63e83e550 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package vm_color + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = 
code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case 
encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) 
+ b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx 
:= load(ctxptr, code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := 
mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) + mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, 
b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next 
+ break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, 
p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) 
+ } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && 
(code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 
{ + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { 
+ p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + 
fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v 
== "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := 
load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, 
b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case 
encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := 
ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + 
if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case 
encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags 
& encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += 
uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := 
load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + 
b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 
0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case 
encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { 
+ b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + 
} + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := 
ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + 
b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if 
err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if 
err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := 
load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = 
appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + 
uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = 
appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + 
uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = 
appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + 
case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go new file mode 100644 index 0000000000..dd4cd489e0 --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_color_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go new file mode 100644 index 0000000000..60e4a8ed56 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go @@ -0,0 +1,296 @@ +package vm_color_indent + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendIndent = encoder.AppendIndent + appendStructEnd = encoder.AppendStructEndIndent + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp 
*runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... + } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} + +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) 
unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + b = encoder.AppendInt(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Uint + b = append(b, format.Header...) + b = encoder.AppendUint(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat32(ctx, b, v) + return append(b, format.Footer...) +} + +func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat64(ctx, b, v) + return append(b, format.Footer...) +} + +func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + b = encoder.AppendString(ctx, b, v) + return append(b, format.Footer...) +} + +func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { + format := ctx.Option.ColorScheme.Binary + b = append(b, format.Header...) + b = encoder.AppendByteSlice(ctx, b, src) + return append(b, format.Footer...) +} + +func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) 
+ bb, err := encoder.AppendNumber(ctx, b, n) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { + format := ctx.Option.ColorScheme.Bool + b = append(b, format.Header...) + if v { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + return append(b, format.Footer...) +} + +func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(b, format.Footer...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(append(b, format.Footer...), ',', '\n') +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ':', ' ') +} + +func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { + b = appendIndent(ctx, b, code.Indent+1) + b = append(b, key...) + b[len(b)-2] = ':' + b[len(b)-1] = ' ' + return append(b, value...) 
+} + +func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, '}', ',', '\n') +} + +func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = append(b, '[', '\n') + return appendIndent(ctx, b, code.Indent+1) +} + +func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, ']', ',', '\n') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',', '\n') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',', '\n') +} + +func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '\n' + b = appendIndent(ctx, b, code.Indent-1) + return append(b, '}', ',', '\n') +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSONIndent(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + bb, err := encoder.AppendMarshalTextIndent(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '\n') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = appendIndent(ctx, b, code.Indent) + + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) 
+ + return append(b, ':', ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go new file mode 100644 index 0000000000..3b4e22e5d4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package vm_color_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + 
code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case 
encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) 
+ b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx 
:= load(ctxptr, code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := 
mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) + mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, 
b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next 
+ break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, 
p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) 
+ } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && 
(code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 
{ + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { 
+ p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + 
fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v 
== "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := 
load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, 
b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case 
encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := 
ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + 
if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case 
encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags 
& encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += 
uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := 
load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + 
b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 
0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case 
encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { 
+ b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + 
} + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := 
ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + 
b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if 
err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if 
err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := 
load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = 
appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + 
uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = 
appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + 
uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = 
appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + 
case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go new file mode 100644 index 0000000000..99395388c1 --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go new file mode 100644 index 0000000000..9e245bfe57 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go @@ -0,0 +1,9 @@ +package vm_indent + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_color" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go new file mode 100644 index 0000000000..fca8f18555 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go @@ -0,0 +1,229 @@ +package vm_indent + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + appendStructEnd = encoder.AppendStructEndIndent + appendIndent = encoder.AppendIndent + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p 
uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,\n"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ':', ' ') +} + +func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { + b = appendIndent(ctx, b, code.Indent+1) + b = append(b, key...) + b[len(b)-2] = ':' + b[len(b)-1] = ' ' + return append(b, value...) +} + +func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, '}', ',', '\n') +} + +func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = append(b, '[', '\n') + return appendIndent(ctx, b, code.Indent+1) +} + +func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, ']', ',', '\n') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',', '\n') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',', '\n') +} + +func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '\n' + b = appendIndent(ctx, b, code.Indent-1) + return append(b, '}', ',', '\n') +} + +func 
appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSONIndent(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalTextIndent(ctx, code, b, v) +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '\n') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = appendIndent(ctx, b, code.Indent) + b = append(b, code.Key...) + return append(b, ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go new file mode 100644 index 0000000000..836c5c8a85 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package vm_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = 
code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case 
encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) 
+ b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx 
:= load(ctxptr, code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := 
mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) + mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, 
b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next 
+ break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, 
p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) 
+ } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && 
(code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 
{ + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { 
+ p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = 
appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + 
fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v 
== "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := 
load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, 
b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case 
encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := 
ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + 
if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case 
encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags 
& encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += 
uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := 
load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & 
encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + 
b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 
0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case 
encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { 
+ b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + 
} + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := 
ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + 
b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if 
err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if 
err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := 
load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = 
appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + 
uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = 
appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + 
uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = 
appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + 
case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, 
code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = 
appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/errors/error.go b/vendor/github.com/goccy/go-json/internal/errors/error.go new file mode 100644 index 0000000000..d58e39f4e1 --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/errors/error.go @@ -0,0 +1,164 @@ +package errors + +import ( + "fmt" + "reflect" + "strconv" +) + +type InvalidUTF8Error struct { + S string // the whole string value that caused the error +} + +func (e *InvalidUTF8Error) Error() string { + return fmt.Sprintf("json: invalid UTF-8 in string: %s", strconv.Quote(e.S)) +} + +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return fmt.Sprintf("json: Unmarshal(non-pointer %s)", e.Type) + } + return fmt.Sprintf("json: Unmarshal(nil %s)", e.Type) +} + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError struct { + Type reflect.Type + Err error + sourceFunc string +} + +func (e *MarshalerError) Error() string { + srcFunc := e.sourceFunc + if srcFunc == "" { + srcFunc = "MarshalJSON" + } + return fmt.Sprintf("json: error calling %s for type %s: %s", srcFunc, e.Type, e.Err.Error()) +} + +// Unwrap returns the underlying error. +func (e *MarshalerError) Unwrap() error { return e.Err } + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError struct { + msg string // description of error + Offset int64 // error occurred after reading Offset bytes +} + +func (e *SyntaxError) Error() string { return e.msg } + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. 
+type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return fmt.Sprintf("json: cannot unmarshal object key %s into unexported field %s of type %s", + strconv.Quote(e.Key), e.Field.Name, e.Type.String(), + ) +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return fmt.Sprintf("json: cannot unmarshal %s into Go struct field %s.%s of type %s", + e.Value, e.Struct, e.Field, e.Type, + ) + } + return fmt.Sprintf("json: cannot unmarshal %s into Go value of type %s", e.Value, e.Type) +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. 
+type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return fmt.Sprintf("json: unsupported type: %s", e.Type) +} + +type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return fmt.Sprintf("json: unsupported value: %s", e.Str) +} + +func ErrSyntax(msg string, offset int64) *SyntaxError { + return &SyntaxError{msg: msg, Offset: offset} +} + +func ErrMarshaler(typ reflect.Type, err error, msg string) *MarshalerError { + return &MarshalerError{ + Type: typ, + Err: err, + sourceFunc: msg, + } +} + +func ErrExceededMaxDepth(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf(`invalid character "%c" exceeded max depth`, c), + Offset: cursor, + } +} + +func ErrNotAtBeginningOfValue(cursor int64) *SyntaxError { + return &SyntaxError{msg: "not at beginning of value", Offset: cursor} +} + +func ErrUnexpectedEndOfJSON(msg string, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("json: %s unexpected end of JSON input", msg), + Offset: cursor, + } +} + +func ErrExpected(msg string, cursor int64) *SyntaxError { + return &SyntaxError{msg: fmt.Sprintf("expected %s", msg), Offset: cursor} +} + +func ErrInvalidCharacter(c byte, context string, cursor int64) *SyntaxError { + if c == 0 { + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character as %s", context), + Offset: cursor, + } + } + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character %c as %s", c, context), + Offset: cursor, + } +} + +func ErrInvalidBeginningOfValue(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("invalid character '%c' looking for beginning of value", c), + Offset: cursor, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go new file mode 100644 index 0000000000..4db10debe1 --- /dev/null +++ 
b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go @@ -0,0 +1,263 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +// Type representing reflect.rtype for noescape trick +type Type struct{} + +//go:linkname rtype_Align reflect.(*rtype).Align +//go:noescape +func rtype_Align(*Type) int + +func (t *Type) Align() int { + return rtype_Align(t) +} + +//go:linkname rtype_FieldAlign reflect.(*rtype).FieldAlign +//go:noescape +func rtype_FieldAlign(*Type) int + +func (t *Type) FieldAlign() int { + return rtype_FieldAlign(t) +} + +//go:linkname rtype_Method reflect.(*rtype).Method +//go:noescape +func rtype_Method(*Type, int) reflect.Method + +func (t *Type) Method(a0 int) reflect.Method { + return rtype_Method(t, a0) +} + +//go:linkname rtype_MethodByName reflect.(*rtype).MethodByName +//go:noescape +func rtype_MethodByName(*Type, string) (reflect.Method, bool) + +func (t *Type) MethodByName(a0 string) (reflect.Method, bool) { + return rtype_MethodByName(t, a0) +} + +//go:linkname rtype_NumMethod reflect.(*rtype).NumMethod +//go:noescape +func rtype_NumMethod(*Type) int + +func (t *Type) NumMethod() int { + return rtype_NumMethod(t) +} + +//go:linkname rtype_Name reflect.(*rtype).Name +//go:noescape +func rtype_Name(*Type) string + +func (t *Type) Name() string { + return rtype_Name(t) +} + +//go:linkname rtype_PkgPath reflect.(*rtype).PkgPath +//go:noescape +func rtype_PkgPath(*Type) string + +func (t *Type) PkgPath() string { + return rtype_PkgPath(t) +} + +//go:linkname rtype_Size reflect.(*rtype).Size +//go:noescape +func rtype_Size(*Type) uintptr + +func (t *Type) Size() uintptr { + return rtype_Size(t) +} + +//go:linkname rtype_String reflect.(*rtype).String +//go:noescape +func rtype_String(*Type) string + +func (t *Type) String() string { + return rtype_String(t) +} + +//go:linkname rtype_Kind reflect.(*rtype).Kind +//go:noescape +func rtype_Kind(*Type) reflect.Kind + +func (t *Type) Kind() reflect.Kind { + return rtype_Kind(t) +} + 
+//go:linkname rtype_Implements reflect.(*rtype).Implements +//go:noescape +func rtype_Implements(*Type, reflect.Type) bool + +func (t *Type) Implements(u reflect.Type) bool { + return rtype_Implements(t, u) +} + +//go:linkname rtype_AssignableTo reflect.(*rtype).AssignableTo +//go:noescape +func rtype_AssignableTo(*Type, reflect.Type) bool + +func (t *Type) AssignableTo(u reflect.Type) bool { + return rtype_AssignableTo(t, u) +} + +//go:linkname rtype_ConvertibleTo reflect.(*rtype).ConvertibleTo +//go:noescape +func rtype_ConvertibleTo(*Type, reflect.Type) bool + +func (t *Type) ConvertibleTo(u reflect.Type) bool { + return rtype_ConvertibleTo(t, u) +} + +//go:linkname rtype_Comparable reflect.(*rtype).Comparable +//go:noescape +func rtype_Comparable(*Type) bool + +func (t *Type) Comparable() bool { + return rtype_Comparable(t) +} + +//go:linkname rtype_Bits reflect.(*rtype).Bits +//go:noescape +func rtype_Bits(*Type) int + +func (t *Type) Bits() int { + return rtype_Bits(t) +} + +//go:linkname rtype_ChanDir reflect.(*rtype).ChanDir +//go:noescape +func rtype_ChanDir(*Type) reflect.ChanDir + +func (t *Type) ChanDir() reflect.ChanDir { + return rtype_ChanDir(t) +} + +//go:linkname rtype_IsVariadic reflect.(*rtype).IsVariadic +//go:noescape +func rtype_IsVariadic(*Type) bool + +func (t *Type) IsVariadic() bool { + return rtype_IsVariadic(t) +} + +//go:linkname rtype_Elem reflect.(*rtype).Elem +//go:noescape +func rtype_Elem(*Type) reflect.Type + +func (t *Type) Elem() *Type { + return Type2RType(rtype_Elem(t)) +} + +//go:linkname rtype_Field reflect.(*rtype).Field +//go:noescape +func rtype_Field(*Type, int) reflect.StructField + +func (t *Type) Field(i int) reflect.StructField { + return rtype_Field(t, i) +} + +//go:linkname rtype_FieldByIndex reflect.(*rtype).FieldByIndex +//go:noescape +func rtype_FieldByIndex(*Type, []int) reflect.StructField + +func (t *Type) FieldByIndex(index []int) reflect.StructField { + return rtype_FieldByIndex(t, index) +} + 
+//go:linkname rtype_FieldByName reflect.(*rtype).FieldByName +//go:noescape +func rtype_FieldByName(*Type, string) (reflect.StructField, bool) + +func (t *Type) FieldByName(name string) (reflect.StructField, bool) { + return rtype_FieldByName(t, name) +} + +//go:linkname rtype_FieldByNameFunc reflect.(*rtype).FieldByNameFunc +//go:noescape +func rtype_FieldByNameFunc(*Type, func(string) bool) (reflect.StructField, bool) + +func (t *Type) FieldByNameFunc(match func(string) bool) (reflect.StructField, bool) { + return rtype_FieldByNameFunc(t, match) +} + +//go:linkname rtype_In reflect.(*rtype).In +//go:noescape +func rtype_In(*Type, int) reflect.Type + +func (t *Type) In(i int) reflect.Type { + return rtype_In(t, i) +} + +//go:linkname rtype_Key reflect.(*rtype).Key +//go:noescape +func rtype_Key(*Type) reflect.Type + +func (t *Type) Key() *Type { + return Type2RType(rtype_Key(t)) +} + +//go:linkname rtype_Len reflect.(*rtype).Len +//go:noescape +func rtype_Len(*Type) int + +func (t *Type) Len() int { + return rtype_Len(t) +} + +//go:linkname rtype_NumField reflect.(*rtype).NumField +//go:noescape +func rtype_NumField(*Type) int + +func (t *Type) NumField() int { + return rtype_NumField(t) +} + +//go:linkname rtype_NumIn reflect.(*rtype).NumIn +//go:noescape +func rtype_NumIn(*Type) int + +func (t *Type) NumIn() int { + return rtype_NumIn(t) +} + +//go:linkname rtype_NumOut reflect.(*rtype).NumOut +//go:noescape +func rtype_NumOut(*Type) int + +func (t *Type) NumOut() int { + return rtype_NumOut(t) +} + +//go:linkname rtype_Out reflect.(*rtype).Out +//go:noescape +func rtype_Out(*Type, int) reflect.Type + +//go:linkname PtrTo reflect.(*rtype).ptrTo +//go:noescape +func PtrTo(*Type) *Type + +func (t *Type) Out(i int) reflect.Type { + return rtype_Out(t, i) +} + +//go:linkname IfaceIndir reflect.ifaceIndir +//go:noescape +func IfaceIndir(*Type) bool + +//go:linkname RType2Type reflect.toType +//go:noescape +func RType2Type(t *Type) reflect.Type + +//go:nolint 
structcheck +type emptyInterface struct { + _ *Type + ptr unsafe.Pointer +} + +func Type2RType(t reflect.Type) *Type { + return (*Type)(((*emptyInterface)(unsafe.Pointer(&t))).ptr) +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go new file mode 100644 index 0000000000..baab0c5978 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "reflect" + "strings" + "unicode" +) + +func getTag(field reflect.StructField) string { + return field.Tag.Get("json") +} + +func IsIgnoredStructField(field reflect.StructField) bool { + if field.PkgPath != "" { + if field.Anonymous { + t := field.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return true + } + } else { + // private field + return true + } + } + tag := getTag(field) + return tag == "-" +} + +type StructTag struct { + Key string + IsTaggedKey bool + IsOmitEmpty bool + IsString bool + Field reflect.StructField +} + +type StructTags []*StructTag + +func (t StructTags) ExistsKey(key string) bool { + for _, tt := range t { + if tt.Key == key { + return true + } + } + return false +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. 
+ case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +func StructTagFromField(field reflect.StructField) *StructTag { + keyName := field.Name + tag := getTag(field) + st := &StructTag{Field: field} + opts := strings.Split(tag, ",") + if len(opts) > 0 { + if opts[0] != "" && isValidTag(opts[0]) { + keyName = opts[0] + st.IsTaggedKey = true + } + } + st.Key = keyName + if len(opts) > 1 { + for _, opt := range opts[1:] { + switch opt { + case "omitempty": + st.IsOmitEmpty = true + case "string": + st.IsString = true + } + } + } + return st +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go new file mode 100644 index 0000000000..0167cd2c01 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go @@ -0,0 +1,100 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +type SliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +const ( + maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib +) + +type TypeAddr struct { + BaseTypeAddr uintptr + MaxTypeAddr uintptr + AddrRange uintptr + AddrShift uintptr +} + +var ( + typeAddr *TypeAddr + alreadyAnalyzed bool +) + +//go:linkname typelinks reflect.typelinks +func typelinks() ([]unsafe.Pointer, [][]int32) + +//go:linkname rtypeOff reflect.rtypeOff +func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer + +func AnalyzeTypeAddr() *TypeAddr { + defer func() { + alreadyAnalyzed = true + }() + if alreadyAnalyzed { + return typeAddr + } + sections, offsets := typelinks() + if len(sections) != 1 { + return nil + } + if len(offsets) != 1 { + return nil + } + section := sections[0] + offset := offsets[0] + var ( + min uintptr = uintptr(^uint(0)) + max uintptr = 0 + isAligned64 = true + isAligned32 = true + ) + for i := 0; i < len(offset); i++ { + typ := (*Type)(rtypeOff(section, offset[i])) + addr := uintptr(unsafe.Pointer(typ)) + if min > addr { + min = addr + } + if max < addr { 
+ max = addr + } + if typ.Kind() == reflect.Ptr { + addr = uintptr(unsafe.Pointer(typ.Elem())) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + } + isAligned64 = isAligned64 && (addr-min)&63 == 0 + isAligned32 = isAligned32 && (addr-min)&31 == 0 + } + addrRange := max - min + if addrRange == 0 { + return nil + } + var addrShift uintptr + if isAligned64 { + addrShift = 6 + } else if isAligned32 { + addrShift = 5 + } + cacheSize := addrRange >> addrShift + if cacheSize > maxAcceptableTypeAddrRange { + return nil + } + typeAddr = &TypeAddr{ + BaseTypeAddr: min, + MaxTypeAddr: max, + AddrRange: addrRange, + AddrShift: addrShift, + } + return typeAddr +} diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go new file mode 100644 index 0000000000..413cb20bf3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/json.go @@ -0,0 +1,371 @@ +package json + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/goccy/go-json/internal/encoder" +) + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid JSON. +type Marshaler interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerContext is the interface implemented by types that +// can marshal themselves into valid JSON with context.Context. +type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. 
+type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerContext is the interface implemented by types +// that can unmarshal with context.Context a JSON description of themselves. +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder that had SetEscapeHTML(false) +// called on it. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. +// Each exported struct field becomes a member of the object, using the +// field name as the object key, unless the field is omitted for one of the +// reasons given below. 
+// +// The encoding of each struct field can be customized by the format string +// stored under the "json" key in the struct field's tag. +// The format string gives the name of the field, possibly followed by a +// comma-separated list of options. The name may be empty in order to +// specify options without overriding the default field name. +// +// The "omitempty" option specifies that the field should be omitted +// from the encoding if the field has an empty value, defined as +// false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. +// +// As a special case, if the field tag is "-", the field is always omitted. +// Note that a field with name "-" can still be generated using the tag "-,". +// +// Examples of struct field tags and their meanings: +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "-". +// Field int `json:"-,"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, and ASCII punctuation except quotation +// marks, backslash, and comma. 
+// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. The map's key type must either be a +// string, an integer type, or implement encoding.TextMarshaler. The map keys +// are sorted and used as JSON object keys by applying the following rules, +// subject to the UTF-8 coercion described for string values above: +// - string keys are used directly +// - encoding.TextMarshalers are marshaled +// - integer keys are converted to strings +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. 
+// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + return MarshalWithOption(v) +} + +// MarshalNoEscape returns the JSON encoding of v and doesn't escape v. +func MarshalNoEscape(v interface{}) ([]byte, error) { + return marshalNoEscape(v) +} + +// MarshalContext returns the JSON encoding of v with context.Context and EncodeOption. +func MarshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshalContext(ctx, v, optFuncs...) +} + +// MarshalWithOption returns the JSON encoding of v with EncodeOption. +func MarshalWithOption(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshal(v, optFuncs...) +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +// Each JSON element in the output will begin on a new line beginning with prefix +// followed by one or more copies of indent according to the indentation nesting. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return MarshalIndentWithOption(v, prefix, indent) +} + +// MarshalIndentWithOption is like Marshal but applies Indent to format the output with EncodeOption. +func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshalIndent(v, prefix, indent, optFuncs...) +} + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. 
+// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. 
+// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + return unmarshal(data, v) +} + +// UnmarshalContext parses the JSON-encoded data and stores the result +// in the value pointed to by v. If you implement the UnmarshalerContext interface, +// call it with ctx as an argument. 
+func UnmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshalContext(ctx, data, v) +} + +func UnmarshalWithOption(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshal(data, v, optFuncs...) +} + +func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshalNoEscape(data, v, optFuncs...) +} + +// A Token holds a value of one of these types: +// +// Delim, for the four JSON delimiters [ ] { } +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// +type Token = json.Token + +// A Number represents a JSON number literal. +type Number = json.Number + +// RawMessage is a raw encoded JSON value. +// It implements Marshaler and Unmarshaler and can +// be used to delay JSON decoding or precompute a JSON encoding. +type RawMessage = json.RawMessage + +// A Delim is a JSON array or object delimiter, one of [ ] { or }. +type Delim = json.Delim + +// Compact appends to dst the JSON-encoded src with +// insignificant space characters elided. +func Compact(dst *bytes.Buffer, src []byte) error { + return encoder.Compact(dst, src, false) +} + +// Indent appends to dst an indented form of the JSON-encoded src. +// Each element in a JSON object or array begins on a new, +// indented line beginning with prefix followed by one or more +// copies of indent according to the indentation nesting. +// The data appended to dst does not begin with the prefix nor +// any indentation, to make it easier to embed inside other formatted JSON data. +// Although leading space characters (space, tab, carriage return, newline) +// at the beginning of src are dropped, trailing space characters +// at the end of src are preserved and copied to dst. +// For example, if src has no trailing spaces, neither will dst; +// if src ends in a trailing newline, so will dst. 
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return encoder.Indent(dst, src, prefix, indent) +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML + + + diff --git a/vendor/github.com/gookit/color/printer.go b/vendor/github.com/gookit/color/printer.go new file mode 100644 index 0000000000..985a0b624c --- /dev/null +++ b/vendor/github.com/gookit/color/printer.go @@ -0,0 +1,133 @@ +package color + +import "fmt" + +/************************************************************* + * colored message Printer + *************************************************************/ + +// PrinterFace interface +type PrinterFace interface { + fmt.Stringer + Sprint(a ...any) string + Sprintf(format string, a ...any) string + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) +} + +// Printer a generic color message printer. +// +// Usage: +// +// p := &Printer{Code: "32;45;3"} +// p.Print("message") +type Printer struct { + // NoColor disable color. + NoColor bool + // Code color code string. eg "32;45;3" + Code string +} + +// NewPrinter instance +func NewPrinter(colorCode string) *Printer { + return &Printer{Code: colorCode} +} + +// String returns color code string. eg: "32;45;3" +func (p *Printer) String() string { + // panic("implement me") + return p.Code +} + +// Sprint returns rendering colored messages +func (p *Printer) Sprint(a ...any) string { + return RenderCode(p.String(), a...) 
+} + +// Sprintf returns format and rendering colored messages +func (p *Printer) Sprintf(format string, a ...any) string { + return RenderString(p.String(), fmt.Sprintf(format, a...)) +} + +// Print rendering colored messages +func (p *Printer) Print(a ...any) { + doPrintV2(p.String(), fmt.Sprint(a...)) +} + +// Printf format and rendering colored messages +func (p *Printer) Printf(format string, a ...any) { + doPrintV2(p.String(), fmt.Sprintf(format, a...)) +} + +// Println rendering colored messages with newline +func (p *Printer) Println(a ...any) { + doPrintlnV2(p.Code, a) +} + +// IsEmpty color code +func (p *Printer) IsEmpty() bool { + return p.Code == "" +} + +/************************************************************* + * SimplePrinter struct + *************************************************************/ + +// SimplePrinter use for quick use color print on inject to struct +type SimplePrinter struct{} + +// Print message +func (s *SimplePrinter) Print(v ...any) { + Print(v...) +} + +// Printf message +func (s *SimplePrinter) Printf(format string, v ...any) { + Printf(format, v...) +} + +// Println message +func (s *SimplePrinter) Println(v ...any) { + Println(v...) +} + +// Successf message +func (s *SimplePrinter) Successf(format string, a ...any) { + Success.Printf(format, a...) +} + +// Successln message +func (s *SimplePrinter) Successln(a ...any) { + Success.Println(a...) +} + +// Infof message +func (s *SimplePrinter) Infof(format string, a ...any) { + Info.Printf(format, a...) +} + +// Infoln message +func (s *SimplePrinter) Infoln(a ...any) { + Info.Println(a...) +} + +// Warnf message +func (s *SimplePrinter) Warnf(format string, a ...any) { + Warn.Printf(format, a...) +} + +// Warnln message +func (s *SimplePrinter) Warnln(a ...any) { + Warn.Println(a...) +} + +// Errorf message +func (s *SimplePrinter) Errorf(format string, a ...any) { + Error.Printf(format, a...) 
+} + +// Errorln message +func (s *SimplePrinter) Errorln(a ...any) { + Error.Println(a...) +} diff --git a/vendor/github.com/gookit/color/quickstart.go b/vendor/github.com/gookit/color/quickstart.go new file mode 100644 index 0000000000..b368b8a14b --- /dev/null +++ b/vendor/github.com/gookit/color/quickstart.go @@ -0,0 +1,108 @@ +package color + +/************************************************************* + * quick use color print message + *************************************************************/ + +// Redp print message with Red color +func Redp(a ...any) { Red.Print(a...) } + +// Redf print message with Red color +func Redf(format string, a ...any) { Red.Printf(format, a...) } + +// Redln print message line with Red color +func Redln(a ...any) { Red.Println(a...) } + +// Bluep print message with Blue color +func Bluep(a ...any) { Blue.Print(a...) } + +// Bluef print message with Blue color +func Bluef(format string, a ...any) { Blue.Printf(format, a...) } + +// Blueln print message line with Blue color +func Blueln(a ...any) { Blue.Println(a...) } + +// Cyanp print message with Cyan color +func Cyanp(a ...any) { Cyan.Print(a...) } + +// Cyanf print message with Cyan color +func Cyanf(format string, a ...any) { Cyan.Printf(format, a...) } + +// Cyanln print message line with Cyan color +func Cyanln(a ...any) { Cyan.Println(a...) } + +// Grayp print message with Gray color +func Grayp(a ...any) { Gray.Print(a...) } + +// Grayf print message with Gray color +func Grayf(format string, a ...any) { Gray.Printf(format, a...) } + +// Grayln print message line with Gray color +func Grayln(a ...any) { Gray.Println(a...) } + +// Greenp print message with Green color +func Greenp(a ...any) { Green.Print(a...) } + +// Greenf print message with Green color +func Greenf(format string, a ...any) { Green.Printf(format, a...) } + +// Greenln print message line with Green color +func Greenln(a ...any) { Green.Println(a...) 
} + +// Yellowp print message with Yellow color +func Yellowp(a ...any) { Yellow.Print(a...) } + +// Yellowf print message with Yellow color +func Yellowf(format string, a ...any) { Yellow.Printf(format, a...) } + +// Yellowln print message line with Yellow color +func Yellowln(a ...any) { Yellow.Println(a...) } + +// Magentap print message with Magenta color +func Magentap(a ...any) { Magenta.Print(a...) } + +// Magentaf print message with Magenta color +func Magentaf(format string, a ...any) { Magenta.Printf(format, a...) } + +// Magentaln print message line with Magenta color +func Magentaln(a ...any) { Magenta.Println(a...) } + +/************************************************************* + * quick use style print message + *************************************************************/ + +// Infop print message with Info color +func Infop(a ...any) { Info.Print(a...) } + +// Infof print message with Info style +func Infof(format string, a ...any) { Info.Printf(format, a...) } + +// Infoln print message with Info style +func Infoln(a ...any) { Info.Println(a...) } + +// Successp print message with success color +func Successp(a ...any) { Success.Print(a...) } + +// Successf print message with success style +func Successf(format string, a ...any) { Success.Printf(format, a...) } + +// Successln print message with success style +func Successln(a ...any) { Success.Println(a...) } + +// Errorp print message with Error color +func Errorp(a ...any) { Error.Print(a...) } + +// Errorf print message with Error style +func Errorf(format string, a ...any) { Error.Printf(format, a...) } + +// Errorln print message with Error style +func Errorln(a ...any) { Error.Println(a...) } + +// Warnp print message with Warn color +func Warnp(a ...any) { Warn.Print(a...) } + +// Warnf print message with Warn style +func Warnf(format string, a ...any) { Warn.Printf(format, a...) } + +// Warnln print message with Warn style +func Warnln(a ...any) { Warn.Println(a...) 
} diff --git a/vendor/github.com/gookit/color/style.go b/vendor/github.com/gookit/color/style.go new file mode 100644 index 0000000000..a009d1d6e5 --- /dev/null +++ b/vendor/github.com/gookit/color/style.go @@ -0,0 +1,322 @@ +package color + +import ( + "fmt" + "strings" +) + +/************************************************************* + * 16 color Style + *************************************************************/ + +// Style a 16 color style. can add: fg color, bg color, color options +// +// Example: +// +// color.Style{color.FgGreen}.Print("message") +type Style []Color + +// New create a custom style +// +// Usage: +// +// color.New(color.FgGreen).Print("message") +// equals to: +// color.Style{color.FgGreen}.Print("message") +func New(colors ...Color) Style { + return colors +} + +// Save to global styles map +func (s Style) Save(name string) { + AddStyle(name, s) +} + +// Add to global styles map +func (s *Style) Add(cs ...Color) { + *s = append(*s, cs...) +} + +// Render render text +// Usage: +// +// color.New(color.FgGreen).Render("text") +// color.New(color.FgGreen, color.BgBlack, color.OpBold).Render("text") +func (s Style) Render(a ...any) string { + return RenderCode(s.String(), a...) +} + +// Renderln render text line. +// like Println, will add spaces for each argument +// Usage: +// +// color.New(color.FgGreen).Renderln("text", "more") +// color.New(color.FgGreen, color.BgBlack, color.OpBold).Render("text", "more") +func (s Style) Renderln(a ...any) string { + return RenderWithSpaces(s.String(), a...) +} + +// Sprint is alias of the 'Render' +func (s Style) Sprint(a ...any) string { + return RenderCode(s.String(), a...) +} + +// Sprintf format and render message. 
+func (s Style) Sprintf(format string, a ...any) string { + return RenderString(s.String(), fmt.Sprintf(format, a...)) +} + +// Print render and Print text +func (s Style) Print(a ...any) { + doPrintV2(s.String(), fmt.Sprint(a...)) +} + +// Printf render and print text +func (s Style) Printf(format string, a ...any) { + doPrintV2(s.Code(), fmt.Sprintf(format, a...)) +} + +// Println render and print text line +func (s Style) Println(a ...any) { + doPrintlnV2(s.String(), a) +} + +// Code convert to code string. returns like "32;45;3" +func (s Style) Code() string { + return s.String() +} + +// String convert to code string. returns like "32;45;3" +func (s Style) String() string { + return Colors2code(s...) +} + +// IsEmpty style +func (s Style) IsEmpty() bool { + return len(s) == 0 +} + +/************************************************************* + * Theme(extended Style) + *************************************************************/ + +// Theme definition. extends from Style +type Theme struct { + // Name theme name + Name string + // Style for the theme + Style +} + +// NewTheme instance +func NewTheme(name string, style Style) *Theme { + return &Theme{name, style} +} + +// Save to themes map +func (t *Theme) Save() { + AddTheme(t.Name, t.Style) +} + +// Tips use name as title, only apply style for name +func (t *Theme) Tips(format string, a ...any) { + // only apply style for name + t.Print(strings.ToUpper(t.Name) + ": ") + Printf(format+"\n", a...) 
+}
+
+// Prompt use name as title, and apply style for message
+func (t *Theme) Prompt(format string, a ...any) {
+	title := strings.ToUpper(t.Name) + ":"
+	t.Println(title, fmt.Sprintf(format, a...))
+}
+
+// Block like Prompt, but will wrap an empty line
+func (t *Theme) Block(format string, a ...any) {
+	title := strings.ToUpper(t.Name) + ":\n"
+
+	t.Println(title, fmt.Sprintf(format, a...))
+}
+
+/*************************************************************
+ * Theme: internal themes
+ *************************************************************/
+
+// internal themes(like bootstrap style)
+// Usage:
+//
+//	color.Info.Print("message")
+//	color.Info.Printf("a %s message", "test")
+//	color.Warn.Println("message")
+//	color.Error.Println("message")
+var (
+	// Info color style
+	Info = &Theme{"info", Style{OpReset, FgGreen}}
+	// Note color style
+	Note = &Theme{"note", Style{OpBold, FgLightCyan}}
+	// Warn color style
+	Warn = &Theme{"warning", Style{OpBold, FgYellow}}
+	// Light color style
+	Light = &Theme{"light", Style{FgLightWhite, BgBlack}}
+	// Error color style
+	Error = &Theme{"error", Style{FgLightWhite, BgRed}}
+	// Danger color style
+	Danger = &Theme{"danger", Style{OpBold, FgRed}}
+	// Debug color style
+	Debug = &Theme{"debug", Style{OpReset, FgCyan}}
+	// Notice color style
+	Notice = &Theme{"notice", Style{OpBold, FgCyan}}
+	// Comment color style
+	Comment = &Theme{"comment", Style{OpReset, FgYellow}}
+	// Success color style
+	Success = &Theme{"success", Style{OpBold, FgGreen}}
+	// Primary color style
+	Primary = &Theme{"primary", Style{OpReset, FgBlue}}
+	// Question color style
+	Question = &Theme{"question", Style{OpReset, FgMagenta}}
+	// Secondary color style
+	Secondary = &Theme{"secondary", Style{FgDarkGray}}
+)
+
+// Themes internal defined themes.
+// Usage: +// +// color.Themes["info"].Println("message") +var Themes = map[string]*Theme{ + "info": Info, + "note": Note, + "light": Light, + "error": Error, + + "debug": Debug, + "danger": Danger, + "notice": Notice, + "success": Success, + "comment": Comment, + "primary": Primary, + "warning": Warn, + + "question": Question, + "secondary": Secondary, +} + +// AddTheme add a theme and style +func AddTheme(name string, style Style) { + Themes[name] = NewTheme(name, style) + Styles[name] = style +} + +// GetTheme get defined theme by name +func GetTheme(name string) *Theme { + return Themes[name] +} + +/************************************************************* + * internal styles + *************************************************************/ + +// Styles internal defined styles, like bootstrap styles. +// Usage: +// +// color.Styles["info"].Println("message") +var Styles = map[string]Style{ + "info": {OpReset, FgGreen}, + "note": {OpBold, FgLightCyan}, + "light": {FgLightWhite, BgRed}, + "error": {FgLightWhite, BgRed}, + + "danger": {OpBold, FgRed}, + "notice": {OpBold, FgCyan}, + "success": {OpBold, FgGreen}, + "comment": {OpReset, FgMagenta}, + "primary": {OpReset, FgBlue}, + "warning": {OpBold, FgYellow}, + + "question": {OpReset, FgMagenta}, + "secondary": {FgDarkGray}, +} + +// some style name alias +var styleAliases = map[string]string{ + "err": "error", + "suc": "success", + "warn": "warning", +} + +// AddStyle add a style +func AddStyle(name string, s Style) { + Styles[name] = s +} + +// GetStyle get defined style by name +func GetStyle(name string) Style { + if s, ok := Styles[name]; ok { + return s + } + + if realName, ok := styleAliases[name]; ok { + return Styles[realName] + } + + // empty style + return New() +} + +/************************************************************* + * color scheme + *************************************************************/ + +// Scheme struct +type Scheme struct { + Name string + Styles map[string]Style +} + 
+// NewScheme create new Scheme
+func NewScheme(name string, styles map[string]Style) *Scheme {
+	return &Scheme{Name: name, Styles: styles}
+}
+
+// NewDefaultScheme create a default color Scheme
+func NewDefaultScheme(name string) *Scheme {
+	return NewScheme(name, map[string]Style{
+		"info":  {OpReset, FgGreen},
+		"warn":  {OpBold, FgYellow},
+		"error": {FgLightWhite, BgRed},
+	})
+}
+
+// Style get by name
+func (s *Scheme) Style(name string) Style {
+	return s.Styles[name]
+}
+
+// Infof message print
+func (s *Scheme) Infof(format string, a ...any) {
+	s.Styles["info"].Printf(format, a...)
+}
+
+// Infoln message print
+func (s *Scheme) Infoln(v ...any) {
+	s.Styles["info"].Println(v...)
+}
+
+// Warnf message print
+func (s *Scheme) Warnf(format string, a ...any) {
+	s.Styles["warn"].Printf(format, a...)
+}
+
+// Warnln message print
+func (s *Scheme) Warnln(v ...any) {
+	s.Styles["warn"].Println(v...)
+}
+
+// Errorf message print
+func (s *Scheme) Errorf(format string, a ...any) {
+	s.Styles["error"].Printf(format, a...)
+}
+
+// Errorln message print
+func (s *Scheme) Errorln(v ...any) {
+	s.Styles["error"].Println(v...)
+}
diff --git a/vendor/github.com/gookit/color/utils.go b/vendor/github.com/gookit/color/utils.go
new file mode 100644
index 0000000000..b6920f6dc7
--- /dev/null
+++ b/vendor/github.com/gookit/color/utils.go
@@ -0,0 +1,209 @@
+package color
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"strings"
+)
+
+// SetTerminal by given code.
+func SetTerminal(code string) error {
+	if !Enable || !SupportColor() {
+		return nil
+	}
+
+	_, err := fmt.Fprintf(output, SettingTpl, code)
+	return err
+}
+
+// ResetTerminal terminal setting.
+func ResetTerminal() error { + if !Enable || !SupportColor() { + return nil + } + + _, err := fmt.Fprint(output, ResetSet) + return err +} + +/************************************************************* + * print methods(will auto parse color tags) + *************************************************************/ + +// Print render color tag and print messages +func Print(a ...any) { + Fprint(output, a...) +} + +// Printf format and print messages +func Printf(format string, a ...any) { + Fprintf(output, format, a...) +} + +// Println messages with new line +func Println(a ...any) { + Fprintln(output, a...) +} + +// Fprint print rendered messages to writer +// +// Notice: will ignore print error +func Fprint(w io.Writer, a ...any) { + _, err := fmt.Fprint(w, Render(a...)) + saveInternalError(err) +} + +// Fprintf print format and rendered messages to writer. +// Notice: will ignore print error +func Fprintf(w io.Writer, format string, a ...any) { + str := fmt.Sprintf(format, a...) + _, err := fmt.Fprint(w, ReplaceTag(str)) + saveInternalError(err) +} + +// Fprintln print rendered messages line to writer +// Notice: will ignore print error +func Fprintln(w io.Writer, a ...any) { + str := formatArgsForPrintln(a) + _, err := fmt.Fprintln(w, ReplaceTag(str)) + saveInternalError(err) +} + +// Lprint passes colored messages to a log.Logger for printing. +// Notice: should be goroutine safe +func Lprint(l *log.Logger, a ...any) { + l.Print(Render(a...)) +} + +// Render parse color tags, return rendered string. 
+// +// Usage: +// +// text := Render("hello world!") +// fmt.Println(text) +func Render(a ...any) string { + if len(a) == 0 { + return "" + } + return ReplaceTag(fmt.Sprint(a...)) +} + +// Sprint parse color tags, return rendered string +func Sprint(a ...any) string { + if len(a) == 0 { + return "" + } + return ReplaceTag(fmt.Sprint(a...)) +} + +// Sprintf format and return rendered string +func Sprintf(format string, a ...any) string { + return ReplaceTag(fmt.Sprintf(format, a...)) +} + +// String alias of the ReplaceTag +func String(s string) string { return ReplaceTag(s) } + +// Text alias of the ReplaceTag +func Text(s string) string { return ReplaceTag(s) } + +// Uint8sToInts convert []uint8 to []int +// func Uint8sToInts(u8s []uint8 ) []int { +// ints := make([]int, len(u8s)) +// for i, u8 := range u8s { +// ints[i] = int(u8) +// } +// return ints +// } + +/************************************************************* + * helper methods for print + *************************************************************/ + +// new implementation, support render full color code on pwsh.exe, cmd.exe +func doPrintV2(code, str string) { + _, err := fmt.Fprint(output, RenderString(code, str)) + saveInternalError(err) +} + +// new implementation, support render full color code on pwsh.exe, cmd.exe +func doPrintlnV2(code string, args []any) { + str := formatArgsForPrintln(args) + _, err := fmt.Fprintln(output, RenderString(code, str)) + saveInternalError(err) +} + +// use Println, will add spaces for each arg +func formatArgsForPrintln(args []any) (message string) { + if ln := len(args); ln == 0 { + message = "" + } else if ln == 1 { + message = fmt.Sprint(args[0]) + } else { + message = fmt.Sprintln(args...) 
+ // clear last "\n" + message = message[:len(message)-1] + } + return +} + +/************************************************************* + * helper methods + *************************************************************/ + +// is on debug mode +// func isDebugMode() bool { +// return debugMode == "on" +// } + +func debugf(f string, v ...any) { + if debugMode { + fmt.Print("COLOR_DEBUG: ") + fmt.Printf(f, v...) + fmt.Println() + } +} + +// equals: return ok ? val1 : val2 +func isValidUint8(val int) bool { + return val >= 0 && val < 256 +} + +// equals: return ok ? val1 : val2 +func compareVal(ok bool, val1, val2 uint8) uint8 { + if ok { + return val1 + } + return val2 +} + +// equals: return ok ? val1 : val2 +func compareF64Val(ok bool, val1, val2 float64) float64 { + if ok { + return val1 + } + return val2 +} + +func saveInternalError(err error) { + if err != nil { + debugf("inner error: %s", err.Error()) + innerErrs = append(innerErrs, err) + } +} + +func stringToArr(str, sep string) (arr []string) { + str = strings.TrimSpace(str) + if str == "" { + return + } + + ss := strings.Split(str, sep) + for _, val := range ss { + if val = strings.TrimSpace(val); val != "" { + arr = append(arr, val) + } + } + return +} diff --git a/vendor/github.com/goombaio/namegenerator/.gitignore b/vendor/github.com/goombaio/namegenerator/.gitignore new file mode 100644 index 0000000000..f1c181ec9c --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/goombaio/namegenerator/AUTHORS b/vendor/github.com/goombaio/namegenerator/AUTHORS new file mode 100644 index 0000000000..cac995e305 --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/AUTHORS @@ -0,0 +1 @@ +Raül Pérez (repejota) \ No newline at end of file diff 
--git a/vendor/github.com/goombaio/namegenerator/CONTRIBUTORS b/vendor/github.com/goombaio/namegenerator/CONTRIBUTORS
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/goombaio/namegenerator/HACKING.md b/vendor/github.com/goombaio/namegenerator/HACKING.md
new file mode 100644
index 0000000000..16572bdf41
--- /dev/null
+++ b/vendor/github.com/goombaio/namegenerator/HACKING.md
@@ -0,0 +1,12 @@
+# Hacking
+
+## Install deps, dev-deps and run test
+
+```sh
+$ git clone git@github.com:goombaio/namegenerator.git
+$ cd namegenerator
+export GO111MODULE=on # ref: https://dave.cheney.net/2018/07/16/using-go-modules-with-travis-ci
+make deps
+make dev-deps
+make test
+```
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/goombaio/namegenerator/LICENSE
similarity index 100%
rename from vendor/github.com/uber/jaeger-client-go/LICENSE
rename to vendor/github.com/goombaio/namegenerator/LICENSE
diff --git a/vendor/github.com/goombaio/namegenerator/Makefile b/vendor/github.com/goombaio/namegenerator/Makefile
new file mode 100644
index 0000000000..dbebd4ce81
--- /dev/null
+++ b/vendor/github.com/goombaio/namegenerator/Makefile
@@ -0,0 +1,89 @@
+include Makefile.help.mk
+
+BINARY=namegenerator
+MAIN_PACKAGE=cmd/${BINARY}/main.go
+PACKAGES = $(shell go list ./...)
+VERSION=`cat VERSION` +BUILD=`git symbolic-ref HEAD 2> /dev/null | cut -b 12-`-`git log --pretty=format:%h -1` +DIST_FOLDER=dist +DIST_INCLUDE_FILES=README.md LICENSE VERSION + +# Setup -ldflags option for go build here, interpolate the variable values +LDFLAGS=-ldflags "-X main.Version=${VERSION} -X main.Build=${BUILD}" + +# Build & Install + +install: ## Build and install package on your system + go install $(LDFLAGS) -v $(PACKAGES) + +.PHONY: version +version: ## Show version information + @echo $(VERSION)-$(BUILD) + +# Testing + +.PHONY: test +test: ## Execute package tests + go test -v $(PACKAGES) + +.PHONY: test-race +test-race: + go test -race -v $(PACKAGES) + +.PHONY: cover-profile +cover-profile: + echo "mode: count" > coverage-all.out + $(foreach pkg,$(PACKAGES),\ + go test -coverprofile=coverage.out -covermode=count $(pkg);\ + tail -n +2 coverage.out >> coverage-all.out;) + rm -rf coverage.out + +.PHONY: cover +cover: cover-profile +cover: ## Generate test coverage data + go tool cover -func=coverage-all.out + +.PHONY: cover-html +cover-html: cover-profile +cover-html: ## Generate coverage report + go tool cover -html=coverage-all.out + +.PHONY: codecov +codecov: + bash <(curl -s https://codecov.io/bash) + +# BenchMarking + +.PHONY: benchmark +benchmark: ## Execute package benchmarks + go test -v $(PACKAGES) -benchmem -bench . 
+ +# Dependencies + +deps: ## Install build dependencies + go get -u=patch + go mod tidy -v + go mod download + go mod verify + +dev-deps: deps +dev-deps: ## Install dev and build dependencies + +.PHONY: clean +clean: ## Delete generated development environment + go clean + rm -rf ${BINARY}-*-* + rm -rf ${BINARY}-*-*.exe + rm -rf ${BINARY}-*-*.zip + rm -rf coverage-all.out + +# Lint + +.PHONY: lint +lint: ## Lint source code + ./lint.bash + +# Docs + +godoc-serve: ## Serve documentation (godoc format) for this package at port HTTP 9090 + godoc -http=":9090" diff --git a/vendor/github.com/goombaio/namegenerator/Makefile.help.mk b/vendor/github.com/goombaio/namegenerator/Makefile.help.mk new file mode 100644 index 0000000000..b9dbd86912 --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/Makefile.help.mk @@ -0,0 +1,5 @@ +.PHONY: help +help: ## Show this help ( default ) + @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//' + +.DEFAULT_GOAL := help diff --git a/vendor/github.com/goombaio/namegenerator/README.md b/vendor/github.com/goombaio/namegenerator/README.md new file mode 100644 index 0000000000..f05cf5440a --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/README.md @@ -0,0 +1,73 @@ +# namegenerator + +A random name generator (for projects, servers, cluster nodes, etc ...) +implementation in Golang. 
+ +## Badges + +[![License][License-Image]][License-URL] +[![CircleCI Status][CircleCI-Image]][CircleCI-URL] +[![Coverage Report][Coverage-Image]][Coverage-URL] +[![Go Report Card][GoReportCard-Image]][GoReportCard-URL] +[![CII Best Practices][CII-Image]][CII-URL] +[![GoDoc][GoDoc-Image]][GoDoc-URL] + +## Install + +```bash +go get github.com/goombaio/namegenerator +``` + +You can also update an already installed version: + +```bash +go get -u github.com/goombaio/namegenerator +``` + +## Example of use + +```go +package main + +import ( + "github.com/goombaio/namegenerator" +) + +func main() { + seed := time.Now().UTC().UnixNano() + nameGenerator := namegenerator.NewNameGenerator(seed) + + name := nameGenerator.Generate() + + fmt.Println(name) +} +``` + +## License + +Copyright (c) 2018 Goomba project Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +[License-Image]: https://img.shields.io/badge/License-Apache-blue.svg +[License-URL]: http://opensource.org/licenses/Apache +[CircleCI-Image]: https://circleci.com/gh/goombaio/namegenerator.svg?style=svg +[CircleCI-URL]: https://circleci.com/gh/goombaio/namegenerator +[Coverage-Image]: https://codecov.io/gh/goombaio/namegenerator/branch/master/graph/badge.svg +[Coverage-URL]: https://codecov.io/gh/goombaio/namegenerator +[GoReportCard-Image]: https://goreportcard.com/badge/github.com/goombaio/namegenerator +[GoReportCard-URL]: https://goreportcard.com/report/github.com/goombaio/namegenerator +[CII-Image]: https://bestpractices.coreinfrastructure.org/projects/2237/badge +[CII-URL]: https://bestpractices.coreinfrastructure.org/projects/2237 +[GoDoc-Image]: https://godoc.org/github.com/goombaio/namegenerator?status.svg +[GoDoc-URL]: http://godoc.org/github.com/goombaio/namegenerator diff --git a/vendor/github.com/goombaio/namegenerator/VERSION b/vendor/github.com/goombaio/namegenerator/VERSION new file mode 100644 index 0000000000..7bcd0e3612 --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/VERSION @@ -0,0 +1 @@ +0.0.2 \ No newline at end of file diff --git a/vendor/github.com/goombaio/namegenerator/data.go b/vendor/github.com/goombaio/namegenerator/data.go new file mode 100644 index 0000000000..f00f64b690 --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/data.go @@ -0,0 +1,40 @@ +// Copyright 2018, Goomba project Authors. All rights reserved. +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with this +// work for additional information regarding copyright ownership. The ASF +// licenses this file to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package namegenerator + +var ( + // ADJECTIVES ... + ADJECTIVES = []string{"autumn", "hidden", "bitter", "misty", "silent", "empty", "dry", "dark", "summer", + "icy", "delicate", "quiet", "white", "cool", "spring", "winter", "patient", + "twilight", "dawn", "crimson", "wispy", "weathered", "blue", "billowing", + "broken", "cold", "damp", "falling", "frosty", "green", "long", "late", "lingering", + "bold", "little", "morning", "muddy", "old", "red", "rough", "still", "small", + "sparkling", "throbbing", "shy", "wandering", "withered", "wild", "black", + "young", "holy", "solitary", "fragrant", "aged", "snowy", "proud", "floral", + "restless", "divine", "polished", "ancient", "purple", "lively", "nameless"} + + // NOUNS ... 
+ NOUNS = []string{"waterfall", "river", "breeze", "moon", "rain", "wind", "sea", "morning", + "snow", "lake", "sunset", "pine", "shadow", "leaf", "dawn", "glitter", "forest", + "hill", "cloud", "meadow", "sun", "glade", "bird", "brook", "butterfly", + "bush", "dew", "dust", "field", "fire", "flower", "firefly", "feather", "grass", + "haze", "mountain", "night", "pond", "darkness", "snowflake", "silence", + "sound", "sky", "shape", "surf", "thunder", "violet", "water", "wildflower", + "wave", "water", "resonance", "sun", "wood", "dream", "cherry", "tree", "fog", + "frost", "voice", "paper", "frog", "smoke", "star"} +) diff --git a/vendor/github.com/goombaio/namegenerator/doc.go b/vendor/github.com/goombaio/namegenerator/doc.go new file mode 100644 index 0000000000..34f440ba41 --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018, Goomba project Authors. All rights reserved. +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with this +// work for additional information regarding copyright ownership. The ASF +// licenses this file to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package namegenerator diff --git a/vendor/github.com/goombaio/namegenerator/generator.go b/vendor/github.com/goombaio/namegenerator/generator.go new file mode 100644 index 0000000000..dd6f2a968d --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/generator.go @@ -0,0 +1,53 @@ +// Copyright 2018, Goomba project Authors. All rights reserved. +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with this +// work for additional information regarding copyright ownership. The ASF +// licenses this file to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package namegenerator + +import ( + "fmt" + "math/rand" +) + +// Generator ... +type Generator interface { + Generate() string +} + +// NameGenerator ... +type NameGenerator struct { + random *rand.Rand +} + +// Generate ... +func (rn *NameGenerator) Generate() string { + randomAdjective := ADJECTIVES[rn.random.Intn(len(ADJECTIVES))] + randomNoun := NOUNS[rn.random.Intn(len(NOUNS))] + + randomName := fmt.Sprintf("%v-%v", randomAdjective, randomNoun) + + return randomName +} + +// NewNameGenerator ... 
+func NewNameGenerator(seed int64) Generator { + nameGenerator := &NameGenerator{ + random: rand.New(rand.New(rand.NewSource(99))), + } + nameGenerator.random.Seed(seed) + + return nameGenerator +} diff --git a/vendor/github.com/goombaio/namegenerator/lint.bash b/vendor/github.com/goombaio/namegenerator/lint.bash new file mode 100644 index 0000000000..a265040792 --- /dev/null +++ b/vendor/github.com/goombaio/namegenerator/lint.bash @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +# gometalinter + +if [ ! $(command -v gometalinter) ] +then + go get github.com/alecthomas/gometalinter + gometalinter --update --install +fi + +echo "gometalinter:" +time gometalinter \ + --exclude='/thrift/' \ + --exclude='/pb/' \ + --exclude='no args in Log call \(vet\)' \ + --disable=errcheck \ + --disable=dupl \ + --disable=aligncheck \ + --disable=gotype \ + --cyclo-over=20 \ + --tests \ + --concurrency=2 \ + --deadline=300s \ + ./... +echo + +# golangci-lint + +if [ ! $(command -v golangci-lint) ] +then + go get github.com/golangci/golangci-lint/cmd/golangci-lint +fi + + +echo "golangci-lint:" +time golangci-lint \ + run \ + --disable errcheck \ + ./... 
\ No newline at end of file diff --git a/vendor/github.com/ivanpirog/coloredcobra/.gitignore b/vendor/github.com/ivanpirog/coloredcobra/.gitignore new file mode 100644 index 0000000000..74377ee186 --- /dev/null +++ b/vendor/github.com/ivanpirog/coloredcobra/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.bak + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/ivanpirog/coloredcobra/LICENSE b/vendor/github.com/ivanpirog/coloredcobra/LICENSE new file mode 100644 index 0000000000..dd96724636 --- /dev/null +++ b/vendor/github.com/ivanpirog/coloredcobra/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Ivan Pirog + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/ivanpirog/coloredcobra/README.md b/vendor/github.com/ivanpirog/coloredcobra/README.md new file mode 100644 index 0000000000..eaad757d3c --- /dev/null +++ b/vendor/github.com/ivanpirog/coloredcobra/README.md @@ -0,0 +1,140 @@ +![ColoredCobra Logo](https://user-images.githubusercontent.com/8699212/159517235-dd7f8733-20b7-47a8-a1b5-9c91956ca86c.png) + +--- + +**[Cobra](https://github.com/spf13/cobra)** library for creating powerful modern CLI doesn't support color settings for console output. `ColoredCobra` is a small library that allows you to colorize the text output of the Cobra library, making the console output look better. + +![ColoredCobra Look](https://user-images.githubusercontent.com/8699212/159517325-faeac756-49b4-4b98-ba40-9764e8549335.png) + + +`ColoredCobra` provides very simple set of settings that allows you to customize individual parts of Cobra text output by specifying a color for them, as well as bold, italic, underlined styles. + +![ColoredCobra Config](https://user-images.githubusercontent.com/8699212/159517387-a82eafa4-a0bb-4bc9-a05a-67b05e6ae15c.png) + + +It's very easy to add `ColoredCobra` to your project! + +--- + +## Installing + +Open terminal and execute: + +```bash +go get -u github.com/ivanpirog/coloredcobra +``` + +## Quick start + +Open your `cmd/root.go` and insert this code: + +```go +import cc "github.com/ivanpirog/coloredcobra" +``` + +Or: + +```go +import ( + ... + cc "github.com/ivanpirog/coloredcobra" +) +``` + +Then put this code at the beginning of the `Execute()` function: + +```go + cc.Init(&cc.Config{ + RootCmd: rootCmd, + Headings: cc.HiCyan + cc.Bold + cc.Underline, + Commands: cc.HiYellow + cc.Bold, + Example: cc.Italic, + ExecName: cc.Bold, + Flags: cc.Bold, + }) +``` + +That's all. Now build your project and see the output of the help command. + +## Overview + +`Config{}` has just one required parameter `RootCmd`. This is a pointer to the Cobra's root command. 
Rest of parameters have default values. + +Style of any part of text output is represented by a sum of predefined constants. For example: + +```go +Headings: cc.HiYellow + cc.Bold + cc.Underline +Commands: cc.Red + cc.Bold +ExecName: cc.Bold // equals cc.White + cc.Bold +Example: cc.Underline // equals cc.White + cc.Underline +``` + +### Available color constants: + +``` +Black +Red +Green +Yellow +Blue +Magenta +Cyan +White (default) +HiRed (Hi-Intensity Red) +HiGreen +HiYellow +HiBlue +HiMagenta +HiCyan +HiWhite +``` + +### Available text formatting constants: + +``` +Bold +Italic +Underline +``` + +### Available config parameters: + +![Config Parameters](https://user-images.githubusercontent.com/8699212/159517553-7ef67fac-371b-4995-bebe-d702b6167fe1.png) + + +* `Headings:` headers style. + +* `Commands:` commands style. + +* `CmdShortDescr:` short description of commands style. + +* `ExecName:` executable name style. + +* `Flags:` short and long flag names (-f, --flag) style. + +* `FlagsDataType:` style of flags data type. + +* `FlagsDescr:` flags description text style. + +* `Aliases:` list of command aliases style. + +* `Example:` example text style. + +* `NoExtraNewlines:` no line breaks before and after headings, if `true`. By default: `false`. + +* `NoBottomNewline:` no line break at the end of Cobra's output, if `true`. By default: `false`. + +
+ +### `NoExtraNewlines` parameter results: + +![extranewlines](https://user-images.githubusercontent.com/8699212/159517630-00855ffe-80df-4670-a054-e695f6c4fea7.png) + + +## How it works + +`ColoredCobra` patches Cobra's usage template and extends it with functions for text styling. [fatih/color](https://github.com/fatih/color) library is used for coloring text output in console. + +## License + +ColoredCobra is released under the MIT license. See [LICENSE](https://github.com/ivanpirog/coloredcobra/blob/main/LICENSE). diff --git a/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go b/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go new file mode 100644 index 0000000000..f06625ee35 --- /dev/null +++ b/vendor/github.com/ivanpirog/coloredcobra/coloredcobra.go @@ -0,0 +1,350 @@ +// ColoredCobra allows you to colorize Cobra's text output, +// making it look better using simple settings to customize +// individual parts of console output. +// +// Usage example: +// +// 1. Insert in cmd/root.go file of your project : +// +// import cc "github.com/ivanpirog/coloredcobra" +// +// +// 2. Put the following code to the beginning of the Execute() function: +// +// cc.Init(&cc.Config{ +// RootCmd: rootCmd, +// Headings: cc.Bold + cc.Underline, +// Commands: cc.Yellow + cc.Bold, +// ExecName: cc.Bold, +// Flags: cc.Bold, +// }) +// +// +// 3. Build & execute your code. +// +// +// Copyright © 2022 Ivan Pirog . +// Released under the MIT license. +// Project home: https://github.com/ivanpirog/coloredcobra +// +package coloredcobra + +import ( + "regexp" + "strings" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +// Config is a settings structure which sets styles for individual parts of Cobra text output. +// +// Note that RootCmd is required. 
+// +// Example: +// +// c := &cc.Config{ +// RootCmd: rootCmd, +// Headings: cc.HiWhite + cc.Bold + cc.Underline, +// Commands: cc.Yellow + cc.Bold, +// CmdShortDescr: cc.Cyan, +// ExecName: cc.Bold, +// Flags: cc.Bold, +// Aliases: cc.Bold, +// Example: cc.Italic, +// } +type Config struct { + RootCmd *cobra.Command + Headings uint8 + Commands uint8 + CmdShortDescr uint8 + ExecName uint8 + Flags uint8 + FlagsDataType uint8 + FlagsDescr uint8 + Aliases uint8 + Example uint8 + NoExtraNewlines bool + NoBottomNewline bool +} + +// Constants for colors and B, I, U +const ( + None = 0 + Black = 1 + Red = 2 + Green = 3 + Yellow = 4 + Blue = 5 + Magenta = 6 + Cyan = 7 + White = 8 + HiRed = 9 + HiGreen = 10 + HiYellow = 11 + HiBlue = 12 + HiMagenta = 13 + HiCyan = 14 + HiWhite = 15 + Bold = 16 + Italic = 32 + Underline = 64 +) + +// Init patches Cobra's usage template with configuration provided. +func Init(cfg *Config) { + + if cfg.RootCmd == nil { + panic("coloredcobra: Root command pointer is missing.") + } + + // Get usage template + tpl := cfg.RootCmd.UsageTemplate() + + // + // Add extra line breaks for headings + // + if cfg.NoExtraNewlines == false { + tpl = strings.NewReplacer( + "Usage:", "\nUsage:\n", + "Aliases:", "\nAliases:\n", + "Examples:", "\nExamples:\n", + "Available Commands:", "\nAvailable Commands:\n", + "Global Flags:", "\nGlobal Flags:\n", + "Additional help topics:", "\nAdditional help topics:\n", + "Use \"", "\nUse \"", + ).Replace(tpl) + re := regexp.MustCompile(`(?m)^Flags:$`) + tpl = re.ReplaceAllString(tpl, "\nFlags:\n") + } + + // + // Styling headers + // + if cfg.Headings != None { + ch := getColor(cfg.Headings) + + // Add template function to style the headers + cobra.AddTemplateFunc("HeadingStyle", ch.SprintFunc()) + + // Wrap template headers into a new function + tpl = strings.NewReplacer( + "Usage:", `{{HeadingStyle "Usage:"}}`, + "Aliases:", `{{HeadingStyle "Aliases:"}}`, + "Examples:", `{{HeadingStyle "Examples:"}}`, + "Available 
Commands:", `{{HeadingStyle "Available Commands:"}}`, + "Global Flags:", `{{HeadingStyle "Global Flags:"}}`, + "Additional help topics:", `{{HeadingStyle "Additional help topics:"}}`, + ).Replace(tpl) + + re := regexp.MustCompile(`(?m)^(\s*)Flags:(\s*)$`) + tpl = re.ReplaceAllString(tpl, `$1{{HeadingStyle "Flags:"}}$2`) + } + + // + // Styling commands + // + if cfg.Commands != None { + cc := getColor(cfg.Commands) + + // Add template function to style commands + cobra.AddTemplateFunc("CommandStyle", cc.SprintFunc()) + cobra.AddTemplateFunc("sum", func(a, b int) int { + return a + b + }) + + // Patch usage template + re := regexp.MustCompile(`(?i){{\s*rpad\s+.Name\s+.NamePadding\s*}}`) + tpl = re.ReplaceAllLiteralString(tpl, "{{rpad (CommandStyle .Name) (sum .NamePadding 12)}}") + + re = regexp.MustCompile(`(?i){{\s*rpad\s+.CommandPath\s+.CommandPathPadding\s*}}`) + tpl = re.ReplaceAllLiteralString(tpl, "{{rpad (CommandStyle .CommandPath) (sum .CommandPathPadding 12)}}") + } + + // + // Styling a short desription of commands + // + if cfg.CmdShortDescr != None { + csd := getColor(cfg.CmdShortDescr) + + cobra.AddTemplateFunc("CmdShortStyle", csd.SprintFunc()) + + re := regexp.MustCompile(`(?ism)({{\s*range\s+.Commands\s*}}.*?){{\s*.Short\s*}}`) + tpl = re.ReplaceAllString(tpl, `$1{{CmdShortStyle .Short}}`) + } + + // + // Styling executable file name + // + if cfg.ExecName != None { + cen := getColor(cfg.ExecName) + + // Add template functions + cobra.AddTemplateFunc("ExecStyle", cen.SprintFunc()) + cobra.AddTemplateFunc("UseLineStyle", func(s string) string { + spl := strings.Split(s, " ") + spl[0] = cen.Sprint(spl[0]) + return strings.Join(spl, " ") + }) + + // Patch usage template + re := regexp.MustCompile(`(?i){{\s*.CommandPath\s*}}`) + tpl = re.ReplaceAllLiteralString(tpl, "{{ExecStyle .CommandPath}}") + + re = regexp.MustCompile(`(?i){{\s*.UseLine\s*}}`) + tpl = re.ReplaceAllLiteralString(tpl, "{{UseLineStyle .UseLine}}") + } + + // + // Styling flags + // + 
var cf, cfd, cfdt *color.Color + if cfg.Flags != None { + cf = getColor(cfg.Flags) + } + if cfg.FlagsDescr != None { + cfd = getColor(cfg.FlagsDescr) + } + if cfg.FlagsDataType != None { + cfdt = getColor(cfg.FlagsDataType) + } + if cf != nil || cfd != nil || cfdt != nil { + + cobra.AddTemplateFunc("FlagStyle", func(s string) string { + + // Flags info section is multi-line. + // Let's split these lines and iterate them. + lines := strings.Split(s, "\n") + for k := range lines { + + // Styling short and full flags (-f, --flag) + if cf != nil { + re := regexp.MustCompile(`(--?\S+)`) + for _, flag := range re.FindAllString(lines[k], 2) { + lines[k] = strings.Replace(lines[k], flag, cf.Sprint(flag), 1) + } + } + + // If no styles for flag data types and description - continue + if cfd == nil && cfdt == nil { + continue + } + + // Split line into two parts: flag data type and description + // Tip: Use debugger to understand the logic + re := regexp.MustCompile(`\s{2,}`) + spl := re.Split(lines[k], -1) + if len(spl) != 3 { + continue + } + + // Styling the flag description + if cfd != nil { + lines[k] = strings.Replace(lines[k], spl[2], cfd.Sprint(spl[2]), 1) + } + + // Styling flag data type + // Tip: Use debugger to understand the logic + if cfdt != nil { + re = regexp.MustCompile(`\s+(\w+)$`) // the last word after spaces is the flag data type + m := re.FindAllStringSubmatch(spl[1], -1) + if len(m) == 1 && len(m[0]) == 2 { + lines[k] = strings.Replace(lines[k], m[0][1], cfdt.Sprint(m[0][1]), 1) + } + } + + } + s = strings.Join(lines, "\n") + + return s + + }) + + // Patch usage template + re := regexp.MustCompile(`(?i)(\.(InheritedFlags|LocalFlags)\.FlagUsages)`) + tpl = re.ReplaceAllString(tpl, "FlagStyle $1") + } + + // + // Styling aliases + // + if cfg.Aliases != None { + ca := getColor(cfg.Aliases) + cobra.AddTemplateFunc("AliasStyle", ca.SprintFunc()) + + re := regexp.MustCompile(`(?i){{\s*.NameAndAliases\s*}}`) + tpl = re.ReplaceAllLiteralString(tpl, 
"{{AliasStyle .NameAndAliases}}") + } + + // + // Styling the example text + // + if cfg.Example != None { + ce := getColor(cfg.Example) + cobra.AddTemplateFunc("ExampleStyle", ce.SprintFunc()) + + re := regexp.MustCompile(`(?i){{\s*.Example\s*}}`) + tpl = re.ReplaceAllLiteralString(tpl, "{{ExampleStyle .Example}}") + } + + // Adding a new line to the end + if !cfg.NoBottomNewline { + tpl += "\n" + } + + // Apply patched template + cfg.RootCmd.SetUsageTemplate(tpl) + // Debug line, uncomment when needed + // fmt.Println(tpl) +} + +// getColor decodes color param and returns color.Color object +func getColor(param uint8) (c *color.Color) { + + switch param & 15 { + case None: + c = color.New(color.FgWhite) + case Black: + c = color.New(color.FgBlack) + case Red: + c = color.New(color.FgRed) + case Green: + c = color.New(color.FgGreen) + case Yellow: + c = color.New(color.FgYellow) + case Blue: + c = color.New(color.FgBlue) + case Magenta: + c = color.New(color.FgMagenta) + case Cyan: + c = color.New(color.FgCyan) + case White: + c = color.New(color.FgWhite) + case HiRed: + c = color.New(color.FgHiRed) + case HiGreen: + c = color.New(color.FgHiGreen) + case HiYellow: + c = color.New(color.FgHiYellow) + case HiBlue: + c = color.New(color.FgHiBlue) + case HiMagenta: + c = color.New(color.FgHiMagenta) + case HiCyan: + c = color.New(color.FgHiCyan) + case HiWhite: + c = color.New(color.FgHiWhite) + } + + if param&Bold == Bold { + c.Add(color.Bold) + } + if param&Italic == Italic { + c.Add(color.Italic) + } + if param&Underline == Underline { + c.Add(color.Underline) + } + + return +} diff --git a/vendor/github.com/lestrrat-go/blackmagic/.gitignore b/vendor/github.com/lestrrat-go/blackmagic/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/lestrrat-go/blackmagic/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output 
of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/lestrrat-go/blackmagic/LICENSE b/vendor/github.com/lestrrat-go/blackmagic/LICENSE new file mode 100644 index 0000000000..188ea7685c --- /dev/null +++ b/vendor/github.com/lestrrat-go/blackmagic/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/blackmagic/README.md b/vendor/github.com/lestrrat-go/blackmagic/README.md new file mode 100644 index 0000000000..0356f8a72b --- /dev/null +++ b/vendor/github.com/lestrrat-go/blackmagic/README.md @@ -0,0 +1,3 @@ +# blackmagic + +Reflect-based black magic. 
YMMV, and use with caution diff --git a/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go new file mode 100644 index 0000000000..8d1d468543 --- /dev/null +++ b/vendor/github.com/lestrrat-go/blackmagic/blackmagic.go @@ -0,0 +1,54 @@ +package blackmagic + +import ( + "fmt" + "reflect" +) + +// AssignIfCompatible is a convenience function to safely +// assign arbitrary values. dst must be a pointer to an +// empty interface, or it must be a pointer to a compatible +// variable type that can hold src. +func AssignIfCompatible(dst, src interface{}) error { + orv := reflect.ValueOf(src) // save this value for error reporting + result := orv + + // t can be a pointer or a slice, and the code will slightly change + // depending on this + var isSlice bool + switch result.Kind() { + case reflect.Ptr: + // no op + case reflect.Slice: + isSlice = true + default: + return fmt.Errorf("argument t to AssignIfCompatible must be a pointer or a slice: %T", src) + } + + rv := reflect.ValueOf(dst) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf(`argument to AssignIfCompatible() must be a pointer: %T`, dst) + } + + actualDst := rv.Elem() + switch actualDst.Kind() { + case reflect.Interface: + // If it's an interface, we can just assign the pointer to the interface{} + default: + // If it's a pointer to the struct we're looking for, we need to set + // the de-referenced struct + if !isSlice { + result = result.Elem() + } + } + if !result.Type().AssignableTo(actualDst.Type()) { + return fmt.Errorf(`argument to AssignIfCompatible() must be compatible with %T (was %T)`, orv.Interface(), dst) + } + + if !actualDst.CanSet() { + return fmt.Errorf(`argument to AssignIfCompatible() must be settable`) + } + actualDst.Set(result) + + return nil +} diff --git a/vendor/github.com/lestrrat-go/httpcc/.gitignore b/vendor/github.com/lestrrat-go/httpcc/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/httpcc/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/lestrrat-go/httpcc/LICENSE b/vendor/github.com/lestrrat-go/httpcc/LICENSE new file mode 100644 index 0000000000..963209bfba --- /dev/null +++ b/vendor/github.com/lestrrat-go/httpcc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/httpcc/README.md b/vendor/github.com/lestrrat-go/httpcc/README.md new file mode 100644 index 0000000000..cf2dcb327c --- /dev/null +++ b/vendor/github.com/lestrrat-go/httpcc/README.md @@ -0,0 +1,35 @@ +httpcc +====== + +Parses HTTP/1.1 Cache-Control header, and returns a struct that is convenient +for the end-user to do what they will with. + +# Parsing the HTTP Request + +```go +dir, err := httpcc.ParseRequest(req.Header.Get(`Cache-Control`)) +// dir.MaxAge() uint64, bool +// dir.MaxStale() uint64, bool +// dir.MinFresh() uint64, bool +// dir.NoCache() bool +// dir.NoStore() bool +// dir.NoTransform() bool +// dir.OnlyIfCached() bool +// dir.Extensions() map[string]string +``` + +# Parsing the HTTP Response + +```go +directives, err := httpcc.ParseResponse(res.Header.Get(`Cache-Control`)) +// dir.MaxAge() uint64, bool +// dir.MustRevalidate() bool +// dir.NoCache() []string +// dir.NoStore() bool +// dir.NoTransform() bool +// dir.Public() bool +// dir.Private() bool +// dir.SMaxAge() uint64, bool +// dir.Extensions() map[string]string +``` + diff --git a/vendor/github.com/lestrrat-go/httpcc/directives.go b/vendor/github.com/lestrrat-go/httpcc/directives.go new file mode 100644 index 0000000000..86cbbf0b9a --- /dev/null +++ b/vendor/github.com/lestrrat-go/httpcc/directives.go @@ -0,0 +1,117 @@ +package httpcc + +type RequestDirective struct { + maxAge *uint64 + maxStale *uint64 + minFresh *uint64 + noCache bool + noStore bool + noTransform bool + onlyIfCached bool + extensions map[string]string +} + +func (d *RequestDirective) MaxAge() (uint64, bool) { + if v := d.maxAge; v != nil { + return *v, true + } + return 0, false +} + +func (d *RequestDirective) MaxStale() (uint64, bool) { + if v := d.maxStale; v != nil { + return *v, true + } + return 0, false +} + +func (d *RequestDirective) MinFresh() (uint64, bool) { + if v := d.minFresh; v != nil { + return *v, true + } + return 0, false +} + +func (d *RequestDirective) 
NoCache() bool { + return d.noCache +} + +func (d *RequestDirective) NoStore() bool { + return d.noStore +} + +func (d *RequestDirective) NoTransform() bool { + return d.noTransform +} + +func (d *RequestDirective) OnlyIfCached() bool { + return d.onlyIfCached +} + +func (d *RequestDirective) Extensions() map[string]string { + return d.extensions +} + +func (d *RequestDirective) Extension(s string) string { + return d.extensions[s] +} + +type ResponseDirective struct { + maxAge *uint64 + noCache []string + noStore bool + noTransform bool + public bool + private []string + proxyRevalidate bool + sMaxAge *uint64 + extensions map[string]string +} + +func (d *ResponseDirective) MaxAge() (uint64, bool) { + if v := d.maxAge; v != nil { + return *v, true + } + return 0, false +} + +func (d *ResponseDirective) NoCache() []string { + return d.noCache +} + +func (d *ResponseDirective) NoStore() bool { + return d.noStore +} + +func (d *ResponseDirective) NoTransform() bool { + return d.noTransform +} + +func (d *ResponseDirective) Public() bool { + return d.public +} + +func (d *ResponseDirective) Private() []string { + return d.private +} + +func (d *ResponseDirective) ProxyRevalidate() bool { + return d.proxyRevalidate +} + +func (d *ResponseDirective) SMaxAge() (uint64, bool) { + if v := d.sMaxAge; v != nil { + return *v, true + } + return 0, false +} + +func (d *ResponseDirective) Extensions() map[string]string { + return d.extensions +} + +func (d *ResponseDirective) Extension(s string) string { + return d.extensions[s] +} + + diff --git a/vendor/github.com/lestrrat-go/httpcc/httpcc.go b/vendor/github.com/lestrrat-go/httpcc/httpcc.go new file mode 100644 index 0000000000..14679f9b1c --- /dev/null +++ b/vendor/github.com/lestrrat-go/httpcc/httpcc.go @@ -0,0 +1,310 @@ +package httpcc + +import ( + "bufio" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +const ( + // Request Cache-Control directives + MaxAge = "max-age" // used in response as well + MaxStale = 
"max-stale" + MinFresh = "min-fresh" + NoCache = "no-cache" // used in response as well + NoStore = "no-store" // used in response as well + NoTransform = "no-transform" // used in response as well + OnlyIfCached = "only-if-cached" + + // Response Cache-Control directive + MustRevalidate = "must-revalidate" + Public = "public" + Private = "private" + ProxyRevalidate = "proxy-revalidate" + SMaxAge = "s-maxage" +) + +type TokenPair struct { + Name string + Value string +} + +type TokenValuePolicy int + +const ( + NoArgument TokenValuePolicy = iota + TokenOnly + QuotedStringOnly + AnyTokenValue +) + +type directiveValidator interface { + Validate(string) TokenValuePolicy +} +type directiveValidatorFn func(string) TokenValuePolicy + +func (fn directiveValidatorFn) Validate(ccd string) TokenValuePolicy { + return fn(ccd) +} + +func responseDirectiveValidator(s string) TokenValuePolicy { + switch s { + case MustRevalidate, NoStore, NoTransform, Public, ProxyRevalidate: + return NoArgument + case NoCache, Private: + return QuotedStringOnly + case MaxAge, SMaxAge: + return TokenOnly + default: + return AnyTokenValue + } +} + +func requestDirectiveValidator(s string) TokenValuePolicy { + switch s { + case MaxAge, MaxStale, MinFresh: + return TokenOnly + case NoCache, NoStore, NoTransform, OnlyIfCached: + return NoArgument + default: + return AnyTokenValue + } +} + +// ParseRequestDirective parses a single token. 
+func ParseRequestDirective(s string) (*TokenPair, error) { + return parseDirective(s, directiveValidatorFn(requestDirectiveValidator)) +} + +func ParseResponseDirective(s string) (*TokenPair, error) { + return parseDirective(s, directiveValidatorFn(responseDirectiveValidator)) +} + +func parseDirective(s string, ccd directiveValidator) (*TokenPair, error) { + s = strings.TrimSpace(s) + + i := strings.IndexByte(s, '=') + if i == -1 { + return &TokenPair{Name: s}, nil + } + + pair := &TokenPair{Name: strings.TrimSpace(s[:i])} + + if len(s) <= i { + // `key=` feels like it's a parse error, but it's HTTP... + // for now, return as if nothing happened. + return pair, nil + } + + v := strings.TrimSpace(s[i+1:]) + switch ccd.Validate(pair.Name) { + case TokenOnly: + if v[0] == '"' { + return nil, fmt.Errorf(`invalid value for %s (quoted string not allowed)`, pair.Name) + } + case QuotedStringOnly: // quoted-string only + if v[0] != '"' { + return nil, fmt.Errorf(`invalid value for %s (bare token not allowed)`, pair.Name) + } + tmp, err := strconv.Unquote(v) + if err != nil { + return nil, fmt.Errorf(`malformed quoted string in token`) + } + v = tmp + case AnyTokenValue: + if v[0] == '"' { + tmp, err := strconv.Unquote(v) + if err != nil { + return nil, fmt.Errorf(`malformed quoted string in token`) + } + v = tmp + } + case NoArgument: + if len(v) > 0 { + return nil, fmt.Errorf(`received argument to directive %s`, pair.Name) + } + } + + pair.Value = v + return pair, nil +} + +func ParseResponseDirectives(s string) ([]*TokenPair, error) { + return parseDirectives(s, ParseResponseDirective) +} + +func ParseRequestDirectives(s string) ([]*TokenPair, error) { + return parseDirectives(s, ParseRequestDirective) +} + +func parseDirectives(s string, p func(string) (*TokenPair, error)) ([]*TokenPair, error) { + scanner := bufio.NewScanner(strings.NewReader(s)) + scanner.Split(scanCommaSeparatedWords) + + var tokens []*TokenPair + for scanner.Scan() { + tok, err := 
p(scanner.Text()) + if err != nil { + return nil, fmt.Errorf(`failed to parse token #%d: %w`, len(tokens)+1, err) + } + tokens = append(tokens, tok) + } + return tokens, nil +} + +// isSpace reports whether the character is a Unicode white space character. +// We avoid dependency on the unicode package, but check validity of the implementation +// in the tests. +func isSpace(r rune) bool { + if r <= '\u00FF' { + // Obvious ASCII ones: \t through \r plus space. Plus two Latin-1 oddballs. + switch r { + case ' ', '\t', '\n', '\v', '\f', '\r': + return true + case '\u0085', '\u00A0': + return true + } + return false + } + // High-valued ones. + if '\u2000' <= r && r <= '\u200a' { + return true + } + switch r { + case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000': + return true + } + return false +} + +func scanCommaSeparatedWords(data []byte, atEOF bool) (advance int, token []byte, err error) { + // Skip leading spaces. + start := 0 + for width := 0; start < len(data); start += width { + var r rune + r, width = utf8.DecodeRune(data[start:]) + if !isSpace(r) { + break + } + } + // Scan until we find a comma. Keep track of consecutive whitespaces + // so we remove them from the end result + var ws int + for width, i := 0, start; i < len(data); i += width { + var r rune + r, width = utf8.DecodeRune(data[i:]) + switch { + case isSpace(r): + ws++ + case r == ',': + return i + width, data[start : i-ws], nil + default: + ws = 0 + } + } + + // If we're at EOF, we have a final, non-empty, non-terminated word. Return it. + if atEOF && len(data) > start { + return len(data), data[start : len(data)-ws], nil + } + + // Request more data. + return start, nil, nil +} + +// ParseRequest parses the content of `Cache-Control` header of an HTTP Request. 
+func ParseRequest(v string) (*RequestDirective, error) {
+	// Allocate extensions up front: the default arm below writes into this
+	// map, and assignment to a nil map panics (ParseResponse already does this).
+	dir := RequestDirective{extensions: make(map[string]string)}
+	tokens, err := ParseRequestDirectives(v)
+	if err != nil {
+		return nil, fmt.Errorf(`failed to parse tokens: %w`, err)
+	}
+
+	for _, token := range tokens {
+		name := strings.ToLower(token.Name)
+		switch name {
+		case MaxAge:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse max-age: %w`, err)
+			}
+			dir.maxAge = &iv
+		case MaxStale:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse max-stale: %w`, err)
+			}
+			dir.maxStale = &iv
+		case MinFresh:
+			iv, err := strconv.ParseUint(token.Value, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf(`failed to parse min-fresh: %w`, err)
+			}
+			dir.minFresh = &iv
+		case NoCache:
+			dir.noCache = true
+		case NoStore:
+			dir.noStore = true
+		case NoTransform:
+			dir.noTransform = true
+		case OnlyIfCached:
+			dir.onlyIfCached = true
+		default:
+			dir.extensions[token.Name] = token.Value
+		}
+	}
+	return &dir, nil
+}
+
+// ParseResponse parses the content of `Cache-Control` header of an HTTP Response.
+func ParseResponse(v string) (*ResponseDirective, error) { + tokens, err := ParseResponseDirectives(v) + if err != nil { + return nil, fmt.Errorf(`failed to parse tokens: %w`, err) + } + + var dir ResponseDirective + dir.extensions = make(map[string]string) + for _, token := range tokens { + name := strings.ToLower(token.Name) + switch name { + case MaxAge: + iv, err := strconv.ParseUint(token.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf(`failed to parse max-age: %w`, err) + } + dir.maxAge = &iv + case NoCache: + scanner := bufio.NewScanner(strings.NewReader(token.Value)) + scanner.Split(scanCommaSeparatedWords) + for scanner.Scan() { + dir.noCache = append(dir.noCache, scanner.Text()) + } + case NoStore: + dir.noStore = true + case NoTransform: + dir.noTransform = true + case Public: + dir.public = true + case Private: + scanner := bufio.NewScanner(strings.NewReader(token.Value)) + scanner.Split(scanCommaSeparatedWords) + for scanner.Scan() { + dir.private = append(dir.private, scanner.Text()) + } + case ProxyRevalidate: + dir.proxyRevalidate = true + case SMaxAge: + iv, err := strconv.ParseUint(token.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf(`failed to parse s-maxage: %w`, err) + } + dir.sMaxAge = &iv + default: + dir.extensions[token.Name] = token.Value + } + } + return &dir, nil +} diff --git a/vendor/github.com/lestrrat-go/httprc/.gitignore b/vendor/github.com/lestrrat-go/httprc/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/lestrrat-go/httprc/.golangci.yml b/vendor/github.com/lestrrat-go/httprc/.golangci.yml new file mode 
100644 index 0000000000..8642432169 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/.golangci.yml @@ -0,0 +1,84 @@ +run: + +linters-settings: + govet: + enable-all: true + disable: + - shadow + - fieldalignment + +linters: + enable-all: true + disable: + - cyclop + - dupl + - exhaustive + - exhaustivestruct + - errorlint + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofumpt + - golint #deprecated + - gomnd + - gosec + - govet + - interfacer # deprecated + - ifshort + - ireturn # No, I _LIKE_ returning interfaces + - lll + - maligned # deprecated + - makezero + - nakedret + - nestif + - nlreturn + - paralleltest + - scopelint # deprecated + - tagliatelle + - testpackage + - thelper + - varnamelen # short names are ok + - wrapcheck + - wsl + +issues: + exclude-rules: + # not needed + - path: /*.go + text: "ST1003: should not use underscores in package names" + linters: + - stylecheck + - path: /*.go + text: "don't use an underscore in package name" + linters: + - revive + - path: /main.go + linters: + - errcheck + - path: internal/codegen/codegen.go + linters: + - errcheck + - path: /*_test.go + linters: + - errcheck + - forcetypeassert + - path: /*_example_test.go + linters: + - forbidigo + - path: cmd/jwx/jwx.go + linters: + - forbidigo + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
+ max-same-issues: 0 + diff --git a/vendor/github.com/lestrrat-go/httprc/Changes b/vendor/github.com/lestrrat-go/httprc/Changes new file mode 100644 index 0000000000..e2629fdd78 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/Changes @@ -0,0 +1,17 @@ +Changes +======= + +v1.0.4 19 Jul 2022 + * Fix sloppy API breakage + +v1.0.3 19 Jul 2022 + * Fix queue insertion in the middle of the queue (#7) + +v1.0.2 13 Jun 2022 + * Properly release a lock when the fetch fails (#5) + +v1.0.1 29 Mar 2022 + * Bump dependency for github.com/lestrrat-go/httpcc to v1.0.1 + +v1.0.0 29 Mar 2022 + * Initial release, refactored out of github.com/lestrrat-go/jwx diff --git a/vendor/github.com/lestrrat-go/httprc/LICENSE b/vendor/github.com/lestrrat-go/httprc/LICENSE new file mode 100644 index 0000000000..3e196892ca --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/httprc/README.md b/vendor/github.com/lestrrat-go/httprc/README.md new file mode 100644 index 0000000000..1583806520 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/README.md @@ -0,0 +1,130 @@ +# httprc + +`httprc` is a HTTP "Refresh" Cache. Its aim is to cache a remote resource that +can be fetched via HTTP, but keep the cached content up-to-date based on periodic +refreshing. + +# SYNOPSIS + + +```go +package httprc_test + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "sync" + "time" + + "github.com/lestrrat-go/httprc" +) + +const ( + helloWorld = `Hello World!` + goodbyeWorld = `Goodbye World!` +) + +func ExampleCache() { + var mu sync.RWMutex + + msg := helloWorld + + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(`Cache-Control`, fmt.Sprintf(`max-age=%d`, 2)) + w.WriteHeader(http.StatusOK) + mu.RLock() + fmt.Fprint(w, msg) + mu.RUnlock() + })) + defer srv.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + errSink := httprc.ErrSinkFunc(func(err error) { + fmt.Printf("%s\n", err) + }) + + c := httprc.NewCache(ctx, + httprc.WithErrSink(errSink), + httprc.WithRefreshWindow(time.Second), // force checks every second + ) + + c.Register(srv.URL, + httprc.WithHTTPClient(srv.Client()), // we need client with TLS settings + httprc.WithMinRefreshInterval(time.Second), // allow max-age=1 (smallest) + ) + + payload, err := c.Get(ctx, srv.URL) + if err != nil { + fmt.Printf("%s\n", err) + return + } + + if string(payload.([]byte)) != helloWorld { + fmt.Printf("payload mismatch: %s\n", payload) + return + } + + mu.Lock() + msg = goodbyeWorld + mu.Unlock() + + time.Sleep(4 * time.Second) + + payload, err = c.Get(ctx, srv.URL) + if err != nil { + fmt.Printf("%s\n", err) + return + } + + if string(payload.([]byte)) != goodbyeWorld { + fmt.Printf("payload mismatch: %s\n", payload) + return + } + + cancel() + 
+ // OUTPUT: +} +``` +source: [httprc_example_test.go](https://github.com/lestrrat-go/jwx/blob/main/httprc_example_test.go) + + +# Sequence Diagram + +```mermaid +sequenceDiagram + autonumber + actor User + participant httprc.Cache + participant httprc.Storage + User->>httprc.Cache: Fetch URL `u` + activate httprc.Storage + httprc.Cache->>httprc.Storage: Fetch local cache for `u` + alt Cache exists + httprc.Storage-->httprc.Cache: Return local cache + httprc.Cache-->>User: Return data + Note over httprc.Storage: If the cache exists, there's nothing more to do.
The cached content will be updated periodically in httprc.Refresher + deactivate httprc.Storage + else Cache does not exist + activate httprc.Fetcher + httprc.Cache->>httprc.Fetcher: Fetch remote resource `u` + httprc.Fetcher-->>httprc.Cache: Return fetched data + deactivate httprc.Fetcher + httprc.Cache-->>User: Return data + httprc.Cache-)httprc.Refresher: Enqueue into auto-refresh queue + activate httprc.Refresher + loop Refresh Loop + Note over httprc.Storage,httprc.Fetcher: Cached contents are updated synchronously + httprc.Refresher->>httprc.Refresher: Wait until next refresh + httprc.Refresher-->>httprc.Fetcher: Request fetch + httprc.Fetcher->>httprc.Refresher: Return fetched data + httprc.Refresher-->>httprc.Storage: Store new version in cache + httprc.Refresher->>httprc.Refresher: Enqueue into auto-refresh queue (again) + end + deactivate httprc.Refresher + end +``` diff --git a/vendor/github.com/lestrrat-go/httprc/cache.go b/vendor/github.com/lestrrat-go/httprc/cache.go new file mode 100644 index 0000000000..505e5ae446 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/cache.go @@ -0,0 +1,172 @@ +package httprc + +import ( + "context" + "fmt" + "net/http" + "sync" + "time" +) + +// ErrSink is an abstraction that allows users to consume errors +// produced while the cache queue is running. +type HTTPClient interface { + Get(string) (*http.Response, error) +} + +// Cache represents a cache that stores resources locally, while +// periodically refreshing the contents based on HTTP header values +// and/or user-supplied hints. +// +// Refresh is performed _periodically_, and therefore the contents +// are not kept up-to-date in real time. The interval between checks +// for refreshes is called the refresh window. +// +// The default refresh window is 15 minutes. 
This means that if a +// resource is fetched is at time T, and it is supposed to be +// refreshed in 20 minutes, the next refresh for this resource will +// happen at T+30 minutes (15+15 minutes). +type Cache struct { + mu sync.RWMutex + queue *queue + wl Whitelist +} + +const defaultRefreshWindow = 15 * time.Minute + +// New creates a new Cache object. +// +// The context object in the argument controls the life-cycle of the +// auto-refresh worker. If you cancel the `ctx`, then the automatic +// refresh will stop working. +// +// Refresh will only be performed periodically where the interval between +// refreshes are controlled by the `refresh window` variable. For example, +// if the refresh window is every 5 minutes and the resource was queued +// to be refreshed at 7 minutes, the resource will be refreshed after 10 +// minutes (in 2 refresh window time). +// +// The refresh window can be configured by using `httprc.WithRefreshWindow` +// option. If you want refreshes to be performed more often, provide a smaller +// refresh window. If you specify a refresh window that is smaller than 1 +// second, it will automatically be set to the default value, which is 15 +// minutes. +// +// Internally the HTTP fetching is done using a pool of HTTP fetch +// workers. The default number of workers is 3. 
You may change this +// number by specifying the `httprc.WithFetcherWorkerCount` +func NewCache(ctx context.Context, options ...CacheOption) *Cache { + var refreshWindow time.Duration + var errSink ErrSink + var wl Whitelist + var fetcherOptions []FetcherOption + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identRefreshWindow{}: + refreshWindow = option.Value().(time.Duration) + case identFetcherWorkerCount{}, identWhitelist{}: + fetcherOptions = append(fetcherOptions, option) + case identErrSink{}: + errSink = option.Value().(ErrSink) + } + } + + if refreshWindow < time.Second { + refreshWindow = defaultRefreshWindow + } + + fetch := NewFetcher(ctx, fetcherOptions...) + queue := newQueue(ctx, refreshWindow, fetch, errSink) + + return &Cache{ + queue: queue, + wl: wl, + } +} + +// Register configures a URL to be stored in the cache. +// +// For any given URL, the URL must be registered _BEFORE_ it is +// accessed using `Get()` method. +func (c *Cache) Register(u string, options ...RegisterOption) error { + c.mu.Lock() + defer c.mu.Unlock() + + if wl := c.wl; wl != nil { + if !wl.IsAllowed(u) { + return fmt.Errorf(`httprc.Cache: url %q has been rejected by whitelist`, u) + } + } + + return c.queue.Register(u, options...) +} + +// Unregister removes the given URL `u` from the cache. +// +// Subsequent calls to `Get()` will fail until `u` is registered again. +func (c *Cache) Unregister(u string) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.queue.Unregister(u) +} + +// IsRegistered returns true if the given URL `u` has already been +// registered in the cache. 
+func (c *Cache) IsRegistered(u string) bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.queue.IsRegistered(u) +} + +// Refresh is identical to Get(), except it always fetches the +// specified resource anew, and updates the cached content +func (c *Cache) Refresh(ctx context.Context, u string) (interface{}, error) { + return c.getOrFetch(ctx, u, true) +} + +// Get returns the cached object. +// +// The context.Context argument is used to control the timeout for +// synchronous fetches, when they need to happen. Synchronous fetches +// will be performed when the cache does not contain the specified +// resource. +func (c *Cache) Get(ctx context.Context, u string) (interface{}, error) { + return c.getOrFetch(ctx, u, false) +} + +func (c *Cache) getOrFetch(ctx context.Context, u string, forceRefresh bool) (interface{}, error) { + c.mu.RLock() + e, ok := c.queue.getRegistered(u) + if !ok { + c.mu.RUnlock() + return nil, fmt.Errorf(`url %q is not registered (did you make sure to call Register() first?)`, u) + } + c.mu.RUnlock() + + // Only one goroutine may enter this section. + e.acquireSem() + + // has this entry been fetched? 
(but ignore and do a fetch + // if forceRefresh is true) + if forceRefresh || !e.hasBeenFetched() { + if err := c.queue.fetchAndStore(ctx, e); err != nil { + e.releaseSem() + return nil, fmt.Errorf(`failed to fetch %q: %w`, u, err) + } + } + + e.releaseSem() + + e.mu.RLock() + data := e.data + e.mu.RUnlock() + + return data, nil +} + +func (c *Cache) Snapshot() *Snapshot { + c.mu.RLock() + defer c.mu.RUnlock() + return c.queue.snapshot() +} diff --git a/vendor/github.com/lestrrat-go/httprc/fetcher.go b/vendor/github.com/lestrrat-go/httprc/fetcher.go new file mode 100644 index 0000000000..0bce87a01b --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/fetcher.go @@ -0,0 +1,182 @@ +package httprc + +import ( + "context" + "fmt" + "net/http" + "sync" +) + +type fetchRequest struct { + mu sync.RWMutex + + // client contains the HTTP Client that can be used to make a + // request. By setting a custom *http.Client, you can for example + // provide a custom http.Transport + // + // If not specified, http.DefaultClient will be used. 
+ client HTTPClient + + wl Whitelist + + // u contains the URL to be fetched + url string + + // reply is a field that is only used by the internals of the fetcher + // it is used to return the result of fetching + reply chan *fetchResult +} + +type fetchResult struct { + mu sync.RWMutex + res *http.Response + err error +} + +func (fr *fetchResult) reply(ctx context.Context, reply chan *fetchResult) error { + select { + case <-ctx.Done(): + return ctx.Err() + case reply <- fr: + } + + close(reply) + return nil +} + +type fetcher struct { + requests chan *fetchRequest +} + +type Fetcher interface { + Fetch(context.Context, string, ...FetchOption) (*http.Response, error) + fetch(context.Context, *fetchRequest) (*http.Response, error) +} + +func NewFetcher(ctx context.Context, options ...FetcherOption) Fetcher { + var nworkers int + var wl Whitelist + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identFetcherWorkerCount{}: + nworkers = option.Value().(int) + case identWhitelist{}: + wl = option.Value().(Whitelist) + } + } + + if nworkers < 1 { + nworkers = 3 + } + + incoming := make(chan *fetchRequest) + for i := 0; i < nworkers; i++ { + go runFetchWorker(ctx, incoming, wl) + } + return &fetcher{ + requests: incoming, + } +} + +func (f *fetcher) Fetch(ctx context.Context, u string, options ...FetchOption) (*http.Response, error) { + var client HTTPClient + var wl Whitelist + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identHTTPClient{}: + client = option.Value().(HTTPClient) + case identWhitelist{}: + wl = option.Value().(Whitelist) + } + } + + req := fetchRequest{ + client: client, + url: u, + wl: wl, + } + + return f.fetch(ctx, &req) +} + +// fetch (unexported) is the main fetching implemntation. 
+// it allows the caller to reuse the same *fetchRequest object +func (f *fetcher) fetch(ctx context.Context, req *fetchRequest) (*http.Response, error) { + reply := make(chan *fetchResult, 1) + req.mu.Lock() + req.reply = reply + req.mu.Unlock() + + // Send a request to the backend + select { + case <-ctx.Done(): + return nil, ctx.Err() + case f.requests <- req: + } + + // wait until we get a reply + select { + case <-ctx.Done(): + return nil, ctx.Err() + case fr := <-reply: + fr.mu.RLock() + res := fr.res + err := fr.err + fr.mu.RUnlock() + return res, err + } +} + +func runFetchWorker(ctx context.Context, incoming chan *fetchRequest, wl Whitelist) { +LOOP: + for { + select { + case <-ctx.Done(): + break LOOP + case req := <-incoming: + req.mu.RLock() + reply := req.reply + client := req.client + if client == nil { + client = http.DefaultClient + } + url := req.url + reqwl := req.wl + req.mu.RUnlock() + + var wls []Whitelist + for _, v := range []Whitelist{wl, reqwl} { + if v != nil { + wls = append(wls, v) + } + } + + if len(wls) > 0 { + for _, wl := range wls { + if !wl.IsAllowed(url) { + r := &fetchResult{ + err: fmt.Errorf(`fetching url %q rejected by whitelist`, url), + } + if err := r.reply(ctx, reply); err != nil { + break LOOP + } + continue LOOP + } + } + } + + // The body is handled by the consumer of the fetcher + //nolint:bodyclose + res, err := client.Get(url) + r := &fetchResult{ + res: res, + err: err, + } + if err := r.reply(ctx, reply); err != nil { + break LOOP + } + } + } +} diff --git a/vendor/github.com/lestrrat-go/httprc/httprc.go b/vendor/github.com/lestrrat-go/httprc/httprc.go new file mode 100644 index 0000000000..8ae056a7e1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/httprc.go @@ -0,0 +1,22 @@ +//go:generate tools/genoptions.sh + +// Package httprc implements a cache for resources available +// over http(s). 
Its aim is not only to cache these resources so +// that it saves on HTTP roundtrips, but it also periodically +// attempts to auto-refresh these resources once they are cached +// based on the user-specified intervals and HTTP `Expires` and +// `Cache-Control` headers, thus keeping the entries _relatively_ fresh. +package httprc + +import "fmt" + +// RefreshError is the underlying error type that is sent to +// the `httprc.ErrSink` objects +type RefreshError struct { + URL string + Err error +} + +func (re *RefreshError) Error() string { + return fmt.Sprintf(`refresh error (%q): %s`, re.URL, re.Err) +} diff --git a/vendor/github.com/lestrrat-go/httprc/options.yaml b/vendor/github.com/lestrrat-go/httprc/options.yaml new file mode 100644 index 0000000000..5a5139cb8a --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/options.yaml @@ -0,0 +1,119 @@ +package_name: httprc +output: options_gen.go +interfaces: + - name: RegisterOption + comment: | + RegisterOption desribes options that can be passed to `(httprc.Cache).Register()` + - name: CacheOption + comment: | + CacheOption desribes options that can be passed to `New()` + - name: FetcherOption + methods: + - cacheOption + comment: | + FetcherOption describes options that can be passed to `(httprc.Fetcher).NewFetcher()` + - name: FetchOption + comment: | + FetchOption describes options that can be passed to `(httprc.Fetcher).Fetch()` + - name: FetchRegisterOption + methods: + - fetchOption + - registerOption + - name: FetchFetcherRegisterOption + methods: + - fetchOption + - fetcherOption + - registerOption +options: + - ident: FetcherWorkerCount + interface: FetcherOption + argument_type: int + comment: | + WithFetchWorkerCount specifies the number of HTTP fetch workers that are spawned + in the backend. By default 3 workers are spawned. 
+ - ident: Whitelist + interface: FetchFetcherRegisterOption + argument_type: Whitelist + comment: | + WithWhitelist specifies the Whitelist object that can control which URLs are + allowed to be processed. + + It can be passed to `httprc.NewCache` as a whitelist applied to all + URLs that are fetched by the cache, or it can be passed on a per-URL + basis using `(httprc.Cache).Register()`. If both are specified, + the url must fulfill _both_ the cache-wide whitelist and the per-URL + whitelist. + - ident: Transformer + interface: RegisterOption + argument_type: Transformer + comment: | + WithTransformer specifies the `httprc.Transformer` object that should be applied + to the fetched resource. The `Transform()` method is only called if the HTTP request + returns a `200 OK` status. + - ident: HTTPClient + interface: FetchRegisterOption + argument_type: HTTPClient + comment: | + WithHTTPClient specififes the HTTP Client object that should be used to fetch + the resource. For example, if you need an `*http.Client` instance that requires + special TLS or Authorization setup, you might want to pass it using this option. + - ident: MinRefreshInterval + interface: RegisterOption + argument_type: time.Duration + comment: | + WithMinRefreshInterval specifies the minimum refresh interval to be used. + + When we fetch the key from a remote URL, we first look at the `max-age` + directive from `Cache-Control` response header. If this value is present, + we compare the `max-age` value and the value specified by this option + and take the larger one (e.g. if `max-age` = 5 minutes and `min refresh` = 10 + minutes, then next fetch will happen in 10 minutes) + + Next we check for the `Expires` header, and similarly if the header is + present, we compare it against the value specified by this option, + and take the larger one. + + Finally, if neither of the above headers are present, we use the + value specified by this option as the interval until the next refresh. 
+ + If unspecified, the minimum refresh interval is 1 hour. + + This value and the header values are ignored if `WithRefreshInterval` is specified. + - ident: RefreshInterval + interface: RegisterOption + argument_type: time.Duration + comment: | + WithRefreshInterval specifies the static interval between refreshes + of resources controlled by `httprc.Cache`. + + Providing this option overrides the adaptive token refreshing based + on Cache-Control/Expires header (and `httprc.WithMinRefreshInterval`), + and refreshes will *always* happen in this interval. + + You generally do not want to make this value too small, as it can easily + be considered a DoS attack, and there is no backoff mechanism for failed + attempts. + - ident: RefreshWindow + interface: CacheOption + argument_type: time.Duration + comment: | + WithRefreshWindow specifies the interval between checks for refreshes. + `httprc.Cache` does not check for refreshes in exact intervals. Instead, + it wakes up at every tick that occurs in the interval specified by + `WithRefreshWindow` option, and refreshes all entries that need to be + refreshed within this window. + + The default value is 15 minutes. + + You generally do not want to make this value too small, as it can easily + be considered a DoS attack, and there is no backoff mechanism for failed + attempts. + - ident: ErrSink + interface: CacheOption + argument_type: ErrSink + comment: | + WithErrSink specifies the `httprc.ErrSink` object that handles errors + that occurred during the cache's execution. For example, you will be + able to intercept errors that occurred during the execution of Transformers. + + diff --git a/vendor/github.com/lestrrat-go/httprc/options_gen.go b/vendor/github.com/lestrrat-go/httprc/options_gen.go new file mode 100644 index 0000000000..daaf65f951 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/options_gen.go @@ -0,0 +1,221 @@ +// This file is auto-generated by github.com/lestrrat-go/option/cmd/genoptions. 
DO NOT EDIT + +package httprc + +import ( + "time" + + "github.com/lestrrat-go/option" +) + +type Option = option.Interface + +// CacheOption desribes options that can be passed to `New()` +type CacheOption interface { + Option + cacheOption() +} + +type cacheOption struct { + Option +} + +func (*cacheOption) cacheOption() {} + +type FetchFetcherRegisterOption interface { + Option + fetchOption() + fetcherOption() + registerOption() +} + +type fetchFetcherRegisterOption struct { + Option +} + +func (*fetchFetcherRegisterOption) fetchOption() {} + +func (*fetchFetcherRegisterOption) fetcherOption() {} + +func (*fetchFetcherRegisterOption) registerOption() {} + +// FetchOption describes options that can be passed to `(httprc.Fetcher).Fetch()` +type FetchOption interface { + Option + fetchOption() +} + +type fetchOption struct { + Option +} + +func (*fetchOption) fetchOption() {} + +type FetchRegisterOption interface { + Option + fetchOption() + registerOption() +} + +type fetchRegisterOption struct { + Option +} + +func (*fetchRegisterOption) fetchOption() {} + +func (*fetchRegisterOption) registerOption() {} + +// FetcherOption describes options that can be passed to `(httprc.Fetcher).NewFetcher()` +type FetcherOption interface { + Option + cacheOption() +} + +type fetcherOption struct { + Option +} + +func (*fetcherOption) cacheOption() {} + +// RegisterOption desribes options that can be passed to `(httprc.Cache).Register()` +type RegisterOption interface { + Option + registerOption() +} + +type registerOption struct { + Option +} + +func (*registerOption) registerOption() {} + +type identErrSink struct{} +type identFetcherWorkerCount struct{} +type identHTTPClient struct{} +type identMinRefreshInterval struct{} +type identRefreshInterval struct{} +type identRefreshWindow struct{} +type identTransformer struct{} +type identWhitelist struct{} + +func (identErrSink) String() string { + return "WithErrSink" +} + +func (identFetcherWorkerCount) String() string { + 
return "WithFetcherWorkerCount" +} + +func (identHTTPClient) String() string { + return "WithHTTPClient" +} + +func (identMinRefreshInterval) String() string { + return "WithMinRefreshInterval" +} + +func (identRefreshInterval) String() string { + return "WithRefreshInterval" +} + +func (identRefreshWindow) String() string { + return "WithRefreshWindow" +} + +func (identTransformer) String() string { + return "WithTransformer" +} + +func (identWhitelist) String() string { + return "WithWhitelist" +} + +// WithErrSink specifies the `httprc.ErrSink` object that handles errors +// that occurred during the cache's execution. For example, you will be +// able to intercept errors that occurred during the execution of Transformers. +func WithErrSink(v ErrSink) CacheOption { + return &cacheOption{option.New(identErrSink{}, v)} +} + +// WithFetchWorkerCount specifies the number of HTTP fetch workers that are spawned +// in the backend. By default 3 workers are spawned. +func WithFetcherWorkerCount(v int) FetcherOption { + return &fetcherOption{option.New(identFetcherWorkerCount{}, v)} +} + +// WithHTTPClient specififes the HTTP Client object that should be used to fetch +// the resource. For example, if you need an `*http.Client` instance that requires +// special TLS or Authorization setup, you might want to pass it using this option. +func WithHTTPClient(v HTTPClient) FetchRegisterOption { + return &fetchRegisterOption{option.New(identHTTPClient{}, v)} +} + +// WithMinRefreshInterval specifies the minimum refresh interval to be used. +// +// When we fetch the key from a remote URL, we first look at the `max-age` +// directive from `Cache-Control` response header. If this value is present, +// we compare the `max-age` value and the value specified by this option +// and take the larger one (e.g. 
if `max-age` = 5 minutes and `min refresh` = 10 +// minutes, then next fetch will happen in 10 minutes) +// +// Next we check for the `Expires` header, and similarly if the header is +// present, we compare it against the value specified by this option, +// and take the larger one. +// +// Finally, if neither of the above headers are present, we use the +// value specified by this option as the interval until the next refresh. +// +// If unspecified, the minimum refresh interval is 1 hour. +// +// This value and the header values are ignored if `WithRefreshInterval` is specified. +func WithMinRefreshInterval(v time.Duration) RegisterOption { + return ®isterOption{option.New(identMinRefreshInterval{}, v)} +} + +// WithRefreshInterval specifies the static interval between refreshes +// of resources controlled by `httprc.Cache`. +// +// Providing this option overrides the adaptive token refreshing based +// on Cache-Control/Expires header (and `httprc.WithMinRefreshInterval`), +// and refreshes will *always* happen in this interval. +// +// You generally do not want to make this value too small, as it can easily +// be considered a DoS attack, and there is no backoff mechanism for failed +// attempts. +func WithRefreshInterval(v time.Duration) RegisterOption { + return ®isterOption{option.New(identRefreshInterval{}, v)} +} + +// WithRefreshWindow specifies the interval between checks for refreshes. +// `httprc.Cache` does not check for refreshes in exact intervals. Instead, +// it wakes up at every tick that occurs in the interval specified by +// `WithRefreshWindow` option, and refreshes all entries that need to be +// refreshed within this window. +// +// The default value is 15 minutes. +// +// You generally do not want to make this value too small, as it can easily +// be considered a DoS attack, and there is no backoff mechanism for failed +// attempts. 
+func WithRefreshWindow(v time.Duration) CacheOption { + return &cacheOption{option.New(identRefreshWindow{}, v)} +} + +// WithTransformer specifies the `httprc.Transformer` object that should be applied +// to the fetched resource. The `Transform()` method is only called if the HTTP request +// returns a `200 OK` status. +func WithTransformer(v Transformer) RegisterOption { + return ®isterOption{option.New(identTransformer{}, v)} +} + +// WithWhitelist specifies the Whitelist object that can control which URLs are +// allowed to be processed. +// +// It can be passed to `httprc.NewCache` as a whitelist applied to all +// URLs that are fetched by the cache, or it can be passed on a per-URL +// basis using `(httprc.Cache).Register()`. If both are specified, +// the url must fulfill _both_ the cache-wide whitelist and the per-URL +// whitelist. +func WithWhitelist(v Whitelist) FetchFetcherRegisterOption { + return &fetchFetcherRegisterOption{option.New(identWhitelist{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/httprc/queue.go b/vendor/github.com/lestrrat-go/httprc/queue.go new file mode 100644 index 0000000000..897207b7d2 --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/queue.go @@ -0,0 +1,459 @@ +package httprc + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/lestrrat-go/httpcc" +) + +// ErrSink is an abstraction that allows users to consume errors +// produced while the cache queue is running. +type ErrSink interface { + // Error accepts errors produced during the cache queue's execution. + // The method should never block, otherwise the fetch loop may be + // paused for a prolonged amount of time. + Error(error) +} + +type ErrSinkFunc func(err error) + +func (f ErrSinkFunc) Error(err error) { + f(err) +} + +// Transformer is responsible for converting an HTTP response +// into an appropriate form of your choosing. 
+type Transformer interface { + // Transform receives an HTTP response object, and should + // return an appropriate object that suits your needs. + // + // If you happen to use the response body, you are responsible + // for closing the body + Transform(string, *http.Response) (interface{}, error) +} + +type TransformFunc func(string, *http.Response) (interface{}, error) + +func (f TransformFunc) Transform(u string, res *http.Response) (interface{}, error) { + return f(u, res) +} + +// BodyBytes is the default Transformer applied to all resources. +// It takes an *http.Response object and extracts the body +// of the response as `[]byte` +type BodyBytes struct{} + +func (BodyBytes) Transform(_ string, res *http.Response) (interface{}, error) { + buf, err := ioutil.ReadAll(res.Body) + defer res.Body.Close() + if err != nil { + return nil, fmt.Errorf(`failed to read response body: %w`, err) + } + + return buf, nil +} + +type rqentry struct { + fireAt time.Time + url string +} + +// entry represents a resource to be fetched over HTTP, +// long with optional specifications such as the *http.Client +// object to use. +type entry struct { + mu sync.RWMutex + sem chan struct{} + + lastFetch time.Time + + // Interval between refreshes are calculated two ways. + // 1) You can set an explicit refresh interval by using WithRefreshInterval(). + // In this mode, it doesn't matter what the HTTP response says in its + // Cache-Control or Expires headers + // 2) You can let us calculate the time-to-refresh based on the key's + // Cache-Control or Expires headers. + // First, the user provides us the absolute minimum interval before + // refreshes. We will never check for refreshes before this specified + // amount of time. + // + // Next, max-age directive in the Cache-Control header is consulted. + // If `max-age` is not present, we skip the following section, and + // proceed to the next option. 
+ // If `max-age > user-supplied minimum interval`, then we use the max-age, + // otherwise the user-supplied minimum interval is used. + // + // Next, the value specified in Expires header is consulted. + // If the header is not present, we skip the following seciont and + // proceed to the next option. + // We take the time until expiration `expires - time.Now()`, and + // if `time-until-expiration > user-supplied minimum interval`, then + // we use the expires value, otherwise the user-supplied minimum interval is used. + // + // If all of the above fails, we used the user-supplied minimum interval + refreshInterval time.Duration + minRefreshInterval time.Duration + + request *fetchRequest + + transform Transformer + data interface{} +} + +func (e *entry) acquireSem() { + e.sem <- struct{}{} +} + +func (e *entry) releaseSem() { + <-e.sem +} + +func (e *entry) hasBeenFetched() bool { + e.mu.RLock() + defer e.mu.RUnlock() + return !e.lastFetch.IsZero() +} + +// queue is responsible for updating the contents of the storage +type queue struct { + mu sync.RWMutex + registry map[string]*entry + windowSize time.Duration + fetch Fetcher + fetchCond *sync.Cond + fetchQueue []*rqentry + + // list is a sorted list of urls to their expected fire time + // when we get a new tick in the RQ loop, we process everything + // that can be fired up to the point the tick was called + list []*rqentry + + // clock is really only used by testing + clock interface { + Now() time.Time + } +} + +type clockFunc func() time.Time + +func (cf clockFunc) Now() time.Time { + return cf() +} + +func newQueue(ctx context.Context, window time.Duration, fetch Fetcher, errSink ErrSink) *queue { + fetchLocker := &sync.Mutex{} + rq := &queue{ + windowSize: window, + fetch: fetch, + fetchCond: sync.NewCond(fetchLocker), + registry: make(map[string]*entry), + clock: clockFunc(time.Now), + } + + go rq.refreshLoop(ctx, errSink) + + return rq +} + +func (q *queue) Register(u string, options 
...RegisterOption) error { + var refreshInterval time.Duration + var client HTTPClient + var wl Whitelist + var transform Transformer = BodyBytes{} + + minRefreshInterval := 15 * time.Minute + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identHTTPClient{}: + client = option.Value().(HTTPClient) + case identRefreshInterval{}: + refreshInterval = option.Value().(time.Duration) + case identMinRefreshInterval{}: + minRefreshInterval = option.Value().(time.Duration) + case identTransformer{}: + transform = option.Value().(Transformer) + case identWhitelist{}: + wl = option.Value().(Whitelist) + } + } + + q.mu.RLock() + rWindow := q.windowSize + q.mu.RUnlock() + + if refreshInterval > 0 && refreshInterval < rWindow { + return fmt.Errorf(`refresh interval (%s) is smaller than refresh window (%s): this will not as expected`, refreshInterval, rWindow) + } + + e := entry{ + sem: make(chan struct{}, 1), + minRefreshInterval: minRefreshInterval, + transform: transform, + refreshInterval: refreshInterval, + request: &fetchRequest{ + client: client, + url: u, + wl: wl, + }, + } + q.mu.Lock() + q.registry[u] = &e + q.mu.Unlock() + return nil +} + +func (q *queue) Unregister(u string) error { + q.mu.Lock() + defer q.mu.Unlock() + _, ok := q.registry[u] + if !ok { + return fmt.Errorf(`url %q has not been registered`, u) + } + delete(q.registry, u) + return nil +} + +func (q *queue) getRegistered(u string) (*entry, bool) { + q.mu.RLock() + e, ok := q.registry[u] + q.mu.RUnlock() + + return e, ok +} + +func (q *queue) IsRegistered(u string) bool { + _, ok := q.getRegistered(u) + return ok +} + +func (q *queue) fetchLoop(ctx context.Context, errSink ErrSink) { + for { + q.fetchCond.L.Lock() + for len(q.fetchQueue) <= 0 { + select { + case <-ctx.Done(): + return + default: + q.fetchCond.Wait() + } + } + list := make([]*rqentry, len(q.fetchQueue)) + copy(list, q.fetchQueue) + q.fetchQueue = q.fetchQueue[:0] + q.fetchCond.L.Unlock() + + for 
_, rq := range list { + select { + case <-ctx.Done(): + return + default: + } + + e, ok := q.getRegistered(rq.url) + if !ok { + continue + } + if err := q.fetchAndStore(ctx, e); err != nil { + if errSink != nil { + errSink.Error(&RefreshError{ + URL: rq.url, + Err: err, + }) + } + } + } + } +} + +// This loop is responsible for periodically updating the cached content +func (q *queue) refreshLoop(ctx context.Context, errSink ErrSink) { + // Tick every q.windowSize duration. + ticker := time.NewTicker(q.windowSize) + + go q.fetchLoop(ctx, errSink) + defer q.fetchCond.Signal() + + for { + select { + case <-ctx.Done(): + return + case t := <-ticker.C: + t = t.Round(time.Second) + // To avoid getting stuck here, we just copy the relevant + // items, and release the lock within this critical section + var list []*rqentry + q.mu.Lock() + var max int + for i, r := range q.list { + if r.fireAt.Before(t) || r.fireAt.Equal(t) { + max = i + list = append(list, r) + continue + } + break + } + + if len(list) > 0 { + q.list = q.list[max+1:] + } + q.mu.Unlock() // release lock + + if len(list) > 0 { + // Now we need to fetch these, but do this elsewhere so + // that we don't block this main loop + q.fetchCond.L.Lock() + q.fetchQueue = append(q.fetchQueue, list...) 
+ q.fetchCond.L.Unlock() + q.fetchCond.Signal() + } + } + } +} + +func (q *queue) fetchAndStore(ctx context.Context, e *entry) error { + e.mu.Lock() + defer e.mu.Unlock() + + // synchronously go fetch + e.lastFetch = time.Now() + res, err := q.fetch.fetch(ctx, e.request) + if err != nil { + // Even if the request failed, we need to queue the next fetch + q.enqueueNextFetch(nil, e) + return fmt.Errorf(`failed to fetch %q: %w`, e.request.url, err) + } + + q.enqueueNextFetch(res, e) + + data, err := e.transform.Transform(e.request.url, res) + if err != nil { + return fmt.Errorf(`failed to transform HTTP response for %q: %w`, e.request.url, err) + } + e.data = data + + return nil +} + +func (q *queue) Enqueue(u string, interval time.Duration) error { + fireAt := q.clock.Now().Add(interval).Round(time.Second) + + q.mu.Lock() + defer q.mu.Unlock() + + list := q.list + + ll := len(list) + if ll == 0 || list[ll-1].fireAt.Before(fireAt) { + list = append(list, &rqentry{ + fireAt: fireAt, + url: u, + }) + } else { + for i := 0; i < ll; i++ { + if i == ll-1 || list[i].fireAt.After(fireAt) { + // insert here + list = append(list[:i+1], list[i:]...) 
+ list[i] = &rqentry{fireAt: fireAt, url: u} + break + } + } + } + + q.list = list + return nil +} + +func (q *queue) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.WriteString(`{"list":[`) + q.mu.RLock() + for i, e := range q.list { + if i > 0 { + buf.WriteByte(',') + } + fmt.Fprintf(&buf, `{"fire_at":%q,"url":%q}`, e.fireAt.Format(time.RFC3339), e.url) + } + q.mu.RUnlock() + buf.WriteString(`]}`) + return buf.Bytes(), nil +} + +func (q *queue) enqueueNextFetch(res *http.Response, e *entry) { + dur := calculateRefreshDuration(res, e) + // TODO send to error sink + _ = q.Enqueue(e.request.url, dur) +} + +func calculateRefreshDuration(res *http.Response, e *entry) time.Duration { + if e.refreshInterval > 0 { + return e.refreshInterval + } + + if res != nil { + if v := res.Header.Get(`Cache-Control`); v != "" { + dir, err := httpcc.ParseResponse(v) + if err == nil { + maxAge, ok := dir.MaxAge() + if ok { + resDuration := time.Duration(maxAge) * time.Second + if resDuration > e.minRefreshInterval { + return resDuration + } + return e.minRefreshInterval + } + // fallthrough + } + // fallthrough + } + + if v := res.Header.Get(`Expires`); v != "" { + expires, err := http.ParseTime(v) + if err == nil { + resDuration := time.Until(expires) + if resDuration > e.minRefreshInterval { + return resDuration + } + return e.minRefreshInterval + } + // fallthrough + } + } + + // Previous fallthroughs are a little redandunt, but hey, it's all good. + return e.minRefreshInterval +} + +type SnapshotEntry struct { + URL string `json:"url"` + Data interface{} `json:"data"` + LastFetched time.Time `json:"last_fetched"` +} +type Snapshot struct { + Entries []SnapshotEntry `json:"entries"` +} + +// Snapshot returns the contents of the cache at the given moment. 
+func (q *queue) snapshot() *Snapshot { + q.mu.RLock() + list := make([]SnapshotEntry, 0, len(q.registry)) + + for url, e := range q.registry { + list = append(list, SnapshotEntry{ + URL: url, + LastFetched: e.lastFetch, + Data: e.data, + }) + } + q.mu.RUnlock() + + return &Snapshot{ + Entries: list, + } +} diff --git a/vendor/github.com/lestrrat-go/httprc/whitelist.go b/vendor/github.com/lestrrat-go/httprc/whitelist.go new file mode 100644 index 0000000000..b80332a6cd --- /dev/null +++ b/vendor/github.com/lestrrat-go/httprc/whitelist.go @@ -0,0 +1,73 @@ +package httprc + +import "regexp" + +// Whitelist is an interface for a set of URL whitelists. When provided +// to fetching operations, urls are checked against this object, and +// the object must return true for urls to be fetched. +type Whitelist interface { + IsAllowed(string) bool +} + +// InsecureWhitelist allows any URLs to be fetched. +type InsecureWhitelist struct{} + +func (InsecureWhitelist) IsAllowed(string) bool { + return true +} + +// RegexpWhitelist is a httprc.Whitelist object comprised of a list of *regexp.Regexp +// objects. All entries in the list are tried until one matches. If none of the +// *regexp.Regexp objects match, then the URL is deemed unallowed. +type RegexpWhitelist struct { + patterns []*regexp.Regexp +} + +func NewRegexpWhitelist() *RegexpWhitelist { + return &RegexpWhitelist{} +} + +func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist { + w.patterns = append(w.patterns, pat) + return w +} + +// IsAlloed returns true if any of the patterns in the whitelist +// returns true. +func (w *RegexpWhitelist) IsAllowed(u string) bool { + for _, pat := range w.patterns { + if pat.MatchString(u) { + return true + } + } + return false +} + +// MapWhitelist is a httprc.Whitelist object comprised of a map of strings. +// If the URL exists in the map, then the URL is allowed to be fetched. 
+type MapWhitelist struct { + store map[string]struct{} +} + +func NewMapWhitelist() *MapWhitelist { + return &MapWhitelist{store: make(map[string]struct{})} +} + +func (w *MapWhitelist) Add(pat string) *MapWhitelist { + w.store[pat] = struct{}{} + return w +} + +func (w *MapWhitelist) IsAllowed(u string) bool { + _, b := w.store[u] + return b +} + +// WhitelistFunc is a httprc.Whitelist object based on a function. +// You can perform any sort of check against the given URL to determine +// if it can be fetched or not. +type WhitelistFunc func(string) bool + +func (w WhitelistFunc) IsAllowed(u string) bool { + return w(u) +} diff --git a/vendor/github.com/lestrrat-go/iter/LICENSE b/vendor/github.com/lestrrat-go/iter/LICENSE new file mode 100644 index 0000000000..963209bfba --- /dev/null +++ b/vendor/github.com/lestrrat-go/iter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/iter/arrayiter/arrayiter.go b/vendor/github.com/lestrrat-go/iter/arrayiter/arrayiter.go new file mode 100644 index 0000000000..b531e769e0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/iter/arrayiter/arrayiter.go @@ -0,0 +1,192 @@ +package arrayiter + +import ( + "context" + "fmt" + "reflect" + "sync" +) + +func Iterate(ctx context.Context, a interface{}) (Iterator, error) { + arv := reflect.ValueOf(a) + + switch arv.Kind() { + case reflect.Array, reflect.Slice: + default: + return nil, fmt.Errorf(`argument must be an array/slice (%s)`, arv.Type()) + } + + ch := make(chan *Pair) + go func(ctx context.Context, ch chan *Pair, arv reflect.Value) { + defer close(ch) + + for i := 0; i < arv.Len(); i++ { + value := arv.Index(i) + pair := &Pair{ + Index: i, + Value: value.Interface(), + } + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, arv) + + return New(ch), nil +} + +// Source represents a array that knows how to create an iterator +type Source interface { + Iterate(context.Context) Iterator +} + +// Pair represents a single pair of key and value from a array +type Pair struct { + Index int + Value interface{} +} + +// Iterator iterates through keys and values of a array +type Iterator interface { + Next(context.Context) bool + Pair() *Pair +} + +type iter struct { + ch chan *Pair + mu sync.RWMutex + next *Pair +} + +// Visitor represents an object that handles each pair in a array +type Visitor interface { + Visit(int, interface{}) error +} + +// VisitorFunc is a type of Visitor based on a function +type VisitorFunc func(int, interface{}) error + +func (fn VisitorFunc) Visit(s int, v interface{}) error { + return fn(s, v) +} + +func New(ch chan *Pair) Iterator { + return &iter{ + ch: ch, + } +} + +// Next returns true if there are more items to read from the iterator +func (i *iter) Next(ctx context.Context) bool { + i.mu.RLock() + if i.ch == nil { + i.mu.RUnlock() + return false + } + 
i.mu.RUnlock() + + i.mu.Lock() + defer i.mu.Unlock() + select { + case <-ctx.Done(): + i.ch = nil + return false + case v, ok := <-i.ch: + if !ok { + i.ch = nil + return false + } + i.next = v + return true + } + + //nolint:govet + return false // never reached +} + +// Pair returns the currently buffered Pair. Calling Next() will reset its value +func (i *iter) Pair() *Pair { + i.mu.RLock() + defer i.mu.RUnlock() + return i.next +} + +// Walk walks through each element in the array +func Walk(ctx context.Context, s Source, v Visitor) error { + for i := s.Iterate(ctx); i.Next(ctx); { + pair := i.Pair() + if err := v.Visit(pair.Index, pair.Value); err != nil { + return fmt.Errorf(`failed to visit index %d: %w`, pair.Index, err) + } + } + return nil +} + +func AsArray(ctx context.Context, s interface{}, v interface{}) error { + var iter Iterator + switch reflect.ValueOf(s).Kind() { + case reflect.Array, reflect.Slice: + x, err := Iterate(ctx, s) + if err != nil { + return fmt.Errorf(`failed to iterate over array/slice type: %w`, err) + } + iter = x + default: + ssrc, ok := s.(Source) + if !ok { + return fmt.Errorf(`cannot iterate over %T: not a arrayiter.Source type`, s) + } + iter = ssrc.Iterate(ctx) + } + + dst := reflect.ValueOf(v) + + // dst MUST be a pointer to a array type + if kind := dst.Kind(); kind != reflect.Ptr { + return fmt.Errorf(`dst must be a pointer to a array (%s)`, dst.Type()) + } + + dst = dst.Elem() + switch dst.Kind() { + case reflect.Array, reflect.Slice: + default: + return fmt.Errorf(`dst must be a pointer to an array or slice (%s)`, dst.Type()) + } + + var pairs []*Pair + for iter.Next(ctx) { + pair := iter.Pair() + pairs = append(pairs, pair) + } + + switch dst.Kind() { + case reflect.Array: + if len(pairs) < dst.Len() { + return fmt.Errorf(`dst array does not have enough space for elements (%d, want %d)`, dst.Len(), len(pairs)) + } + case reflect.Slice: + if dst.IsNil() { + dst.Set(reflect.MakeSlice(dst.Type(), len(pairs), len(pairs))) + 
} + } + + // dst must be assignable + if !dst.CanSet() { + return fmt.Errorf(`dst is not writeable`) + } + + elemtyp := dst.Type().Elem() + for _, pair := range pairs { + rvvalue := reflect.ValueOf(pair.Value) + + if !rvvalue.Type().AssignableTo(elemtyp) { + return fmt.Errorf(`cannot assign key of type %s to map key of type %s`, rvvalue.Type(), elemtyp) + } + + dst.Index(pair.Index).Set(rvvalue) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/iter/mapiter/mapiter.go b/vendor/github.com/lestrrat-go/iter/mapiter/mapiter.go new file mode 100644 index 0000000000..ec332855eb --- /dev/null +++ b/vendor/github.com/lestrrat-go/iter/mapiter/mapiter.go @@ -0,0 +1,195 @@ +package mapiter + +import ( + "context" + "fmt" + "reflect" + "sync" +) + +// Iterate creates an iterator from arbitrary map types. This is not +// the most efficient tool, but it's the quickest way to create an +// iterator for maps. +// Also, note that you cannot make any assumptions on the order of +// pairs being returned. 
+func Iterate(ctx context.Context, m interface{}) (Iterator, error) { + mrv := reflect.ValueOf(m) + + if mrv.Kind() != reflect.Map { + return nil, fmt.Errorf(`argument must be a map (%s)`, mrv.Type()) + } + + ch := make(chan *Pair) + go func(ctx context.Context, ch chan *Pair, mrv reflect.Value) { + defer close(ch) + for _, key := range mrv.MapKeys() { + value := mrv.MapIndex(key) + pair := &Pair{ + Key: key.Interface(), + Value: value.Interface(), + } + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, mrv) + + return New(ch), nil +} + +// Source represents a map that knows how to create an iterator +type Source interface { + Iterate(context.Context) Iterator +} + +// Pair represents a single pair of key and value from a map +type Pair struct { + Key interface{} + Value interface{} +} + +// Iterator iterates through keys and values of a map +type Iterator interface { + Next(context.Context) bool + Pair() *Pair +} + +type iter struct { + ch chan *Pair + mu sync.RWMutex + next *Pair +} + +// Visitor represents an object that handles each pair in a map +type Visitor interface { + Visit(interface{}, interface{}) error +} + +// VisitorFunc is a type of Visitor based on a function +type VisitorFunc func(interface{}, interface{}) error + +func (fn VisitorFunc) Visit(s interface{}, v interface{}) error { + return fn(s, v) +} + +func New(ch chan *Pair) Iterator { + return &iter{ + ch: ch, + } +} + +// Next returns true if there are more items to read from the iterator +func (i *iter) Next(ctx context.Context) bool { + i.mu.RLock() + if i.ch == nil { + i.mu.RUnlock() + return false + } + i.mu.RUnlock() + + i.mu.Lock() + defer i.mu.Unlock() + select { + case <-ctx.Done(): + i.ch = nil + return false + case v, ok := <-i.ch: + if !ok { + i.ch = nil + return false + } + i.next = v + return true + } + + //nolint:govet + return false // never reached +} + +// Pair returns the currently buffered Pair. 
Calling Next() will reset its value +func (i *iter) Pair() *Pair { + i.mu.RLock() + defer i.mu.RUnlock() + return i.next +} + +// Walk walks through each element in the map +func Walk(ctx context.Context, s Source, v Visitor) error { + for i := s.Iterate(ctx); i.Next(ctx); { + pair := i.Pair() + if err := v.Visit(pair.Key, pair.Value); err != nil { + return fmt.Errorf(`failed to visit key %s: %w`, pair.Key, err) + } + } + return nil +} + +// AsMap returns the values obtained from the source as a map +func AsMap(ctx context.Context, s interface{}, v interface{}) error { + var iter Iterator + switch reflect.ValueOf(s).Kind() { + case reflect.Map: + x, err := Iterate(ctx, s) + if err != nil { + return fmt.Errorf(`failed to iterate over map type: %w`, err) + } + iter = x + default: + ssrc, ok := s.(Source) + if !ok { + return fmt.Errorf(`cannot iterate over %T: not a mapiter.Source type`, s) + } + iter = ssrc.Iterate(ctx) + } + + dst := reflect.ValueOf(v) + + // dst MUST be a pointer to a map type + if kind := dst.Kind(); kind != reflect.Ptr { + return fmt.Errorf(`dst must be a pointer to a map (%s)`, dst.Type()) + } + + dst = dst.Elem() + if dst.Kind() != reflect.Map { + return fmt.Errorf(`dst must be a pointer to a map (%s)`, dst.Type()) + } + + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + + // dst must be assignable + if !dst.CanSet() { + return fmt.Errorf(`dst is not writeable`) + } + + keytyp := dst.Type().Key() + valtyp := dst.Type().Elem() + + for iter.Next(ctx) { + pair := iter.Pair() + + rvkey := reflect.ValueOf(pair.Key) + rvvalue := reflect.ValueOf(pair.Value) + + if !rvkey.Type().AssignableTo(keytyp) { + return fmt.Errorf(`cannot assign key of type %s to map key of type %s`, rvkey.Type(), keytyp) + } + + switch rvvalue.Kind() { + // we can only check if we can assign to rvvalue to valtyp if it's non-nil + case reflect.Invalid: + rvvalue = reflect.New(valtyp).Elem() + default: + if !rvvalue.Type().AssignableTo(valtyp) { + return 
fmt.Errorf(`cannot assign value of type %s to map value of type %s`, rvvalue.Type(), valtyp) + } + } + + dst.SetMapIndex(rvkey, rvvalue) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/.gitignore b/vendor/github.com/lestrrat-go/jwx/v2/.gitignore new file mode 100644 index 0000000000..605219c24f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/.gitignore @@ -0,0 +1,37 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# IDE +.idea +.vscode +.DS_Store +*~ + +coverage.out + +# I redirect my test output to files named "out" way too often +out + +cmd/jwx/jwx diff --git a/vendor/github.com/lestrrat-go/jwx/v2/.golangci.yml b/vendor/github.com/lestrrat-go/jwx/v2/.golangci.yml new file mode 100644 index 0000000000..aa4a7a65b4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/.golangci.yml @@ -0,0 +1,97 @@ +run: + +linters-settings: + govet: + enable-all: true + disable: + - shadow + - fieldalignment + +linters: + enable-all: true + disable: + - cyclop + - dupl + - exhaustive + - exhaustivestruct + - errorlint + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofumpt + - golint #deprecated + - gomnd + - gosec + - govet + - interfacer # deprecated + - ifshort + - ireturn # No, I _LIKE_ returning interfaces + - lll + - maintidx # Do this in code review + - maligned # deprecated + - makezero + - nakedret + - nestif + - nlreturn + - nonamedreturns # visit this back later + - nosnakecase + - paralleltest + - scopelint # deprecated + - tagliatelle + - testpackage + - thelper # Tests are fine + - varnamelen # Short names are ok + - wrapcheck + - wsl + +issues: + exclude-rules: + # not needed + - path: /*.go + text: 
"ST1003: should not use underscores in package names" + linters: + - stylecheck + - path: /*.go + text: "don't use an underscore in package name" + linters: + - revive + - path: /*.go + linters: + - contextcheck + - exhaustruct + - path: /main.go + linters: + - errcheck + - path: internal/codegen/codegen.go + linters: + - errcheck + - path: internal/jwxtest/jwxtest.go + linters: + - errcheck + - errchkjson + - forcetypeassert + - path: /*_test.go + linters: + - errcheck + - errchkjson + - forcetypeassert + - path: /*_example_test.go + linters: + - forbidigo + - path: cmd/jwx/jwx.go + linters: + - forbidigo + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/Changes b/vendor/github.com/lestrrat-go/jwx/v2/Changes new file mode 100644 index 0000000000..f68a2efca5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/Changes @@ -0,0 +1,208 @@ +Changes +======= + +v2 has many incompatibilities with v1. To see the full list of differences between +v1 and v2, please read the Changes-v2.md file (https://github.com/lestrrat-go/jwx/blob/develop/v2/Changes-v2.md) + +v2.0.8 - 25 Nov 2022 +[Security Fixes] + * [jws][jwe] Starting from go 1.19, code related to elliptic algorithms + panics (instead of returning an error) when certain methods + such as `ScalarMult` are called using points that are not on the + elliptic curve being used. + + Using inputs that cause this condition, and you accept unverified JWK + from the outside it may be possible for a third-party to cause panics + in your program. + + This has been fixed by verifying that the point being used is actually + on the curve before such computations (#840) +[Miscellaneous] + * `jwx.GuessFormat` now returns `jwx.InvalidFormat` when the heuristics + is sure that the buffer format is invalid. 
+ +v2.0.7 - 15 Nov 2022 +[New features] + * [jwt] Each `jwt.Token` now has an `Options()` method + * [jwt] `jwt.Settings(jwt.WithFlattenedAudience(true))` has a slightly + different semantic than before. Instead of changing a global variable, + it now specifies that the default value of each per-token option for + `jwt.FlattenAudience` is true. + + Therefore, this is what happens: + + // No global settings + tok := jwt.New() + tok.Options.IsEnabled(jwt.FlattenAudience) // false + + // With global settings + jwt.Settings(jwt.WithFlattenedAudience(true)) + tok := jwt.New() + tok.Options.IsEnabled(jwt.FlattenAudience) // true + // But you can still turn FlattenAudience off for this + // token alone + tok.Options.Disable(jwt.FlattenAudience) + + Note that while unlikely to happen for users relying on the old behavior, + this change DOES introduce timing issues: whereas old versions switched the + JSON marshaling for ALL tokens immediately after calling `jwt.Settings`, + the new behavior does NOT affect tokens that have been created before the + call to `jwt.Settings` (but marshaled afterwards). + + So the following may happen: + + // < v2.0.7 + tok := jwt.New() + jwt.Settings(jwt.WithFlattenedAudience(true)) + json.Marshal(tok) // flatten = on + + // >= v2.0.7 + tok := jwt.New() // flatten = off + jwt.Settings(jwt.WithFlattenedAudience(true)) + json.Marshal(tok) // flatten is still off + + It is recommended that you only set the global setting once at the + very beginning of your program to avoid problems. + + Also note that `Clone()` copies the settings as well. + +[Miscellaneous] + * WithCompact's stringification should have been that of the + internal indentity struct ("WithSerialization"), but it was + wrongly producing "WithCompact". This has been fixed. + * Go Workspaces have been enabled within this module. + - When developing, modules will refer to the main jwx module that they + are part of. 
This allows us to explicitly specify the dependency version + in, for example, ./cmd/jwx/go.mod but still develop against the local version. + - If you are using `goimports` and other tools, you might want to upgrade + binaries -- for example, when using vim-go's auto-format-on-save feature, + my old binaries took well over 5~10 seconds to compute the import paths. + This was fixed when I switched to using go1.19, and upgraded the binaries + used by vim-go + +v2.0.6 - 25 Aug 2022 +[Bug fixes][Security] + * [jwe] Agreement Party UInfo and VInfo (apv/apu) were not properly being + passed to the functions to compute the aad when encrypting using ECDH-ES + family of algorithms. Therefore, when using apu/apv, messages encrypted + via this module would have failed to be properly decrypted. + + Please note that bogus encrypted messages would not have succeed being + decrypted (i.e. this problem does not allow spoofed messages to be decrypted). + Therefore this would not have caused unwanted data to to creep in -- + however it did pose problems for data to be sent and decrypted from this module + when using ECDH-ES with apu/apv. + + While not extensively tested, we believe this regression was introduced + with the v2 release. + +v2.0.5 - 11 Aug 2022 +[Bug fixes] + * [jwt] Remove stray debug log + * [jwk] Fix x5u field name, caused by a typo + * [misc] Update golangci-lint action to v3; v2 was causing weird problems + +v2.0.4 - 19 Jul 2022 +[Bug Fixes] + * [jwk] github.com/lestrrat-go/httprc, which jwk.Cache depends on, + had a problem with inserting URLs to be re-fetched into its queue. + As a result it could have been the case that some JWKS were not + updated properly. Please upgrade if you use jwk.Cache. + + * [jwk] cert.Get could fail with an out of bounds index look up + + * [jwk] Fix doc buglet in `KeyType()` method + +[New Features] + * [jws] Add `jws.WithMultipleKeysPerKeyID()` sub-option to allow non-unique + key IDs in a given JWK set. 
By default we assume that a key ID is unique + within a key set, but enabling this option allows you to handle JWK sets + that contain multiple keys that contain the same key ID. + + * [jwt] Before v2.0.1, sub-second accuracy for time based fields + (i.e. `iat`, `exp`, `nbf`) were not respected. Because of this the code + to evaluate this code had always truncated any-subsecond portion + of these fields, and therefore no sub-second comparisons worked. + A new option for validation `jwt.WithTruncation()` has been added + to workaround this. This option controls the value used to truncate + the time fields. When set to 0, sub-second comparison would be + possible. + FIY, truncatation will still happen because we do not want to + use the monotonic clocks when making comparisons. It's just that + truncating using `0` as its argument effectively only strips out + the monotonic clock + +v2.0.3 - 13 Jun 2022 +[Bug Fixes] + * [jwk] Update dependency on github.com/lestrrat-go/httprc to v1.0.2 to + avoid unintended blocking in the update goroutine for jwk.Cache + +v2.0.2 - 23 May 2022 +[Bug Fixes][Security] + * [jwe] An old bug from at least 7 years ago existed in handling AES-CBC unpadding, + where the unpad operation might remove more bytes than necessary (#744) + This affects all jwx code that is available before v2.0.2 and v1.2.25. + +[New Features] + * [jwt] RFC3339 timestamps are also accepted for Numeric Date types in JWT tokens. + This allows users to parse servers that errnously use RFC3339 timestamps in + some pre-defined fields. You can change this behavior by setting + `jwt.WithNumericDateParsePedantic` to `false` + * [jwt] `jwt.WithNumericDateParsePedantic` has been added. This is a global + option that is set using `jwt.Settings` + +v2.0.1 - 06 May 2022 + * [jwk] `jwk.Set` had erronously been documented as not returning an error + when the same key already exists in the set. 
This is a behavior change + since v2, and it was missing in the docs (#730) + * [jwt] `jwt.ErrMissingRequiredClaim` has been deprecated. Please use + `jwt.ErrRequiredClaim` instead. + * [jwt] `jwt.WithNumericDateParsePrecision` and `jwt.WithNumericDateFormatPrecision` + have been added to parse and format fractional seconds. These options can be + passed to `jwt.Settings`. + The default precision is set to 0, and fractional portions are not parsed nor + formatted. The precision may be set up to 9. + * `golang.org/x/crypto` has been upgraded (#724) + * `io/ioutil` has been removed from the source code. + +v2.0.0 - 24 Apr 2022 + * This i the first v2 release, which represents a set of design changes + that were learnt over the previous 2 years. As a result the v2 API + should be much more consistent and uniform across packages, and + should be much more flexible to accomodate real-world needs. + + For a complete list of changes, please see the Changes-v2.md file, + or check the diff at https://github.com/lestrrat-go/jwx/compare/v1...v2 + +[Miscellaneous] + * Minor house cleaning on code generation tools + +[jwt] + * `jwt.ErrMissingRequiredClaim()` has been added + +v2.0.0-beta2 - 16 Apr 2022 +[jwk] + * Updated `jwk.Set` API and reflected pending changes from v1 which were + left over. Please see Changes-v2.md file for details. + + * Added `jwk.CachedSet`, a shim over `jwk.Cache` that allows you to + have to write wrappers around `jwk.Cache` that retrieves a particular + `jwk.Set` out of it. You can use it to, for example, pass `jwk.CachedSet` + to a `jws.Verify` + + cache := jwk.NewCache(ctx) + cache.Register(ctx, jwksURL) + cachedSet := jwk.NewCachedSet(cache, jwksURL) + jws.Verify(signed, jws.WithKeySet(cachedSet)) + +v2.0.0-beta1 - 09 Apr 2022 +[Miscellaneous] + * Renamed Changes.v2 to Changes-v2.md + * Housecleaning for lint action. + * While v2 was not affected, ported over equivalent test for #681 to catch + regressions in the future. 
+ * Please note that there is no stability guarantees on pre-releases. + +v2.0.0-alpha1 - 04 Apr 2022 + * Initial pre-release of v2 line. Please note that there is no stability guarantees + on pre-releases. diff --git a/vendor/github.com/lestrrat-go/jwx/v2/Changes-v2.md b/vendor/github.com/lestrrat-go/jwx/v2/Changes-v2.md new file mode 100644 index 0000000000..1395c39a6b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/Changes-v2.md @@ -0,0 +1,390 @@ +# Incompatible Changes from v1 to v2 + +These are changes that are incompatible with the v1.x.x version. + +* [tl;dr](#tldr) - If you don't feel like reading the details -- but you will read the details, right? +* [Detailed List of Changes](#detailed-list-of-changes) - A comprehensive list of changes from v1 to v2 + +# tl;dr + +## JWT + +```go +// most basic +jwt.Parse(serialized, jwt.WithKey(alg, key)) // NOTE: verification and validation are ENABLED by default! +jwt.Sign(token, jwt.WithKey(alg,key)) + +// with a jwk.Set +jwt.Parse(serialized, jwt.WithKeySet(set)) + +// UseDefault/InferAlgorithm with JWKS +jwt.Parse(serialized, jwt.WithKeySet(set, + jws.WithUseDefault(true), jws.WithInferAlgorithm(true)) + +// Use `jku` +jwt.Parse(serialized, jwt.WithVerifyAuto(...)) + +// Any other custom key provisioning (using functions in this +// example, but can be anything that fulfills jws.KeyProvider) +jwt.Parse(serialized, jwt.WithKeyProvider(jws.KeyProviderFunc(...))) +``` + +## JWK + +```go +// jwk.New() was confusing. Renamed to fit the actual implementation +key, err := jwk.FromRaw(rawKey) + +// Algorithm() now returns jwa.KeyAlgorithm type. `jws.Sign()` +// and other function that receive JWK algorithm names accept +// this new type, so you can use the same key and do the following +// (previosly you needed to type assert) +jws.Sign(payload, jws.WithKey(key.Algorithm(), key)) + +// If you need the specific type, type assert +key.Algorithm().(jwa.SignatureAlgorithm) + +// jwk.AutoRefresh is no more. 
Use jwk.Cache +cache := jwk.NewCache(ctx, options...) + +// Certificate chains are no longer jwk.CertificateChain type, but +// *(github.com/lestrrat-go/jwx/cert).Chain +cc := key.X509CertChain() // this is *cert.Chain now +``` + +## JWS + +```go +// basic +jws.Sign(payload, jws.WithKey(alg, key)) +jws.Sign(payload, jws.WithKey(alg, key), jws.WithKey(alg, key), jws.WithJSON(true)) +jws.Verify(signed, jws.WithKey(alg, key)) + +// other ways to pass the key +jws.Sign(payload, jws.WithKeySet(jwks)) +jws.Sign(payload, jws.WithKeyProvider(kp)) + +// retrieve the key that succeeded in verifying +var keyUsed interface{} +jws.Verify(signed, jws.WithKeySet(jwks), jws.WithKeyUsed(&keyUsed)) +``` + +## JWE + +```go +// basic +jwe.Encrypt(payload, jwe.WithKey(alg, key)) // other defaults are infered +jwe.Encrypt(payload, jwe.WithKey(alg, key), jwe.WithKey(alg, key), jwe.WithJSON(true)) +jwe.Decrypt(encrypted, jwe.WithKey(alg, key)) + +// other ways to pass the key +jwe.Encrypt(payload, jwe.WithKeySet(jwks)) +jwe.Encrypt(payload, jwe.WithKeyProvider(kp)) + +// retrieve the key that succeeded in decrypting +var keyUsed interface{} +jwe.Verify(signed, jwe.WithKeySet(jwks), jwe.WithKeyUsed(&keyUsed)) +``` + +# Detailed List of Changes + +## Module + +* Module now requires go 1.16 + +* Use of github.com/pkg/errors is no more. If you were relying on bevaior + that depends on the errors being an instance of github.com/pkg/errors + then you need to change your code + +* File-generation tools have been moved out of internal/ directories. + These files pre-dates Go modules, and they were in internal/ in order + to avoid being listed in the `go doc` -- however, now that we can + make them separate modules this is no longer necessary. 
+ +* New package `cert` has been added to handle `x5c` certificate + chains, and to work with certificates + * cert.Chain to store base64 encoded ASN.1 DER format certificates + * cert.EncodeBase64 to encode ASN.1 DER format certificate using base64 + * cert.Create to create a base64 encoded ASN.1 DER format certificates + * cert.Parse to parse base64 encoded ASN.1 DER format certificates + +## JWE + +* `jwe.Compact()`'s signature has changed to + `jwe.Compact(*jwe.Message, ...jwe.CompactOption)` + +* `jwe.JSON()` has been removed. You can generate JSON serialization + using `jwe.Encrypt(jwe.WitJSON())` or `json.Marshal(jwe.Message)` + +* `(jwe.Message).Decrypt()` has been removed. Since formatting of the + original serialized message matters (including whitespace), using a parsed + object was inherently confusing. + +* `jwe.Encrypt()` can now generate JWE messages in either compact or JSON + forms. By default, the compact form is used. JSON format can be + enabled by using the `jwe.WithJSON` option. + +* `jwe.Encrypt()` can now accept multiple keys by passing multiple + `jwe.WithKey()` options. This can be used with `jwe.WithJSON` to + create JWE messages with multiple recipients. + +* `jwe.DecryptEncryptOption()` has been renamed to `jwe.EncryptDecryptOption()`. + This is so that it is more uniform with `jws` equivalent of `jws.SignVerifyOption()` + where the producer (`Sign`) comes before the consumer (`Verify`) in the naming + +* `jwe.WithCompact` and `jwe.WithJSON` options have been added + to control the serialization format. + +* jwe.Decrypt()'s method signature has been changed to `jwt.Decrypt([]byte, ...jwe.DecryptOption) ([]byte, error)`. + These options can be stacked. Therefore, you could configure the + verification process to attempt a static key pair, a JWKS, and only + try other forms if the first two fails, for example. 
+ + - For static key pair, use `jwe.WithKey()` + - For static JWKS, use `jwe.WithKeySet()` (NOTE: InferAlgorithmFromKey like in `jws` package is NOT supported) + - For custom, possibly dynamic key provisioning, use `jwe.WithKeyProvider()` + +* jwe.Decrypter has been unexported. Users did not need this. + +* jwe.WithKeyProvider() has been added to specify arbitrary + code to specify which keys to try. + +* jwe.KeyProvider interface has been added + +* jwe.KeyProviderFunc has been added + +* `WithPostParser()` has been removed. You can achieve the same effect + by using `jwe.WithKeyProvider()`. Because this was the only consumer for + `jwe.DecryptCtx`, this type has been removed as well. + +* `x5c` field type has been changed to `*cert.Chain` instead of `[]string` + +* Method signature for `jwe.Parse()` has been changed to include options, + but options are currently not used + +* `jwe.ReadFile` now supports the option `jwe.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +* jwe.WithKeyUsed has been added to allow users to retrieve + the key used for decryption. This is useful in cases you provided + multiple keys and you want to know which one was successful + +## JWK + +* `jwk.New()` has been renamed to `jwk.FromRaw()`, which hopefully will + make it easier for the users what the input should be. + +* `jwk.Set` has many interface changes: + * Changed methods to match jwk.Key and its semantics: + * Field is now Get() (returns values for arbitrary fields other than keys). Fetching a key is done via Key() + * Remove() now removes arbitrary fields, not keys. to remove keys, use RemoveKey() + * Iterate has been added to iterate through all non-key fields. + * Add is now AddKey(Key) string, and returns an error when the same key is added + * Get is now Key(int) (Key, bool) + * Remove is now RemoveKey(Key) error + * Iterate is now Keys(context.Context) KeyIterator + * Clear is now Clear() error + +* `jwk.CachedSet` has been added. 
You can create a `jwk.Set` that is backed by + `jwk.Cache` so you can do this: + +```go +cache := jkw.NewCache(ctx) +cachedSet := jwk.NewCachedSet(cache, jwksURI) + +// cachedSet is always the refreshed, cached version from jwk.Cache +jws.Verify(signed, jws.WithKeySet(cachedSet)) +``` + +* `jwk.NewRSAPRivateKey()`, `jwk.NewECDSAPrivateKey()`, etc have been removed. + There is no longer any way to create concrete types of `jwk.Key` + +* `jwk.Key` type no longer supports direct unmarshaling via `json.Unmarshal()`, + because you can no longer instantiate concrete `jwk.Key` types. You will need to + use `jwk.ParseKey()`. See the documentation for ways to parse JWKs. + +* `(jwk.Key).Algorithm()` is now of `jwk.KeyAlgorithm` type. This field used + to be `string` and therefore could not be passed directly to `jwt.Sign()` + `jws.Sign()`, `jwe.Encrypt()`, et al. This is no longer the case, and + now you can pass it directly. See + https://github.com/lestrrat-go/jwx/blob/v2/docs/99-faq.md#why-is-jwkkeyalgorithm-and-jwakeyalgorithm-so-confusing + for more details + +* `jwk.Fetcher` and `jwk.FetchFunc` has been added. + They represent something that can fetch a `jwk.Set` + +* `jwk.CertificateChain` has been removed, use `*cert.Chain` +* `x5c` field type has been changed to `*cert.Chain` instead of `[]*x509.Certificate` + +* `jwk.ReadFile` now supports the option `jwk.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +* Added `jwk.PostFetcher`, `jwk.PostFetchFunc`, and `jwk.WithPostFetch` to + allow users to get at the `jwk.Set` that was fetched in `jwk.Cache`. + This will make it possible for users to supply extra information and edit + `jwk.Set` after it has been fetched and parsed, but before it is cached. + You could, for example, modify the `alg` field so that it's easier to + work with when you use it in `jws.Verify` later. + +* Reworked `jwk.AutoRefresh` in terms of `github.com/lestrrat-go/httprc` + and renamed it `jwk.Cache`. 
+ + Major difference between `jwk.AutoRefresh` and `jwk.Cache` is that while + former used one `time.Timer` per resource, the latter uses a static timer + (based on `jwk.WithRefreshWindow()` value, default 15 minutes) that periodically + refreshes all resources that were due to be refreshed within that time frame. + + This method may cause your updates to happen slightly later, but uses significantly + less resources and is less prone to clogging. + +* Reimplemented `jwk.Fetch` in terms of `github.com/lestrrat-go/httprc`. + +* Previously `jwk.Fetch` and `jwk.AutoRefresh` respected backoff options, + but this has been removed. This is to avoid unwanted clogging of the fetch workers + which is the default processing mode in `github.com/lestrrat-go/httprc`. + + If you are using backoffs, you need to control your inputs more carefully so as to + not clog your fetch queue, and therefore you should be writing custom code that + suits your needs + +## JWS + +* `jws.Sign()` can now generate JWS messages in either compact or JSON + forms. By default, the compact form is used. JSON format can be + enabled by using the `jws.WithJSON` option. + +* `jws.Sign()` can now accept multiple keys by passing multiple + `jws.WithKey()` options. This can be used with `jws.WithJSON` to + create JWS messages with multiple signatures. + +* `jws.WithCompact` and `jws.WithJSON` options have been added + to control the serialization format. + +* jws.Verify()'s method signature has been changed to `jwt.Verify([]byte, ...jws.VerifyOption) ([]byte, error)`. + These options can be stacked. Therefore, you could configure the + verification process to attempt a static key pair, a JWKS, and only + try other forms if the first two fails, for example. 
+ + - For static key pair, use `jws.WithKey()` + - For static JWKS, use `jws.WithKeySet()` + - For enabling verification using `jku`, use `jws.WithVerifyAuto()` + - For custom, possibly dynamic key provisioning, use `jws.WithKeyProvider()` + +* jws.WithVerify() has been removed. + +* jws.WithKey() has been added to specify an algorithm + key to + verify the payload with. + +* jws.WithKeySet() has been added to specify a JWKS to be used for + verification. By default `kid` AND `alg` must match between the signature + and the key. + + The option can take further suboptions: + +```go +jws.Parse(serialized, + jws.WithKeySet(set, + // by default `kid` is required. set false to disable. + jws.WithRequireKid(false), + // optionally skip matching kid if there's exactly one key in set + jws.WithUseDefault(true), + // infer algorithm name from key type + jws.WithInferAlgorithm(true), + ), +) +``` + +* `jws.VerifuAuto` has been removed in favor of using + `jws.WithVerifyAuto` option with `jws.Verify()` + +* `jws.WithVerifyAuto` has been added to enable verification + using `jku`. + + The first argument must be a jwk.Fetcher object, but can be + set to `nil` to use the default implementation which is `jwk.Fetch` + + The rest of the arguments are treated as options passed to the + `(jwk.Fetcher).Fetch()` function. + +* Remove `jws.WithPayloadSigner()`. This should be completely repleceable + using `jws.WithKey()` + +* jws.WithKeyProvider() has been added to specify arbitrary + code to specify which keys to try. + +* jws.KeyProvider interface has been added + +* jws.KeyProviderFunc has been added + +* jws.WithKeyUsed has been added to allow users to retrieve + the key used for verification. 
This is useful in cases you provided + multiple keys and you want to know which one was successful + +* `x5c` field type has been changed to `*cert.Chain` instead of `[]string` + +* `jws.ReadFile` now supports the option `jws.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +## JWT + +* `jwt.Parse` now verifies the signature and validates the token + by default. You must disable it explicitly using `jwt.WithValidate(false)` + and/or `jwt.WithVerify(false)` if you only want to parse the JWT message. + + If you don't want either, a convenience function `jwt.ParseInsecure` + has been added. + +* `jwt.Parse` can only parse raw JWT (JSON) or JWS (JSON or Compact). + It no longer accepts JWE messages. + +* `jwt.WithDecrypt` has been removed + +* `jwt.WithJweHeaders` has been removed + +* `jwt.WithVerify()` has been renamed to `jwt.WithKey()`. The option can + be used for signing, encryption, and parsing. + +* `jwt.Validator` has been changed to return `jwt.ValidationError`. + If you provide a custom validator, you should wrap the error with + `jwt.NewValidationError()` + +* `jwt.UseDefault()` has been removed. You should use `jws.WithUseDefault()` + as a suboption in the `jwt.WithKeySet()` option. + +```go +jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithUseDefault(true))) +``` + +* `jwt.InferAlgorithmFromKey()` has been removed. You should use + `jws.WithInferAlgorithmFromKey()` as a suboption in the `jwt.WithKeySet()` option. + +```go +jwt.Parse(serialized, jwt.WithKeySet(set, jws.WithInferAlgorithmFromKey(true))) +``` + +* jwt.WithKeySetProvider has been removed. Use `jwt.WithKeyProvider()` + instead. 
If jwt.WithKeyProvider seems a bit complicated, use a combination of + JWS parse, no-verify/validate JWT parse, and an extra JWS verify: + +```go +msg, _ := jws.Parse(signed) +token, _ := jwt.Parse(msg.Payload(), jwt.WithVerify(false), jwt.WithValidate(false)) +// Get information out of token, for example, `iss` +switch token.Issuer() { +case ...: + jws.Verify(signed, jwt.WithKey(...)) +} +``` + +* `jwt.WithHeaders` and `jwt.WithJwsHeaders` have been removed. + You should be able to use the new `jwt.WithKey` option to pass headers + +* `jwt.WithSignOption` and `jwt.WithEncryptOption` have been added as + escape hatches for options that are declared in `jws` and `jwe` packages + but not in `jwt` + +* `jwt.ReadFile` now supports the option `jwt.WithFS` which allows you to + read data from arbitrary `fs.FS` objects + +* `jwt.Sign()` has been changed so that it works more like the new `jws.Sign()` + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/LICENSE b/vendor/github.com/lestrrat-go/jwx/v2/LICENSE new file mode 100644 index 0000000000..205e33a7f1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/Makefile b/vendor/github.com/lestrrat-go/jwx/v2/Makefile new file mode 100644 index 0000000000..f3247b25e1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/Makefile @@ -0,0 +1,83 @@ +.PHONY: generate realclean cover viewcover test lint check_diffs imports tidy jwx +generate: + @go generate + @$(MAKE) generate-jwa generate-jwe generate-jwk generate-jws generate-jwt + @./tools/cmd/gofmt.sh + +generate-%: + @go generate $(shell pwd -P)/$(patsubst generate-%,%,$@) + +realclean: + rm coverage.out + +test-cmd: + env TESTOPTS="$(TESTOPTS)" ./tools/test.sh + +test: + $(MAKE) test-stdlib TESTOPTS= + +test-stdlib: + $(MAKE) test-cmd TESTOPTS= + +test-goccy: + $(MAKE) test-cmd TESTOPTS="-tags jwx_goccy" + +test-es256k: + $(MAKE) test-cmd TESTOPTS="-tags jwx_es256k" + +test-alltags: + $(MAKE) test-cmd TESTOPTS="-tags jwx_goccy,jwx_es256k" + +cover-cmd: + env MODE=cover ./tools/test.sh + +cover: + $(MAKE) cover-stdlib + +cover-stdlib: + $(MAKE) cover-cmd TESTOPTS= + +cover-goccy: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_goccy" + +cover-es256k: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_es256k" + +cover-alltags: + $(MAKE) cover-cmd TESTOPTS="-tags jwx_goccy,jwx_es256k" + +smoke-cmd: + env MODE=short ./tools/test.sh + +smoke: + $(MAKE) smoke-stdlib + +smoke-stdlib: + $(MAKE) smoke-cmd TESTOPTS= + +smoke-goccy: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy" + +smoke-es256k: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_es256k" + +smoke-alltags: + $(MAKE) smoke-cmd TESTOPTS="-tags jwx_goccy,jwx_es256k" + +viewcover: + go tool cover -html=coverage.out + +lint: + golangci-lint run ./... 
+ +check_diffs: + ./scripts/check-diff.sh + +imports: + goimports -w ./ + +tidy: + ./scripts/tidy.sh + +jwx: + @./tools/cmd/install-jwx.sh diff --git a/vendor/github.com/lestrrat-go/jwx/v2/README.md b/vendor/github.com/lestrrat-go/jwx/v2/README.md new file mode 100644 index 0000000000..e9f2a84fad --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/README.md @@ -0,0 +1,251 @@ +# github.com/lestrrat-go/jwx/v2 ![](https://github.com/lestrrat-go/jwx/workflows/CI/badge.svg?branch=v2) [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2) [![codecov.io](https://codecov.io/github/lestrrat-go/jwx/coverage.svg?branch=v2)](https://codecov.io/github/lestrrat-go/jwx?branch=v2) + +Go module implementing various JWx (JWA/JWE/JWK/JWS/JWT, otherwise known as JOSE) technologies. + +If you are using this module in your product or your company, please add your product and/or company name in the [Wiki](https://github.com/lestrrat-go/jwx/wiki/Users)! It really helps keeping up our motivation. + +# Features + +* Complete coverage of JWA/JWE/JWK/JWS/JWT, not just JWT+minimum tool set. + * Supports JWS messages with multiple signatures, both compact and JSON serialization + * Supports JWS with detached payload + * Supports JWS with unencoded payload (RFC7797) + * Supports JWE messages with multiple recipients, both compact and JSON serialization + * Most operations work with either JWK or raw keys e.g. *rsa.PrivateKey, *ecdsa.PrivateKey, etc). +* Opinionated, but very uniform API. Everything is symmetric, and follows a standard convetion + * jws.Parse/Verify/Sign + * jwe.Parse/Encrypt/Decrypt + * Arguments are organized as explicit required paramters and optional WithXXXX() style options. 
+* Extra utilities + * `jwk.Cache` to always keep a JWKS up-to-date + +Some more in-depth discussion on why you might want to use this library over others +can be found in the [Description section](#description) + +If you are using v0 or v1, you are strongly encouraged to migrate to using v2 +(the version that comes with the README you are reading). + +# SYNOPSIS + + +```go +package examples_test + +import ( + "bytes" + "fmt" + "net/http" + "time" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/jwx/v2/jwt" +) + +func ExampleJWX() { + // Parse, serialize, slice and dice JWKs! + privkey, err := jwk.ParseKey(jsonRSAPrivateKey) + if err != nil { + fmt.Printf("failed to parse JWK: %s\n", err) + return + } + + pubkey, err := jwk.PublicKeyOf(privkey) + if err != nil { + fmt.Printf("failed to get public key: %s\n", err) + return + } + + // Work with JWTs! + { + // Build a JWT! + tok, err := jwt.NewBuilder(). + Issuer(`github.com/lestrrat-go/jwx`). + IssuedAt(time.Now()). + Build() + if err != nil { + fmt.Printf("failed to build token: %s\n", err) + return + } + + // Sign a JWT! + signed, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, privkey)) + if err != nil { + fmt.Printf("failed to sign token: %s\n", err) + return + } + + // Verify a JWT! + { + verifiedToken, err := jwt.Parse(signed, jwt.WithKey(jwa.RS256, pubkey)) + if err != nil { + fmt.Printf("failed to verify JWS: %s\n", err) + return + } + _ = verifiedToken + } + + // Work with *http.Request! 
+ { + req, err := http.NewRequest(http.MethodGet, `https://github.com/lestrrat-go/jwx`, nil) + req.Header.Set(`Authorization`, fmt.Sprintf(`Bearer %s`, signed)) + + verifiedToken, err := jwt.ParseRequest(req, jwt.WithKey(jwa.RS256, pubkey)) + if err != nil { + fmt.Printf("failed to verify token from HTTP request: %s\n", err) + return + } + _ = verifiedToken + } + } + + // Encrypt and Decrypt arbitrary payload with JWE! + { + encrypted, err := jwe.Encrypt(payloadLoremIpsum, jwe.WithKey(jwa.RSA_OAEP, jwkRSAPublicKey)) + if err != nil { + fmt.Printf("failed to encrypt payload: %s\n", err) + return + } + + decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA_OAEP, jwkRSAPrivateKey)) + if err != nil { + fmt.Printf("failed to decrypt payload: %s\n", err) + return + } + + if !bytes.Equal(decrypted, payloadLoremIpsum) { + fmt.Printf("verified payload did not match\n") + return + } + } + + // Sign and Verify arbitrary payload with JWS! + { + signed, err := jws.Sign(payloadLoremIpsum, jws.WithKey(jwa.RS256, jwkRSAPrivateKey)) + if err != nil { + fmt.Printf("failed to sign payload: %s\n", err) + return + } + + verified, err := jws.Verify(signed, jws.WithKey(jwa.RS256, jwkRSAPublicKey)) + if err != nil { + fmt.Printf("failed to verify payload: %s\n", err) + return + } + + if !bytes.Equal(verified, payloadLoremIpsum) { + fmt.Printf("verified payload did not match\n") + return + } + } + // OUTPUT: +} +``` +source: [examples/jwx_readme_example_test.go](https://github.com/lestrrat-go/jwx/blob/v2/examples/jwx_readme_example_test.go) + + +# How-to Documentation + +* [API documentation](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2) +* [How-to style documentation](./docs) +* [Runnable Examples](./examples) + +# Description + +This Go module implements JWA, JWE, JWK, JWS, and JWT. 
Please see the following table for the list of +available packages: + +| Package name | Notes | +|-----------------------------------------------------------|-------------------------------------------------| +| [jwt](https://github.com/lestrrat-go/jwx/tree/v2/jwt) | [RFC 7519](https://tools.ietf.org/html/rfc7519) | +| [jwk](https://github.com/lestrrat-go/jwx/tree/v2/jwk) | [RFC 7517](https://tools.ietf.org/html/rfc7517) + [RFC 7638](https://tools.ietf.org/html/rfc7638) | +| [jwa](https://github.com/lestrrat-go/jwx/tree/v2/jwa) | [RFC 7518](https://tools.ietf.org/html/rfc7518) | +| [jws](https://github.com/lestrrat-go/jwx/tree/v2/jws) | [RFC 7515](https://tools.ietf.org/html/rfc7515) + [RFC 7797](https://tools.ietf.org/html/rfc7797) | +| [jwe](https://github.com/lestrrat-go/jwx/tree/v2/jwe) | [RFC 7516](https://tools.ietf.org/html/rfc7516) | +## History + +My goal was to write a server that heavily uses JWK and JWT. At first glance +the libraries that already exist seemed sufficient, but soon I realized that + +1. To completely implement the protocols, I needed the entire JWT, JWK, JWS, JWE (and JWA, by necessity). +2. Most of the libraries that existed only deal with a subset of the various JWx specifications that were necessary to implement their specific needs + +For example, a certain library looks like it had most of JWS, JWE, JWK covered, but then it lacked the ability to include private claims in its JWT responses. Another library had support of all the private claims, but completely lacked in its flexibility to generate various different response formats. + +Because I was writing the server side (and the client side for testing), I needed the *entire* JOSE toolset to properly implement my server, **and** they needed to be *flexible* enough to fulfill the entire spec that I was writing. + +So here's `github.com/lestrrat-go/jwx/v2`. 
This library is extensible, customizable, and hopefully well organized to the point that it is easy for you to slice and dice it. + +## Why would I use this library? + +There are several other major Go modules that handle JWT and related data formats, +so why should you use this library? + +From a purely functional perspective, the only major difference is this: +Whereas most other projects only deal with what they seem necessary to handle +JWTs, this module handles the **_entire_** spectrum of JWS, JWE, JWK, and JWT. + +That is, if you need to not only parse JWTs, but also to control JWKs, or +if you need to handle payloads that are NOT JWTs, you should probably consider +using this module. You should also note that JWT is built _on top_ of those +other technologies. You simply cannot have a complete JWT package without +implementing the entirety of JWS/JWE/JWK, which this library does. + +Next, from an implementation perspective, this module differs significantly +from others in that it tries very hard to expose only the APIs, and not the +internal data. For example, individual JWT claims are not accessible through +struct field lookups. You need to use one of the getter methods. + +This is because this library takes the stance that the end user is fully capable +and even willing to shoot themselves on the foot when presented with a lax +API. By making sure that users do not have access to open structs, we can protect +users from doing silly things like creating _incomplete_ structs, or access the +structs concurrently without any protection. This structure also allows +us to put extra smarts in the structs, such as doing the right thing when +you want to parse / write custom fields (this module does not require the user +to specify alternate structs to parse objects with custom fields) + +In the end I think it comes down to your usage pattern, and priorities. 
+Some general guidelines that come to mind are: + +* If you want a single library to handle everything JWx, such as using JWE, JWK, JWS, handling [auto-refreshing JWKs](https://github.com/lestrrat-go/jwx/blob/v2/docs/04-jwk.md#auto-refreshing-remote-keys), use this module. +* If you want to honor all possible custom fields transparently, use this module. +* If you want a standardized clean API, use this module. + +Otherwise, feel free to choose something else. + +# Contributions + +## Issues + +For bug reports and feature requests, please try to follow the issue templates as much as possible. +For either bug reports or feature requests, failing tests are even better. + +## Pull Requests + +Please make sure to include tests that excercise the changes you made. + +If you are editing auto-generated files (those files with the `_gen.go` suffix, please make sure that you do the following: + +1. Edit the generator, not the generated files (e.g. internal/cmd/genreadfile/main.go) +2. Run `make generate` (or `go generate`) to generate the new code +3. Commit _both_ the generator _and_ the generated files + +## Discussions / Usage + +Please try [discussions](https://github.com/lestrrat-go/jwx/tree/v2/discussions) first. 
+ +# Related Modules + +* [github.com/lestrrat-go/echo-middileware-jwx](https://github.com/lestrrat-go/echo-middleware-jwx) - Sample Echo middleware +* [github.com/jwx-go/crypto-signer/gcp](https://github.com/jwx-go/crypto-signer/tree/main/gcp) - GCP KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer) +* [github.com/jwx-go/crypto-signer/aws](https://github.com/jwx-go/crypto-signer/tree/main/aws) - AWS KMS wrapper that implements [`crypto.Signer`](https://pkg.go.dev/crypto#Signer) + +# Credits + +* Initial work on this library was generously sponsored by HDE Inc (https://www.hde.co.jp) +* Lots of code, especially JWE was initially taken from go-jose library (https://github.com/square/go-jose) +* Lots of individual contributors have helped this project over the years. Thank each and everyone of you very much. + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/cert/cert.go b/vendor/github.com/lestrrat-go/jwx/v2/cert/cert.go new file mode 100644 index 0000000000..1dfdec65aa --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/cert/cert.go @@ -0,0 +1,48 @@ +package cert + +import ( + "crypto/x509" + stdlibb64 "encoding/base64" + "fmt" + "io" + + "github.com/lestrrat-go/jwx/v2/internal/base64" +) + +// Create is a wrapper around x509.CreateCertificate, but it additionally +// encodes it in base64 so that it can be easily added to `x5c` fields +func Create(rand io.Reader, template, parent *x509.Certificate, pub, priv interface{}) ([]byte, error) { + der, err := x509.CreateCertificate(rand, template, parent, pub, priv) + if err != nil { + return nil, fmt.Errorf(`failed to create x509 certificate: %w`, err) + } + return EncodeBase64(der) +} + +// EncodeBase64 is a utility function to encode ASN.1 DER certificates +// using base64 encoding. 
This operation is normally done by `pem.Encode` +// but since PEM would include the markers (`-----BEGIN`, and the like) +// while `x5c` fields do not need this, this function can be used to +// shave off a few lines +func EncodeBase64(der []byte) ([]byte, error) { + enc := stdlibb64.StdEncoding + dst := make([]byte, enc.EncodedLen(len(der))) + enc.Encode(dst, der) + return dst, nil +} + +// Parse is a utility function to decode a base64 encoded +// ASN.1 DER format certificate, and to parse the byte sequence. +// The certificate must be in PKIX format, and it must not contain PEM markers +func Parse(src []byte) (*x509.Certificate, error) { + dst, err := base64.Decode(src) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode the certificate: %w`, err) + } + + cert, err := x509.ParseCertificate(dst) + if err != nil { + return nil, fmt.Errorf(`failed to parse x509 certificate: %w`, err) + } + return cert, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/cert/chain.go b/vendor/github.com/lestrrat-go/jwx/v2/cert/chain.go new file mode 100644 index 0000000000..0c4746fb20 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/cert/chain.go @@ -0,0 +1,78 @@ +package cert + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// Chain represents a certificate chain as used in the `x5c` field of +// various objects within JOSE. +// +// It stores the certificates as a list of base64 encoded []byte +// sequence. By definition these values must PKIX encoded. 
+type Chain struct { + certificates [][]byte +} + +func (cc Chain) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte('[') + for i, cert := range cc.certificates { + if i > 0 { + buf.WriteByte(',') + } + buf.WriteByte('"') + buf.Write(cert) + buf.WriteByte('"') + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func (cc *Chain) UnmarshalJSON(data []byte) error { + var tmp []string + if err := json.Unmarshal(data, &tmp); err != nil { + return fmt.Errorf(`failed to unmarshal certificate chain: %w`, err) + } + + certs := make([][]byte, len(tmp)) + for i, cert := range tmp { + certs[i] = []byte(cert) + } + cc.certificates = certs + return nil +} + +// Get returns the n-th ASN.1 DER + base64 encoded certificate +// stored. `false` will be returned in the second argument if +// the corresponding index is out of range. +func (cc *Chain) Get(index int) ([]byte, bool) { + if index < 0 || index >= len(cc.certificates) { + return nil, false + } + + return cc.certificates[index], true +} + +// Len returns the number of certificates stored in this Chain +func (cc *Chain) Len() int { + return len(cc.certificates) +} + +var pemStart = []byte("----- BEGIN CERTIFICATE -----") +var pemEnd = []byte("----- END CERTIFICATE -----") + +func (cc *Chain) AddString(der string) error { + return cc.Add([]byte(der)) +} + +func (cc *Chain) Add(der []byte) error { + // We're going to be nice and remove marker lines if they + // give it to us + der = bytes.TrimPrefix(der, pemStart) + der = bytes.TrimSuffix(der, pemEnd) + der = bytes.TrimSpace(der) + cc.certificates = append(cc.certificates, der) + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/codecov.yml b/vendor/github.com/lestrrat-go/jwx/v2/codecov.yml new file mode 100644 index 0000000000..130effd7a6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/codecov.yml @@ -0,0 +1,2 @@ +codecov: + allow_coverage_offsets: true diff --git a/vendor/github.com/lestrrat-go/jwx/v2/format.go 
b/vendor/github.com/lestrrat-go/jwx/v2/format.go new file mode 100644 index 0000000000..ba721acacb --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/format.go @@ -0,0 +1,102 @@ +package jwx + +import ( + "bytes" + "encoding/json" +) + +type FormatKind int + +// These constants describe the result from guessing the format +// of the incoming buffer. +const ( + // InvalidFormat is returned when the format of the incoming buffer + // has been deemed conclusively invalid + InvalidFormat FormatKind = iota + // UnknownFormat is returned when GuessFormat was not able to conclusively + // determine the format of the + UnknownFormat + JWE + JWS + JWK + JWKS + JWT +) + +type formatHint struct { + Payload json.RawMessage `json:"payload"` // Only in JWS + Signatures json.RawMessage `json:"signatures"` // Only in JWS + Ciphertext json.RawMessage `json:"ciphertext"` // Only in JWE + KeyType json.RawMessage `json:"kty"` // Only in JWK + Keys json.RawMessage `json:"keys"` // Only in JWKS + Audience json.RawMessage `json:"aud"` // Only in JWT +} + +// GuessFormat is used to guess the format the given payload is in +// using heuristics. See the type FormatKind for a full list of +// possible types. +// +// This may be useful in determining your next action when you may +// encounter a payload that could either be a JWE, JWS, or a plain JWT. +// +// Because JWTs are almost always JWS signed, you may be thrown off +// if you pass what you think is a JWT payload to this function. +// If the function is in the "Compact" format, it means it's a JWS +// signed message, and its payload is the JWT. Therefore this function +// will reuturn JWS, not JWT. +// +// This function requires an extra parsing of the payload, and therefore +// may be inefficient if you call it every time before parsing. +func GuessFormat(payload []byte) FormatKind { + // The check against kty, keys, and aud are something this library + // made up. 
for the distinctions between JWE and JWS, we used + // https://datatracker.ietf.org/doc/html/rfc7516#section-9. + // + // The above RFC described several ways to distinguish between + // a JWE and JWS JSON, but we're only using one of them + + payload = bytes.TrimSpace(payload) + if len(payload) <= 0 { + return UnknownFormat + } + + if payload[0] != '{' { + // Compact format. It's probably a JWS or JWE + sep := []byte{'.'} // I want to const this :/ + + // Note: this counts the number of occurrences of the + // separator, but the RFC talks about the number of segments. + // number of '.' == segments - 1, so that's why we have 2 and 4 here + switch count := bytes.Count(payload, sep); count { + case 2: + return JWS + case 4: + return JWE + default: + return InvalidFormat + } + } + + // If we got here, we probably have JSON. + var h formatHint + if err := json.Unmarshal(payload, &h); err != nil { + return UnknownFormat + } + + if h.Audience != nil { + return JWT + } + if h.KeyType != nil { + return JWK + } + if h.Keys != nil { + return JWKS + } + if h.Ciphertext != nil { + return JWE + } + if h.Signatures != nil && h.Payload != nil { + return JWS + } + return UnknownFormat +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/formatkind_string_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/formatkind_string_gen.go new file mode 100644 index 0000000000..38abd1bc47 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/formatkind_string_gen.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=FormatKind"; DO NOT EDIT. + +package jwx + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidFormat-0] + _ = x[UnknownFormat-1] + _ = x[JWE-2] + _ = x[JWS-3] + _ = x[JWK-4] + _ = x[JWKS-5] + _ = x[JWT-6] +} + +const _FormatKind_name = "InvalidFormatUnknownFormatJWEJWSJWKJWKSJWT" + +var _FormatKind_index = [...]uint8{0, 13, 26, 29, 32, 35, 39, 42} + +func (i FormatKind) String() string { + if i < 0 || i >= FormatKind(len(_FormatKind_index)-1) { + return "FormatKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FormatKind_name[_FormatKind_index[i]:_FormatKind_index[i+1]] +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/base64.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/base64.go new file mode 100644 index 0000000000..bc494bcf80 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/base64/base64.go @@ -0,0 +1,65 @@ +package base64 + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "fmt" +) + +func Encode(src []byte) []byte { + enc := base64.RawURLEncoding + dst := make([]byte, enc.EncodedLen(len(src))) + enc.Encode(dst, src) + return dst +} + +func EncodeToStringStd(src []byte) string { + return base64.StdEncoding.EncodeToString(src) +} + +func EncodeToString(src []byte) string { + return base64.RawURLEncoding.EncodeToString(src) +} + +func EncodeUint64ToString(v uint64) string { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, v) + + i := 0 + for ; i < len(data); i++ { + if data[i] != 0x0 { + break + } + } + + return EncodeToString(data[i:]) +} + +func Decode(src []byte) ([]byte, error) { + var enc *base64.Encoding + + var isRaw = !bytes.HasSuffix(src, []byte{'='}) + var isURL = !bytes.ContainsAny(src, "+/") + switch { + case isRaw && isURL: + enc = base64.RawURLEncoding + case isURL: + enc = base64.URLEncoding + case isRaw: + enc = base64.RawStdEncoding + default: + enc = base64.StdEncoding + } + + dst := make([]byte, enc.DecodedLen(len(src))) + n, err := enc.Decode(dst, src) + if err != nil { + return nil, fmt.Errorf(`failed to decode source: 
%w`, err) + } + return dst[:n], nil +} + +func DecodeString(src string) ([]byte, error) { + return Decode([]byte(src)) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/ecutil.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/ecutil.go new file mode 100644 index 0000000000..e70f81659d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/ecutil/ecutil.go @@ -0,0 +1,110 @@ +// Package ecutil defines tools that help with elliptic curve related +// computation +package ecutil + +import ( + "crypto/elliptic" + "math/big" + "sync" + + "github.com/lestrrat-go/jwx/v2/jwa" +) + +// data for available curves. Some algorithms may be compiled in/out +var curveToAlg = map[elliptic.Curve]jwa.EllipticCurveAlgorithm{} +var algToCurve = map[jwa.EllipticCurveAlgorithm]elliptic.Curve{} +var availableAlgs []jwa.EllipticCurveAlgorithm +var availableCrvs []elliptic.Curve + +func RegisterCurve(crv elliptic.Curve, alg jwa.EllipticCurveAlgorithm) { + curveToAlg[crv] = alg + algToCurve[alg] = crv + availableAlgs = append(availableAlgs, alg) + availableCrvs = append(availableCrvs, crv) +} + +func IsAvailable(alg jwa.EllipticCurveAlgorithm) bool { + _, ok := algToCurve[alg] + return ok +} + +func AvailableAlgorithms() []jwa.EllipticCurveAlgorithm { + return availableAlgs +} + +func AvailableCurves() []elliptic.Curve { + return availableCrvs +} + +func AlgorithmForCurve(crv elliptic.Curve) (jwa.EllipticCurveAlgorithm, bool) { + v, ok := curveToAlg[crv] + return v, ok +} + +func CurveForAlgorithm(alg jwa.EllipticCurveAlgorithm) (elliptic.Curve, bool) { + v, ok := algToCurve[alg] + return v, ok +} + +const ( + // size of buffer that needs to be allocated for EC521 curve + ec521BufferSize = 66 // (521 / 8) + 1 +) + +var ecpointBufferPool = sync.Pool{ + New: func() interface{} { + // In most cases the curve bit size will be less than this length + // so allocate the maximum, and keep reusing + buf := make([]byte, 0, ec521BufferSize) + return &buf + }, +} + 
+func getCrvFixedBuffer(size int) []byte { + //nolint:forcetypeassert + buf := *(ecpointBufferPool.Get().(*[]byte)) + if size > ec521BufferSize && cap(buf) < size { + buf = append(buf, make([]byte, size-cap(buf))...) + } + return buf[:size] +} + +// ReleaseECPointBuffer releases the []byte buffer allocated. +func ReleaseECPointBuffer(buf []byte) { + buf = buf[:cap(buf)] + buf[0] = 0x0 + for i := 1; i < len(buf); i *= 2 { + copy(buf[i:], buf[:i]) + } + buf = buf[:0] + ecpointBufferPool.Put(&buf) +} + +// AllocECPointBuffer allocates a buffer for the given point in the given +// curve. This buffer should be released using the ReleaseECPointBuffer +// function. +func AllocECPointBuffer(v *big.Int, crv elliptic.Curve) []byte { + // We need to create a buffer that fits the entire curve. + // If the curve size is 66, that fits in 9 bytes. If the curve + // size is 64, it fits in 8 bytes. + bits := crv.Params().BitSize + + // For most common cases we know before hand what the byte length + // is going to be. optimize + var inBytes int + switch bits { + case 224, 256, 384: // TODO: use constant? + inBytes = bits / 8 + case 521: + inBytes = ec521BufferSize + default: + inBytes = bits / 8 + if (bits % 8) != 0 { + inBytes++ + } + } + + buf := getCrvFixedBuffer(inBytes) + v.FillBytes(buf) + return buf +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/mapiter.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/mapiter.go new file mode 100644 index 0000000000..c98fd46c3e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/iter/mapiter.go @@ -0,0 +1,36 @@ +package iter + +import ( + "context" + "fmt" + + "github.com/lestrrat-go/iter/mapiter" +) + +// MapVisitor is a specialized visitor for our purposes. 
+// Whereas mapiter.Visitor supports any type of key, this +// visitor assumes the key is a string +type MapVisitor interface { + Visit(string, interface{}) error +} + +type MapVisitorFunc func(string, interface{}) error + +func (fn MapVisitorFunc) Visit(s string, v interface{}) error { + return fn(s, v) +} + +func WalkMap(ctx context.Context, src mapiter.Source, visitor MapVisitor) error { + return mapiter.Walk(ctx, src, mapiter.VisitorFunc(func(k, v interface{}) error { + //nolint:forcetypeassert + return visitor.Visit(k.(string), v) + })) +} + +func AsMap(ctx context.Context, src mapiter.Source) (map[string]interface{}, error) { + var m map[string]interface{} + if err := mapiter.AsMap(ctx, src, &m); err != nil { + return nil, fmt.Errorf(`mapiter.AsMap failed: %w`, err) + } + return m, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/goccy.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/goccy.go new file mode 100644 index 0000000000..59682104b5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/goccy.go @@ -0,0 +1,51 @@ +//go:build jwx_goccy +// +build jwx_goccy + +package json + +import ( + "io" + + "github.com/goccy/go-json" +) + +type Decoder = json.Decoder +type Delim = json.Delim +type Encoder = json.Encoder +type Marshaler = json.Marshaler +type Number = json.Number +type RawMessage = json.RawMessage +type Unmarshaler = json.Unmarshaler + +func Engine() string { + return "github.com/goccy/go-json" +} + +// NewDecoder respects the values specified in DecoderSettings, +// and creates a Decoder that has certain features turned on/off +func NewDecoder(r io.Reader) *json.Decoder { + dec := json.NewDecoder(r) + + muGlobalConfig.RLock() + if useNumber { + dec.UseNumber() + } + muGlobalConfig.RUnlock() + + return dec +} + +// NewEncoder is just a proxy for "encoding/json".NewEncoder +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal is just a proxy for "encoding/json".Marshal 
+func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is just a proxy for "encoding/json".MarshalIndent +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/json.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/json.go new file mode 100644 index 0000000000..a4f1026a5a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/json.go @@ -0,0 +1,112 @@ +package json + +import ( + "bytes" + "fmt" + "os" + "sync" + + "github.com/lestrrat-go/jwx/v2/internal/base64" +) + +var muGlobalConfig sync.RWMutex +var useNumber bool + +// Sets the global configuration for json decoding +func DecoderSettings(inUseNumber bool) { + muGlobalConfig.Lock() + useNumber = inUseNumber + muGlobalConfig.Unlock() +} + +// Unmarshal respects the values specified in DecoderSettings, +// and uses a Decoder that has certain features turned on/off +func Unmarshal(b []byte, v interface{}) error { + dec := NewDecoder(bytes.NewReader(b)) + return dec.Decode(v) +} + +func AssignNextBytesToken(dst *[]byte, dec *Decoder) error { + var val string + if err := dec.Decode(&val); err != nil { + return fmt.Errorf(`error reading next value: %w`, err) + } + + buf, err := base64.DecodeString(val) + if err != nil { + return fmt.Errorf(`expected base64 encoded []byte (%T)`, val) + } + *dst = buf + return nil +} + +func ReadNextStringToken(dec *Decoder) (string, error) { + var val string + if err := dec.Decode(&val); err != nil { + return "", fmt.Errorf(`error reading next value: %w`, err) + } + return val, nil +} + +func AssignNextStringToken(dst **string, dec *Decoder) error { + val, err := ReadNextStringToken(dec) + if err != nil { + return err + } + *dst = &val + return nil +} + +// FlattenAudience is a flag to specify if we should flatten the "aud" +// entry to a string when there's only one entry. 
+// In jwx < 1.1.8 we just dumped everything as an array of strings, +// but apparently AWS Cognito doesn't handle this well. +// +// So now we have the ability to dump "aud" as a string if there's +// only one entry, but we need to retain the old behavior so that +// we don't accidentally break somebody else's code. (e.g. messing +// up how signatures are calculated) +var FlattenAudience uint32 + +func EncodeAudience(enc *Encoder, aud []string, flatten bool) error { + var val interface{} + if len(aud) == 1 && flatten { + val = aud[0] + } else { + val = aud + } + return enc.Encode(val) +} + +// DecodeCtx is an interface for objects that needs that extra something +// when decoding JSON into an object. +type DecodeCtx interface { + Registry() *Registry +} + +// DecodeCtxContainer is used to differentiate objects that can carry extra +// decoding hints and those who can't. +type DecodeCtxContainer interface { + DecodeCtx() DecodeCtx + SetDecodeCtx(DecodeCtx) +} + +// stock decodeCtx. should cover 80% of the cases +type decodeCtx struct { + registry *Registry +} + +func NewDecodeCtx(r *Registry) DecodeCtx { + return &decodeCtx{registry: r} +} + +func (dc *decodeCtx) Registry() *Registry { + return dc.registry +} + +func Dump(v interface{}) { + enc := NewEncoder(os.Stdout) + enc.SetIndent("", " ") + //nolint:errchkjson + _ = enc.Encode(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/registry.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/registry.go new file mode 100644 index 0000000000..4830e86de3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/registry.go @@ -0,0 +1,52 @@ +package json + +import ( + "fmt" + "reflect" + "sync" +) + +type Registry struct { + mu *sync.RWMutex + data map[string]reflect.Type +} + +func NewRegistry() *Registry { + return &Registry{ + mu: &sync.RWMutex{}, + data: make(map[string]reflect.Type), + } +} + +func (r *Registry) Register(name string, object interface{}) { + if object == nil { + 
r.mu.Lock() + defer r.mu.Unlock() + delete(r.data, name) + return + } + + typ := reflect.TypeOf(object) + r.mu.Lock() + defer r.mu.Unlock() + r.data[name] = typ +} + +func (r *Registry) Decode(dec *Decoder, name string) (interface{}, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + if typ, ok := r.data[name]; ok { + ptr := reflect.New(typ).Interface() + if err := dec.Decode(ptr); err != nil { + return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err) + } + return reflect.ValueOf(ptr).Elem().Interface(), nil + } + + var decoded interface{} + if err := dec.Decode(&decoded); err != nil { + return nil, fmt.Errorf(`failed to decode field %s: %w`, name, err) + } + return decoded, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/json/stdlib.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/stdlib.go new file mode 100644 index 0000000000..62b1a5ff51 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/json/stdlib.go @@ -0,0 +1,49 @@ +//go:build !jwx_goccy +// +build !jwx_goccy + +package json + +import ( + "encoding/json" + "io" +) + +type Decoder = json.Decoder +type Delim = json.Delim +type Encoder = json.Encoder +type Marshaler = json.Marshaler +type Number = json.Number +type RawMessage = json.RawMessage +type Unmarshaler = json.Unmarshaler + +func Engine() string { + return "encoding/json" +} + +// NewDecoder respects the values specified in DecoderSettings, +// and creates a Decoder that has certain features turned on/off +func NewDecoder(r io.Reader) *json.Decoder { + dec := json.NewDecoder(r) + + muGlobalConfig.RLock() + if useNumber { + dec.UseNumber() + } + muGlobalConfig.RUnlock() + + return dec +} + +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal is just a proxy for "encoding/json".Marshal +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is just a proxy for "encoding/json".MarshalIndent +func MarshalIndent(v interface{}, 
prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/keyconv.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/keyconv.go new file mode 100644 index 0000000000..807da1dee6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/internal/keyconv/keyconv.go @@ -0,0 +1,177 @@ +package keyconv + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/jwk" + "golang.org/x/crypto/ed25519" +) + +// RSAPrivateKey assigns src to dst. +// `dst` should be a pointer to a rsa.PrivateKey. +// `src` may be rsa.PrivateKey, *rsa.PrivateKey, or a jwk.Key +func RSAPrivateKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw rsa.PrivateKey + if err := jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce rsa.PrivateKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *rsa.PrivateKey + switch src := src.(type) { + case rsa.PrivateKey: + ptr = &src + case *rsa.PrivateKey: + ptr = src + default: + return fmt.Errorf(`expected rsa.PrivateKey or *rsa.PrivateKey, got %T`, src) + } + + return blackmagic.AssignIfCompatible(dst, ptr) +} + +// RSAPublicKey assigns src to dst +// `dst` should be a pointer to a non-zero rsa.PublicKey. 
+// `src` may be rsa.PublicKey, *rsa.PublicKey, or a jwk.Key +func RSAPublicKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw rsa.PublicKey + if err := jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce rsa.PublicKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *rsa.PublicKey + switch src := src.(type) { + case rsa.PublicKey: + ptr = &src + case *rsa.PublicKey: + ptr = src + default: + return fmt.Errorf(`expected rsa.PublicKey or *rsa.PublicKey, got %T`, src) + } + + return blackmagic.AssignIfCompatible(dst, ptr) +} + +// ECDSAPrivateKey assigns src to dst, converting its type from a +// non-pointer to a pointer +func ECDSAPrivateKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw ecdsa.PrivateKey + if err := jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce ecdsa.PrivateKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *ecdsa.PrivateKey + switch src := src.(type) { + case ecdsa.PrivateKey: + ptr = &src + case *ecdsa.PrivateKey: + ptr = src + default: + return fmt.Errorf(`expected ecdsa.PrivateKey or *ecdsa.PrivateKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +// ECDSAPublicKey assigns src to dst, converting its type from a +// non-pointer to a pointer +func ECDSAPublicKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw ecdsa.PublicKey + if err := jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce ecdsa.PublicKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *ecdsa.PublicKey + switch src := src.(type) { + case ecdsa.PublicKey: + ptr = &src + case *ecdsa.PublicKey: + ptr = src + default: + return fmt.Errorf(`expected ecdsa.PublicKey or *ecdsa.PublicKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +func ByteSliceKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw []byte + if err := 
jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce []byte from %T: %w`, src, err) + } + src = raw + } + + if _, ok := src.([]byte); !ok { + return fmt.Errorf(`expected []byte, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, src) +} + +func Ed25519PrivateKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw ed25519.PrivateKey + if err := jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce ed25519.PrivateKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *ed25519.PrivateKey + switch src := src.(type) { + case ed25519.PrivateKey: + ptr = &src + case *ed25519.PrivateKey: + ptr = src + default: + return fmt.Errorf(`expected ed25519.PrivateKey or *ed25519.PrivateKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} + +func Ed25519PublicKey(dst, src interface{}) error { + if jwkKey, ok := src.(jwk.Key); ok { + var raw ed25519.PublicKey + if err := jwkKey.Raw(&raw); err != nil { + return fmt.Errorf(`failed to produce ed25519.PublicKey from %T: %w`, src, err) + } + src = &raw + } + + var ptr *ed25519.PublicKey + switch src := src.(type) { + case ed25519.PublicKey: + ptr = &src + case *ed25519.PublicKey: + ptr = src + case *crypto.PublicKey: + tmp, ok := (*src).(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of *crypto.PublicKey`) + } + ptr = &tmp + case crypto.PublicKey: + tmp, ok := src.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of crypto.PublicKey`) + } + ptr = &tmp + default: + return fmt.Errorf(`expected ed25519.PublicKey or *ed25519.PublicKey, got %T`, src) + } + return blackmagic.AssignIfCompatible(dst, ptr) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/pool.go b/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/pool.go new file mode 100644 index 0000000000..fae560b7c5 --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/jwx/v2/internal/pool/pool.go @@ -0,0 +1,61 @@ +package pool + +import ( + "bytes" + "math/big" + "sync" +) + +var bytesBufferPool = sync.Pool{ + New: allocBytesBuffer, +} + +func allocBytesBuffer() interface{} { + return &bytes.Buffer{} +} + +func GetBytesBuffer() *bytes.Buffer { + //nolint:forcetypeassert + return bytesBufferPool.Get().(*bytes.Buffer) +} + +func ReleaseBytesBuffer(b *bytes.Buffer) { + b.Reset() + bytesBufferPool.Put(b) +} + +var bigIntPool = sync.Pool{ + New: allocBigInt, +} + +func allocBigInt() interface{} { + return &big.Int{} +} + +func GetBigInt() *big.Int { + //nolint:forcetypeassert + return bigIntPool.Get().(*big.Int) +} + +func ReleaseBigInt(i *big.Int) { + bigIntPool.Put(i.SetInt64(0)) +} + +var keyToErrorMapPool = sync.Pool{ + New: allocKeyToErrorMap, +} + +func allocKeyToErrorMap() interface{} { + return make(map[string]error) +} + +func GetKeyToErrorMap() map[string]error { + //nolint:forcetypeassert + return keyToErrorMapPool.Get().(map[string]error) +} + +func ReleaseKeyToErrorMap(m map[string]error) { + for key := range m { + delete(m, key) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jwa/README.md new file mode 100644 index 0000000000..d62f29276a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/README.md @@ -0,0 +1,3 @@ +# JWA [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jwa.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwa) + +Package [github.com/lestrrat-go/jwx/v2/jwa](./jwa) defines the various algorithm described in [RFC7518](https://tools.ietf.org/html/rfc7518) diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/compression_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/compression_gen.go new file mode 100644 index 0000000000..1649b4a55b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/compression_gen.go @@ -0,0 +1,71 @@ +// this file was auto-generated by 
internal/cmd/gentypes/main.go: DO NOT EDIT + +package jwa + +import ( + "fmt" + "sort" + "sync" +) + +// CompressionAlgorithm represents the compression algorithms as described in https://tools.ietf.org/html/rfc7518#section-7.3 +type CompressionAlgorithm string + +// Supported values for CompressionAlgorithm +const ( + Deflate CompressionAlgorithm = "DEF" // DEFLATE (RFC 1951) + NoCompress CompressionAlgorithm = "" // No compression +) + +var allCompressionAlgorithms = map[CompressionAlgorithm]struct{}{ + Deflate: {}, + NoCompress: {}, +} + +var listCompressionAlgorithmOnce sync.Once +var listCompressionAlgorithm []CompressionAlgorithm + +// CompressionAlgorithms returns a list of all available values for CompressionAlgorithm +func CompressionAlgorithms() []CompressionAlgorithm { + listCompressionAlgorithmOnce.Do(func() { + listCompressionAlgorithm = make([]CompressionAlgorithm, 0, len(allCompressionAlgorithms)) + for v := range allCompressionAlgorithms { + listCompressionAlgorithm = append(listCompressionAlgorithm, v) + } + sort.Slice(listCompressionAlgorithm, func(i, j int) bool { + return string(listCompressionAlgorithm[i]) < string(listCompressionAlgorithm[j]) + }) + }) + return listCompressionAlgorithm +} + +// Accept is used when conversion from values given by +// outside sources (such as JSON payloads) is required +func (v *CompressionAlgorithm) Accept(value interface{}) error { + var tmp CompressionAlgorithm + if x, ok := value.(CompressionAlgorithm); ok { + tmp = x + } else { + var s string + switch x := value.(type) { + case fmt.Stringer: + s = x.String() + case string: + s = x + default: + return fmt.Errorf(`invalid type for jwa.CompressionAlgorithm: %T`, value) + } + tmp = CompressionAlgorithm(s) + } + if _, ok := allCompressionAlgorithms[tmp]; !ok { + return fmt.Errorf(`invalid jwa.CompressionAlgorithm value`) + } + + *v = tmp + return nil +} + +// String returns the string representation of a CompressionAlgorithm +func (v CompressionAlgorithm) 
String() string { + return string(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/content_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/content_encryption_gen.go new file mode 100644 index 0000000000..fe0e062f62 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/content_encryption_gen.go @@ -0,0 +1,79 @@ +// this file was auto-generated by internal/cmd/gentypes/main.go: DO NOT EDIT + +package jwa + +import ( + "fmt" + "sort" + "sync" +) + +// ContentEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-5 +type ContentEncryptionAlgorithm string + +// Supported values for ContentEncryptionAlgorithm +const ( + A128CBC_HS256 ContentEncryptionAlgorithm = "A128CBC-HS256" // AES-CBC + HMAC-SHA256 (128) + A128GCM ContentEncryptionAlgorithm = "A128GCM" // AES-GCM (128) + A192CBC_HS384 ContentEncryptionAlgorithm = "A192CBC-HS384" // AES-CBC + HMAC-SHA384 (192) + A192GCM ContentEncryptionAlgorithm = "A192GCM" // AES-GCM (192) + A256CBC_HS512 ContentEncryptionAlgorithm = "A256CBC-HS512" // AES-CBC + HMAC-SHA512 (256) + A256GCM ContentEncryptionAlgorithm = "A256GCM" // AES-GCM (256) +) + +var allContentEncryptionAlgorithms = map[ContentEncryptionAlgorithm]struct{}{ + A128CBC_HS256: {}, + A128GCM: {}, + A192CBC_HS384: {}, + A192GCM: {}, + A256CBC_HS512: {}, + A256GCM: {}, +} + +var listContentEncryptionAlgorithmOnce sync.Once +var listContentEncryptionAlgorithm []ContentEncryptionAlgorithm + +// ContentEncryptionAlgorithms returns a list of all available values for ContentEncryptionAlgorithm +func ContentEncryptionAlgorithms() []ContentEncryptionAlgorithm { + listContentEncryptionAlgorithmOnce.Do(func() { + listContentEncryptionAlgorithm = make([]ContentEncryptionAlgorithm, 0, len(allContentEncryptionAlgorithms)) + for v := range allContentEncryptionAlgorithms { + listContentEncryptionAlgorithm = append(listContentEncryptionAlgorithm, v) + } + 
sort.Slice(listContentEncryptionAlgorithm, func(i, j int) bool { + return string(listContentEncryptionAlgorithm[i]) < string(listContentEncryptionAlgorithm[j]) + }) + }) + return listContentEncryptionAlgorithm +} + +// Accept is used when conversion from values given by +// outside sources (such as JSON payloads) is required +func (v *ContentEncryptionAlgorithm) Accept(value interface{}) error { + var tmp ContentEncryptionAlgorithm + if x, ok := value.(ContentEncryptionAlgorithm); ok { + tmp = x + } else { + var s string + switch x := value.(type) { + case fmt.Stringer: + s = x.String() + case string: + s = x + default: + return fmt.Errorf(`invalid type for jwa.ContentEncryptionAlgorithm: %T`, value) + } + tmp = ContentEncryptionAlgorithm(s) + } + if _, ok := allContentEncryptionAlgorithms[tmp]; !ok { + return fmt.Errorf(`invalid jwa.ContentEncryptionAlgorithm value`) + } + + *v = tmp + return nil +} + +// String returns the string representation of a ContentEncryptionAlgorithm +func (v ContentEncryptionAlgorithm) String() string { + return string(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/elliptic_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/elliptic_gen.go new file mode 100644 index 0000000000..d948e07981 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/elliptic_gen.go @@ -0,0 +1,82 @@ +// this file was auto-generated by internal/cmd/gentypes/main.go: DO NOT EDIT + +package jwa + +import ( + "fmt" + "sort" + "sync" +) + +// EllipticCurveAlgorithm represents the algorithms used for EC keys +type EllipticCurveAlgorithm string + +// Supported values for EllipticCurveAlgorithm +const ( + Ed25519 EllipticCurveAlgorithm = "Ed25519" + Ed448 EllipticCurveAlgorithm = "Ed448" + InvalidEllipticCurve EllipticCurveAlgorithm = "P-invalid" + P256 EllipticCurveAlgorithm = "P-256" + P384 EllipticCurveAlgorithm = "P-384" + P521 EllipticCurveAlgorithm = "P-521" + X25519 EllipticCurveAlgorithm = "X25519" + X448 EllipticCurveAlgorithm = "X448" +) + 
+var allEllipticCurveAlgorithms = map[EllipticCurveAlgorithm]struct{}{ + Ed25519: {}, + Ed448: {}, + P256: {}, + P384: {}, + P521: {}, + X25519: {}, + X448: {}, +} + +var listEllipticCurveAlgorithmOnce sync.Once +var listEllipticCurveAlgorithm []EllipticCurveAlgorithm + +// EllipticCurveAlgorithms returns a list of all available values for EllipticCurveAlgorithm +func EllipticCurveAlgorithms() []EllipticCurveAlgorithm { + listEllipticCurveAlgorithmOnce.Do(func() { + listEllipticCurveAlgorithm = make([]EllipticCurveAlgorithm, 0, len(allEllipticCurveAlgorithms)) + for v := range allEllipticCurveAlgorithms { + listEllipticCurveAlgorithm = append(listEllipticCurveAlgorithm, v) + } + sort.Slice(listEllipticCurveAlgorithm, func(i, j int) bool { + return string(listEllipticCurveAlgorithm[i]) < string(listEllipticCurveAlgorithm[j]) + }) + }) + return listEllipticCurveAlgorithm +} + +// Accept is used when conversion from values given by +// outside sources (such as JSON payloads) is required +func (v *EllipticCurveAlgorithm) Accept(value interface{}) error { + var tmp EllipticCurveAlgorithm + if x, ok := value.(EllipticCurveAlgorithm); ok { + tmp = x + } else { + var s string + switch x := value.(type) { + case fmt.Stringer: + s = x.String() + case string: + s = x + default: + return fmt.Errorf(`invalid type for jwa.EllipticCurveAlgorithm: %T`, value) + } + tmp = EllipticCurveAlgorithm(s) + } + if _, ok := allEllipticCurveAlgorithms[tmp]; !ok { + return fmt.Errorf(`invalid jwa.EllipticCurveAlgorithm value`) + } + + *v = tmp + return nil +} + +// String returns the string representation of a EllipticCurveAlgorithm +func (v EllipticCurveAlgorithm) String() string { + return string(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/jwa.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/jwa.go new file mode 100644 index 0000000000..f9ce38e04c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/jwa.go @@ -0,0 +1,61 @@ +//go:generate ../tools/cmd/genjwa.sh + +// 
Package jwa defines the various algorithm described in https://tools.ietf.org/html/rfc7518 +package jwa + +import "fmt" + +// KeyAlgorithm is a workaround for jwk.Key being able to contain different +// types of algorithms in its `alg` field. +// +// Previously the storage for the `alg` field was represented as a string, +// but this caused some users to wonder why the field was not typed appropriately +// like other fields. +// +// Ideally we would like to keep track of Signature Algorithms and +// Content Encryption Algorithms separately, and force the APIs to +// type-check at compile time, but this allows users to pass a value from a +// jwk.Key directly +type KeyAlgorithm interface { + String() string +} + +// InvalidKeyAlgorithm represents an algorithm that the library is not aware of. +type InvalidKeyAlgorithm string + +func (s InvalidKeyAlgorithm) String() string { + return string(s) +} + +func (InvalidKeyAlgorithm) Accept(_ interface{}) error { + return fmt.Errorf(`jwa.InvalidKeyAlgorithm does not support Accept() method calls`) +} + +// KeyAlgorithmFrom takes either a string, `jwa.SignatureAlgorithm` or `jwa.KeyEncryptionAlgorithm` +// and returns a `jwa.KeyAlgorithm`. +// +// If the value cannot be handled, it returns an `jwa.InvalidKeyAlgorithm` +// object instead of returning an error. 
This design choice was made to allow +// users to directly pass the return value to functions such as `jws.Sign()` +func KeyAlgorithmFrom(v interface{}) KeyAlgorithm { + switch v := v.(type) { + case SignatureAlgorithm: + return v + case KeyEncryptionAlgorithm: + return v + case string: + var salg SignatureAlgorithm + if err := salg.Accept(v); err == nil { + return salg + } + + var kealg KeyEncryptionAlgorithm + if err := kealg.Accept(v); err == nil { + return kealg + } + + return InvalidKeyAlgorithm(v) + default: + return InvalidKeyAlgorithm(fmt.Sprintf("%s", v)) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_encryption_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_encryption_gen.go new file mode 100644 index 0000000000..80a97d9b8a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_encryption_gen.go @@ -0,0 +1,110 @@ +// this file was auto-generated by internal/cmd/gentypes/main.go: DO NOT EDIT + +package jwa + +import ( + "fmt" + "sort" + "sync" +) + +// KeyEncryptionAlgorithm represents the various encryption algorithms as described in https://tools.ietf.org/html/rfc7518#section-4.1 +type KeyEncryptionAlgorithm string + +// Supported values for KeyEncryptionAlgorithm +const ( + A128GCMKW KeyEncryptionAlgorithm = "A128GCMKW" // AES-GCM key wrap (128) + A128KW KeyEncryptionAlgorithm = "A128KW" // AES key wrap (128) + A192GCMKW KeyEncryptionAlgorithm = "A192GCMKW" // AES-GCM key wrap (192) + A192KW KeyEncryptionAlgorithm = "A192KW" // AES key wrap (192) + A256GCMKW KeyEncryptionAlgorithm = "A256GCMKW" // AES-GCM key wrap (256) + A256KW KeyEncryptionAlgorithm = "A256KW" // AES key wrap (256) + DIRECT KeyEncryptionAlgorithm = "dir" // Direct encryption + ECDH_ES KeyEncryptionAlgorithm = "ECDH-ES" // ECDH-ES + ECDH_ES_A128KW KeyEncryptionAlgorithm = "ECDH-ES+A128KW" // ECDH-ES + AES key wrap (128) + ECDH_ES_A192KW KeyEncryptionAlgorithm = "ECDH-ES+A192KW" // ECDH-ES + AES key wrap (192) + ECDH_ES_A256KW KeyEncryptionAlgorithm = 
"ECDH-ES+A256KW" // ECDH-ES + AES key wrap (256) + PBES2_HS256_A128KW KeyEncryptionAlgorithm = "PBES2-HS256+A128KW" // PBES2 + HMAC-SHA256 + AES key wrap (128) + PBES2_HS384_A192KW KeyEncryptionAlgorithm = "PBES2-HS384+A192KW" // PBES2 + HMAC-SHA384 + AES key wrap (192) + PBES2_HS512_A256KW KeyEncryptionAlgorithm = "PBES2-HS512+A256KW" // PBES2 + HMAC-SHA512 + AES key wrap (256) + RSA1_5 KeyEncryptionAlgorithm = "RSA1_5" // RSA-PKCS1v1.5 + RSA_OAEP KeyEncryptionAlgorithm = "RSA-OAEP" // RSA-OAEP-SHA1 + RSA_OAEP_256 KeyEncryptionAlgorithm = "RSA-OAEP-256" // RSA-OAEP-SHA256 +) + +var allKeyEncryptionAlgorithms = map[KeyEncryptionAlgorithm]struct{}{ + A128GCMKW: {}, + A128KW: {}, + A192GCMKW: {}, + A192KW: {}, + A256GCMKW: {}, + A256KW: {}, + DIRECT: {}, + ECDH_ES: {}, + ECDH_ES_A128KW: {}, + ECDH_ES_A192KW: {}, + ECDH_ES_A256KW: {}, + PBES2_HS256_A128KW: {}, + PBES2_HS384_A192KW: {}, + PBES2_HS512_A256KW: {}, + RSA1_5: {}, + RSA_OAEP: {}, + RSA_OAEP_256: {}, +} + +var listKeyEncryptionAlgorithmOnce sync.Once +var listKeyEncryptionAlgorithm []KeyEncryptionAlgorithm + +// KeyEncryptionAlgorithms returns a list of all available values for KeyEncryptionAlgorithm +func KeyEncryptionAlgorithms() []KeyEncryptionAlgorithm { + listKeyEncryptionAlgorithmOnce.Do(func() { + listKeyEncryptionAlgorithm = make([]KeyEncryptionAlgorithm, 0, len(allKeyEncryptionAlgorithms)) + for v := range allKeyEncryptionAlgorithms { + listKeyEncryptionAlgorithm = append(listKeyEncryptionAlgorithm, v) + } + sort.Slice(listKeyEncryptionAlgorithm, func(i, j int) bool { + return string(listKeyEncryptionAlgorithm[i]) < string(listKeyEncryptionAlgorithm[j]) + }) + }) + return listKeyEncryptionAlgorithm +} + +// Accept is used when conversion from values given by +// outside sources (such as JSON payloads) is required +func (v *KeyEncryptionAlgorithm) Accept(value interface{}) error { + var tmp KeyEncryptionAlgorithm + if x, ok := value.(KeyEncryptionAlgorithm); ok { + tmp = x + } else { + var s string + 
switch x := value.(type) { + case fmt.Stringer: + s = x.String() + case string: + s = x + default: + return fmt.Errorf(`invalid type for jwa.KeyEncryptionAlgorithm: %T`, value) + } + tmp = KeyEncryptionAlgorithm(s) + } + if _, ok := allKeyEncryptionAlgorithms[tmp]; !ok { + return fmt.Errorf(`invalid jwa.KeyEncryptionAlgorithm value`) + } + + *v = tmp + return nil +} + +// String returns the string representation of a KeyEncryptionAlgorithm +func (v KeyEncryptionAlgorithm) String() string { + return string(v) +} + +// IsSymmetric returns true if the algorithm is a symmetric type +func (v KeyEncryptionAlgorithm) IsSymmetric() bool { + switch v { + case A128GCMKW, A128KW, A192GCMKW, A192KW, A256GCMKW, A256KW, DIRECT, PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + return true + } + return false +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_type_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_type_gen.go new file mode 100644 index 0000000000..a55da787ed --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/key_type_gen.go @@ -0,0 +1,76 @@ +// this file was auto-generated by internal/cmd/gentypes/main.go: DO NOT EDIT + +package jwa + +import ( + "fmt" + "sort" + "sync" +) + +// KeyType represents the key type ("kty") that are supported +type KeyType string + +// Supported values for KeyType +const ( + EC KeyType = "EC" // Elliptic Curve + InvalidKeyType KeyType = "" // Invalid KeyType + OKP KeyType = "OKP" // Octet string key pairs + OctetSeq KeyType = "oct" // Octet sequence (used to represent symmetric keys) + RSA KeyType = "RSA" // RSA +) + +var allKeyTypes = map[KeyType]struct{}{ + EC: {}, + OKP: {}, + OctetSeq: {}, + RSA: {}, +} + +var listKeyTypeOnce sync.Once +var listKeyType []KeyType + +// KeyTypes returns a list of all available values for KeyType +func KeyTypes() []KeyType { + listKeyTypeOnce.Do(func() { + listKeyType = make([]KeyType, 0, len(allKeyTypes)) + for v := range allKeyTypes { + listKeyType = 
append(listKeyType, v) + } + sort.Slice(listKeyType, func(i, j int) bool { + return string(listKeyType[i]) < string(listKeyType[j]) + }) + }) + return listKeyType +} + +// Accept is used when conversion from values given by +// outside sources (such as JSON payloads) is required +func (v *KeyType) Accept(value interface{}) error { + var tmp KeyType + if x, ok := value.(KeyType); ok { + tmp = x + } else { + var s string + switch x := value.(type) { + case fmt.Stringer: + s = x.String() + case string: + s = x + default: + return fmt.Errorf(`invalid type for jwa.KeyType: %T`, value) + } + tmp = KeyType(s) + } + if _, ok := allKeyTypes[tmp]; !ok { + return fmt.Errorf(`invalid jwa.KeyType value`) + } + + *v = tmp + return nil +} + +// String returns the string representation of a KeyType +func (v KeyType) String() string { + return string(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/secp2561k.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/secp2561k.go new file mode 100644 index 0000000000..a6da0dde91 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/secp2561k.go @@ -0,0 +1,11 @@ +//go:build jwx_es256k +// +build jwx_es256k + +package jwa + +// This constant is only available if compiled with jwx_es256k build tag +const Secp256k1 EllipticCurveAlgorithm = "secp256k1" + +func init() { + allEllipticCurveAlgorithms[Secp256k1] = struct{}{} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwa/signature_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwa/signature_gen.go new file mode 100644 index 0000000000..40cb017907 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwa/signature_gen.go @@ -0,0 +1,97 @@ +// this file was auto-generated by internal/cmd/gentypes/main.go: DO NOT EDIT + +package jwa + +import ( + "fmt" + "sort" + "sync" +) + +// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1 +type SignatureAlgorithm string + +// Supported values for 
SignatureAlgorithm +const ( + ES256 SignatureAlgorithm = "ES256" // ECDSA using P-256 and SHA-256 + ES256K SignatureAlgorithm = "ES256K" // ECDSA using secp256k1 and SHA-256 + ES384 SignatureAlgorithm = "ES384" // ECDSA using P-384 and SHA-384 + ES512 SignatureAlgorithm = "ES512" // ECDSA using P-521 and SHA-512 + EdDSA SignatureAlgorithm = "EdDSA" // EdDSA signature algorithms + HS256 SignatureAlgorithm = "HS256" // HMAC using SHA-256 + HS384 SignatureAlgorithm = "HS384" // HMAC using SHA-384 + HS512 SignatureAlgorithm = "HS512" // HMAC using SHA-512 + NoSignature SignatureAlgorithm = "none" + PS256 SignatureAlgorithm = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256 + PS384 SignatureAlgorithm = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384 + PS512 SignatureAlgorithm = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512 + RS256 SignatureAlgorithm = "RS256" // RSASSA-PKCS-v1.5 using SHA-256 + RS384 SignatureAlgorithm = "RS384" // RSASSA-PKCS-v1.5 using SHA-384 + RS512 SignatureAlgorithm = "RS512" // RSASSA-PKCS-v1.5 using SHA-512 +) + +var allSignatureAlgorithms = map[SignatureAlgorithm]struct{}{ + ES256: {}, + ES256K: {}, + ES384: {}, + ES512: {}, + EdDSA: {}, + HS256: {}, + HS384: {}, + HS512: {}, + NoSignature: {}, + PS256: {}, + PS384: {}, + PS512: {}, + RS256: {}, + RS384: {}, + RS512: {}, +} + +var listSignatureAlgorithmOnce sync.Once +var listSignatureAlgorithm []SignatureAlgorithm + +// SignatureAlgorithms returns a list of all available values for SignatureAlgorithm +func SignatureAlgorithms() []SignatureAlgorithm { + listSignatureAlgorithmOnce.Do(func() { + listSignatureAlgorithm = make([]SignatureAlgorithm, 0, len(allSignatureAlgorithms)) + for v := range allSignatureAlgorithms { + listSignatureAlgorithm = append(listSignatureAlgorithm, v) + } + sort.Slice(listSignatureAlgorithm, func(i, j int) bool { + return string(listSignatureAlgorithm[i]) < string(listSignatureAlgorithm[j]) + }) + }) + return listSignatureAlgorithm +} + +// Accept is used 
when conversion from values given by +// outside sources (such as JSON payloads) is required +func (v *SignatureAlgorithm) Accept(value interface{}) error { + var tmp SignatureAlgorithm + if x, ok := value.(SignatureAlgorithm); ok { + tmp = x + } else { + var s string + switch x := value.(type) { + case fmt.Stringer: + s = x.String() + case string: + s = x + default: + return fmt.Errorf(`invalid type for jwa.SignatureAlgorithm: %T`, value) + } + tmp = SignatureAlgorithm(s) + } + if _, ok := allSignatureAlgorithms[tmp]; !ok { + return fmt.Errorf(`invalid jwa.SignatureAlgorithm value`) + } + + *v = tmp + return nil +} + +// String returns the string representation of a SignatureAlgorithm +func (v SignatureAlgorithm) String() string { + return string(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jwe/README.md new file mode 100644 index 0000000000..542172d53f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/README.md @@ -0,0 +1,94 @@ +# JWE [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jwe.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwe) + +Package jwe implements JWE as described in [RFC7516](https://tools.ietf.org/html/rfc7516) + +* Encrypt and Decrypt arbitrary data +* Content compression and decompression +* Add arbitrary fields in the JWE header object + +How-to style documentation can be found in the [docs directory](../docs). + +Examples are located in the examples directory ([jwe_example_test.go](../examples/jwe_example_test.go)) + +Supported key encryption algorithm: + +| Algorithm | Supported? 
| Constant in [jwa](../jwa) | +|:-----------------------------------------|:-----------|:-------------------------| +| RSA-PKCS1v1.5 | YES | jwa.RSA1_5 | +| RSA-OAEP-SHA1 | YES | jwa.RSA_OAEP | +| RSA-OAEP-SHA256 | YES | jwa.RSA_OAEP_256 | +| AES key wrap (128) | YES | jwa.A128KW | +| AES key wrap (192) | YES | jwa.A192KW | +| AES key wrap (256) | YES | jwa.A256KW | +| Direct encryption | YES (1) | jwa.DIRECT | +| ECDH-ES | YES (1) | jwa.ECDH_ES | +| ECDH-ES + AES key wrap (128) | YES | jwa.ECDH_ES_A128KW | +| ECDH-ES + AES key wrap (192) | YES | jwa.ECDH_ES_A192KW | +| ECDH-ES + AES key wrap (256) | YES | jwa.ECDH_ES_A256KW | +| AES-GCM key wrap (128) | YES | jwa.A128GCMKW | +| AES-GCM key wrap (192) | YES | jwa.A192GCMKW | +| AES-GCM key wrap (256) | YES | jwa.A256GCMKW | +| PBES2 + HMAC-SHA256 + AES key wrap (128) | YES | jwa.PBES2_HS256_A128KW | +| PBES2 + HMAC-SHA384 + AES key wrap (192) | YES | jwa.PBES2_HS384_A192KW | +| PBES2 + HMAC-SHA512 + AES key wrap (256) | YES | jwa.PBES2_HS512_A256KW | + +* Note 1: Single-recipient only + +Supported content encryption algorithm: + +| Algorithm | Supported? 
| Constant in [jwa](../jwa) | +|:----------------------------|:-----------|:--------------------------| +| AES-CBC + HMAC-SHA256 (128) | YES | jwa.A128CBC_HS256 | +| AES-CBC + HMAC-SHA384 (192) | YES | jwa.A192CBC_HS384 | +| AES-CBC + HMAC-SHA512 (256) | YES | jwa.A256CBC_HS512 | +| AES-GCM (128) | YES | jwa.A128GCM | +| AES-GCM (192) | YES | jwa.A192GCM | +| AES-GCM (256) | YES | jwa.A256GCM | + +# SYNOPSIS + +## Encrypt data + +```go +func ExampleEncrypt() { + privkey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Printf("failed to generate private key: %s", err) + return + } + + payload := []byte("Lorem Ipsum") + + encrypted, err := jwe.Encrypt(payload, jwe.WithKey(jwa.RSA1_5, &privkey.PublicKey), jwe.WithContentEncryption(jwa.A128CBC_HS256)) + if err != nil { + log.Printf("failed to encrypt payload: %s", err) + return + } + _ = encrypted + // OUTPUT: +} +``` + +## Decrypt data + +```go +func ExampleDecrypt() { + privkey, encrypted, err := exampleGenPayload() + if err != nil { + log.Printf("failed to generate encrypted payload: %s", err) + return + } + + decrypted, err := jwe.Decrypt(encrypted, jwe.WithKey(jwa.RSA1_5, privkey)) + if err != nil { + log.Printf("failed to decrypt: %s", err) + return + } + + if string(decrypted) != "Lorem Ipsum" { + log.Printf("WHAT?!") + return + } + // OUTPUT: +} +``` diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/compress.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/compress.go new file mode 100644 index 0000000000..0beba4a58a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/compress.go @@ -0,0 +1,36 @@ +package jwe + +import ( + "bytes" + "compress/flate" + "fmt" + "io" + + "github.com/lestrrat-go/jwx/v2/internal/pool" +) + +func uncompress(plaintext []byte) ([]byte, error) { + return io.ReadAll(flate.NewReader(bytes.NewReader(plaintext))) +} + +func compress(plaintext []byte) ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + w, _ := 
flate.NewWriter(buf, 1) + in := plaintext + for len(in) > 0 { + n, err := w.Write(in) + if err != nil { + return nil, fmt.Errorf(`failed to write to compression writer: %w`, err) + } + in = in[n:] + } + if err := w.Close(); err != nil { + return nil, fmt.Errorf(`failed to close compression writer: %w`, err) + } + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/decrypt.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/decrypt.go new file mode 100644 index 0000000000..1988f8095d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/decrypt.go @@ -0,0 +1,301 @@ +package jwe + +import ( + "crypto/aes" + cryptocipher "crypto/cipher" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "golang.org/x/crypto/pbkdf2" + + "github.com/lestrrat-go/jwx/v2/internal/keyconv" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/cipher" + "github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +// decrypter is responsible for taking various components to decrypt a message. +// its operation is not concurrency safe. You must provide locking yourself +// +//nolint:govet +type decrypter struct { + aad []byte + apu []byte + apv []byte + computedAad []byte + iv []byte + keyiv []byte + keysalt []byte + keytag []byte + tag []byte + privkey interface{} + pubkey interface{} + ctalg jwa.ContentEncryptionAlgorithm + keyalg jwa.KeyEncryptionAlgorithm + cipher content_crypt.Cipher + keycount int +} + +// newDecrypter Creates a new Decrypter instance. You must supply the +// rest of parameters via their respective setter methods before +// calling Decrypt(). +// +// privkey must be a private key in its "raw" format (i.e. 
something like +// *rsa.PrivateKey, instead of jwk.Key) +// +// You should consider this object immutable once you assign values to it. +func newDecrypter(keyalg jwa.KeyEncryptionAlgorithm, ctalg jwa.ContentEncryptionAlgorithm, privkey interface{}) *decrypter { + return &decrypter{ + ctalg: ctalg, + keyalg: keyalg, + privkey: privkey, + } +} + +func (d *decrypter) AgreementPartyUInfo(apu []byte) *decrypter { + d.apu = apu + return d +} + +func (d *decrypter) AgreementPartyVInfo(apv []byte) *decrypter { + d.apv = apv + return d +} + +func (d *decrypter) AuthenticatedData(aad []byte) *decrypter { + d.aad = aad + return d +} + +func (d *decrypter) ComputedAuthenticatedData(aad []byte) *decrypter { + d.computedAad = aad + return d +} + +func (d *decrypter) ContentEncryptionAlgorithm(ctalg jwa.ContentEncryptionAlgorithm) *decrypter { + d.ctalg = ctalg + return d +} + +func (d *decrypter) InitializationVector(iv []byte) *decrypter { + d.iv = iv + return d +} + +func (d *decrypter) KeyCount(keycount int) *decrypter { + d.keycount = keycount + return d +} + +func (d *decrypter) KeyInitializationVector(keyiv []byte) *decrypter { + d.keyiv = keyiv + return d +} + +func (d *decrypter) KeySalt(keysalt []byte) *decrypter { + d.keysalt = keysalt + return d +} + +func (d *decrypter) KeyTag(keytag []byte) *decrypter { + d.keytag = keytag + return d +} + +// PublicKey sets the public key to be used in decoding EC based encryptions. +// The key must be in its "raw" format (i.e. 
*ecdsa.PublicKey, instead of jwk.Key) +func (d *decrypter) PublicKey(pubkey interface{}) *decrypter { + d.pubkey = pubkey + return d +} + +func (d *decrypter) Tag(tag []byte) *decrypter { + d.tag = tag + return d +} + +func (d *decrypter) ContentCipher() (content_crypt.Cipher, error) { + if d.cipher == nil { + switch d.ctalg { + case jwa.A128GCM, jwa.A192GCM, jwa.A256GCM, jwa.A128CBC_HS256, jwa.A192CBC_HS384, jwa.A256CBC_HS512: + cipher, err := cipher.NewAES(d.ctalg) + if err != nil { + return nil, fmt.Errorf(`failed to build content cipher for %s: %w`, d.ctalg, err) + } + d.cipher = cipher + default: + return nil, fmt.Errorf(`invalid content cipher algorithm (%s)`, d.ctalg) + } + } + + return d.cipher, nil +} + +func (d *decrypter) Decrypt(recipientKey, ciphertext []byte) (plaintext []byte, err error) { + cek, keyerr := d.DecryptKey(recipientKey) + if keyerr != nil { + err = fmt.Errorf(`failed to decrypt key: %w`, keyerr) + return + } + + cipher, ciphererr := d.ContentCipher() + if ciphererr != nil { + err = fmt.Errorf(`failed to fetch content crypt cipher: %w`, ciphererr) + return + } + + computedAad := d.computedAad + if d.aad != nil { + computedAad = append(append(computedAad, '.'), d.aad...) 
+ } + + plaintext, err = cipher.Decrypt(cek, d.iv, ciphertext, d.tag, computedAad) + if err != nil { + err = fmt.Errorf(`failed to decrypt payload: %w`, err) + return + } + + return plaintext, nil +} + +func (d *decrypter) decryptSymmetricKey(recipientKey, cek []byte) ([]byte, error) { + switch d.keyalg { + case jwa.DIRECT: + return cek, nil + case jwa.PBES2_HS256_A128KW, jwa.PBES2_HS384_A192KW, jwa.PBES2_HS512_A256KW: + var hashFunc func() hash.Hash + var keylen int + switch d.keyalg { + case jwa.PBES2_HS256_A128KW: + hashFunc = sha256.New + keylen = 16 + case jwa.PBES2_HS384_A192KW: + hashFunc = sha512.New384 + keylen = 24 + case jwa.PBES2_HS512_A256KW: + hashFunc = sha512.New + keylen = 32 + } + salt := []byte(d.keyalg) + salt = append(salt, byte(0)) + salt = append(salt, d.keysalt...) + cek = pbkdf2.Key(cek, salt, d.keycount, keylen, hashFunc) + fallthrough + case jwa.A128KW, jwa.A192KW, jwa.A256KW: + block, err := aes.NewCipher(cek) + if err != nil { + return nil, fmt.Errorf(`failed to create new AES cipher: %w`, err) + } + + jek, err := keyenc.Unwrap(block, recipientKey) + if err != nil { + return nil, fmt.Errorf(`failed to unwrap key: %w`, err) + } + + return jek, nil + case jwa.A128GCMKW, jwa.A192GCMKW, jwa.A256GCMKW: + if len(d.keyiv) != 12 { + return nil, fmt.Errorf("GCM requires 96-bit iv, got %d", len(d.keyiv)*8) + } + if len(d.keytag) != 16 { + return nil, fmt.Errorf("GCM requires 128-bit tag, got %d", len(d.keytag)*8) + } + block, err := aes.NewCipher(cek) + if err != nil { + return nil, fmt.Errorf(`failed to create new AES cipher: %w`, err) + } + aesgcm, err := cryptocipher.NewGCM(block) + if err != nil { + return nil, fmt.Errorf(`failed to create new GCM wrap: %w`, err) + } + ciphertext := recipientKey[:] + ciphertext = append(ciphertext, d.keytag...) 
+ jek, err := aesgcm.Open(nil, d.keyiv, ciphertext, nil) + if err != nil { + return nil, fmt.Errorf(`failed to decode key: %w`, err) + } + return jek, nil + default: + return nil, fmt.Errorf("decrypt key: unsupported algorithm %s", d.keyalg) + } +} + +func (d *decrypter) DecryptKey(recipientKey []byte) (cek []byte, err error) { + if d.keyalg.IsSymmetric() { + var ok bool + cek, ok = d.privkey.([]byte) + if !ok { + return nil, fmt.Errorf("decrypt key: []byte is required as the key to build %s key decrypter (got %T)", d.keyalg, d.privkey) + } + + return d.decryptSymmetricKey(recipientKey, cek) + } + + k, err := d.BuildKeyDecrypter() + if err != nil { + return nil, fmt.Errorf(`failed to build key decrypter: %w`, err) + } + + cek, err = k.Decrypt(recipientKey) + if err != nil { + return nil, fmt.Errorf(`failed to decrypt key: %w`, err) + } + + return cek, nil +} + +func (d *decrypter) BuildKeyDecrypter() (keyenc.Decrypter, error) { + cipher, err := d.ContentCipher() + if err != nil { + return nil, fmt.Errorf(`failed to fetch content crypt cipher: %w`, err) + } + + switch alg := d.keyalg; alg { + case jwa.RSA1_5: + var privkey rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, d.privkey); err != nil { + return nil, fmt.Errorf(`*rsa.PrivateKey is required as the key to build %s key decrypter: %w`, alg, err) + } + + return keyenc.NewRSAPKCS15Decrypt(alg, &privkey, cipher.KeySize()/2), nil + case jwa.RSA_OAEP, jwa.RSA_OAEP_256: + var privkey rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, d.privkey); err != nil { + return nil, fmt.Errorf(`*rsa.PrivateKey is required as the key to build %s key decrypter: %w`, alg, err) + } + + return keyenc.NewRSAOAEPDecrypt(alg, &privkey) + case jwa.A128KW, jwa.A192KW, jwa.A256KW: + sharedkey, ok := d.privkey.([]byte) + if !ok { + return nil, fmt.Errorf("[]byte is required as the key to build %s key decrypter", alg) + } + + return keyenc.NewAES(alg, sharedkey) + case jwa.ECDH_ES, jwa.ECDH_ES_A128KW, jwa.ECDH_ES_A192KW, 
jwa.ECDH_ES_A256KW: + switch d.pubkey.(type) { + case x25519.PublicKey: + return keyenc.NewECDHESDecrypt(alg, d.ctalg, d.pubkey, d.apu, d.apv, d.privkey), nil + default: + var pubkey ecdsa.PublicKey + if err := keyconv.ECDSAPublicKey(&pubkey, d.pubkey); err != nil { + return nil, fmt.Errorf(`*ecdsa.PublicKey is required as the key to build %s key decrypter: %w`, alg, err) + } + + var privkey ecdsa.PrivateKey + if err := keyconv.ECDSAPrivateKey(&privkey, d.privkey); err != nil { + return nil, fmt.Errorf(`*ecdsa.PrivateKey is required as the key to build %s key decrypter: %w`, alg, err) + } + + return keyenc.NewECDHESDecrypt(alg, d.ctalg, &pubkey, d.apu, d.apv, &privkey), nil + } + default: + return nil, fmt.Errorf(`unsupported algorithm for key decryption (%s)`, alg) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/headers.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/headers.go new file mode 100644 index 0000000000..1145591158 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/headers.go @@ -0,0 +1,122 @@ +package jwe + +import ( + "context" + "fmt" + + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/iter" +) + +type isZeroer interface { + isZero() bool +} + +func (h *stdHeaders) isZero() bool { + return h.agreementPartyUInfo == nil && + h.agreementPartyVInfo == nil && + h.algorithm == nil && + h.compression == nil && + h.contentEncryption == nil && + h.contentType == nil && + h.critical == nil && + h.ephemeralPublicKey == nil && + h.jwk == nil && + h.jwkSetURL == nil && + h.keyID == nil && + h.typ == nil && + h.x509CertChain == nil && + h.x509CertThumbprint == nil && + h.x509CertThumbprintS256 == nil && + h.x509URL == nil && + len(h.privateParams) == 0 +} + +// Iterate returns a channel that successively returns all the +// header name and values. 
+func (h *stdHeaders) Iterate(ctx context.Context) Iterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *stdHeaders) Walk(ctx context.Context, visitor Visitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *stdHeaders) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} + +func (h *stdHeaders) Clone(ctx context.Context) (Headers, error) { + dst := NewHeaders() + if err := h.Copy(ctx, dst); err != nil { + return nil, fmt.Errorf(`failed to copy header contents to new object: %w`, err) + } + return dst, nil +} + +func (h *stdHeaders) Copy(ctx context.Context, dst Headers) error { + for _, pair := range h.makePairs() { + //nolint:forcetypeassert + key := pair.Key.(string) + if err := dst.Set(key, pair.Value); err != nil { + return fmt.Errorf(`failed to set header %q: %w`, key, err) + } + } + return nil +} + +func (h *stdHeaders) Merge(ctx context.Context, h2 Headers) (Headers, error) { + h3 := NewHeaders() + + if h != nil { + if err := h.Copy(ctx, h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from receiver: %w`, err) + } + } + + if h2 != nil { + if err := h2.Copy(ctx, h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from argument: %w`, err) + } + } + + return h3, nil +} + +func (h *stdHeaders) Encode() ([]byte, error) { + buf, err := json.Marshal(h) + if err != nil { + return nil, fmt.Errorf(`failed to marshal headers to JSON prior to encoding: %w`, err) + } + + return base64.Encode(buf), nil +} + +func (h *stdHeaders) Decode(buf []byte) error { + // base64 json string -> json object representation of header + decoded, err := base64.Decode(buf) + if err != nil { + return fmt.Errorf(`failed to unmarshal 
base64 encoded buffer: %w`, err) + } + + if err := json.Unmarshal(decoded, h); err != nil { + return fmt.Errorf(`failed to unmarshal buffer: %w`, err) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/headers_gen.go new file mode 100644 index 0000000000..61ce413cba --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/headers_gen.go @@ -0,0 +1,715 @@ +// This file is auto-generated by jwe/internal/cmd/genheaders/main.go. DO NOT EDIT + +package jwe + +import ( + "bytes" + "context" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +const ( + AgreementPartyUInfoKey = "apu" + AgreementPartyVInfoKey = "apv" + AlgorithmKey = "alg" + CompressionKey = "zip" + ContentEncryptionKey = "enc" + ContentTypeKey = "cty" + CriticalKey = "crit" + EphemeralPublicKeyKey = "epk" + JWKKey = "jwk" + JWKSetURLKey = "jku" + KeyIDKey = "kid" + TypeKey = "typ" + X509CertChainKey = "x5c" + X509CertThumbprintKey = "x5t" + X509CertThumbprintS256Key = "x5t#S256" + X509URLKey = "x5u" +) + +// Headers describe a standard Header set. 
+type Headers interface { + json.Marshaler + json.Unmarshaler + AgreementPartyUInfo() []byte + AgreementPartyVInfo() []byte + Algorithm() jwa.KeyEncryptionAlgorithm + Compression() jwa.CompressionAlgorithm + ContentEncryption() jwa.ContentEncryptionAlgorithm + ContentType() string + Critical() []string + EphemeralPublicKey() jwk.Key + JWK() jwk.Key + JWKSetURL() string + KeyID() string + Type() string + X509CertChain() *cert.Chain + X509CertThumbprint() string + X509CertThumbprintS256() string + X509URL() string + Iterate(ctx context.Context) Iterator + Walk(ctx context.Context, v Visitor) error + AsMap(ctx context.Context) (map[string]interface{}, error) + Get(string) (interface{}, bool) + Set(string, interface{}) error + Remove(string) error + Encode() ([]byte, error) + Decode([]byte) error + // PrivateParams returns the map containing the non-standard ('private') parameters + // in the associated header. WARNING: DO NOT USE PrivateParams() + // IF YOU HAVE CONCURRENT CODE ACCESSING THEM. 
Use AsMap() to + // get a copy of the entire header instead + PrivateParams() map[string]interface{} + Clone(context.Context) (Headers, error) + Copy(context.Context, Headers) error + Merge(context.Context, Headers) (Headers, error) +} + +type stdHeaders struct { + agreementPartyUInfo []byte + agreementPartyVInfo []byte + algorithm *jwa.KeyEncryptionAlgorithm + compression *jwa.CompressionAlgorithm + contentEncryption *jwa.ContentEncryptionAlgorithm + contentType *string + critical []string + ephemeralPublicKey jwk.Key + jwk jwk.Key + jwkSetURL *string + keyID *string + typ *string + x509CertChain *cert.Chain + x509CertThumbprint *string + x509CertThumbprintS256 *string + x509URL *string + privateParams map[string]interface{} + mu *sync.RWMutex +} + +func NewHeaders() Headers { + return &stdHeaders{ + mu: &sync.RWMutex{}, + privateParams: map[string]interface{}{}, + } +} + +func (h *stdHeaders) AgreementPartyUInfo() []byte { + h.mu.RLock() + defer h.mu.RUnlock() + return h.agreementPartyUInfo +} + +func (h *stdHeaders) AgreementPartyVInfo() []byte { + h.mu.RLock() + defer h.mu.RUnlock() + return h.agreementPartyVInfo +} + +func (h *stdHeaders) Algorithm() jwa.KeyEncryptionAlgorithm { + h.mu.RLock() + defer h.mu.RUnlock() + if h.algorithm == nil { + return "" + } + return *(h.algorithm) +} + +func (h *stdHeaders) Compression() jwa.CompressionAlgorithm { + h.mu.RLock() + defer h.mu.RUnlock() + if h.compression == nil { + return jwa.NoCompress + } + return *(h.compression) +} + +func (h *stdHeaders) ContentEncryption() jwa.ContentEncryptionAlgorithm { + h.mu.RLock() + defer h.mu.RUnlock() + if h.contentEncryption == nil { + return "" + } + return *(h.contentEncryption) +} + +func (h *stdHeaders) ContentType() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.contentType == nil { + return "" + } + return *(h.contentType) +} + +func (h *stdHeaders) Critical() []string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.critical +} + +func (h *stdHeaders) 
EphemeralPublicKey() jwk.Key { + h.mu.RLock() + defer h.mu.RUnlock() + return h.ephemeralPublicKey +} + +func (h *stdHeaders) JWK() jwk.Key { + h.mu.RLock() + defer h.mu.RUnlock() + return h.jwk +} + +func (h *stdHeaders) JWKSetURL() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.jwkSetURL == nil { + return "" + } + return *(h.jwkSetURL) +} + +func (h *stdHeaders) KeyID() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.keyID == nil { + return "" + } + return *(h.keyID) +} + +func (h *stdHeaders) Type() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.typ == nil { + return "" + } + return *(h.typ) +} + +func (h *stdHeaders) X509CertChain() *cert.Chain { + h.mu.RLock() + defer h.mu.RUnlock() + return h.x509CertChain +} + +func (h *stdHeaders) X509CertThumbprint() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprint == nil { + return "" + } + return *(h.x509CertThumbprint) +} + +func (h *stdHeaders) X509CertThumbprintS256() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprintS256 == nil { + return "" + } + return *(h.x509CertThumbprintS256) +} + +func (h *stdHeaders) X509URL() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509URL == nil { + return "" + } + return *(h.x509URL) +} + +func (h *stdHeaders) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + var pairs []*HeaderPair + if h.agreementPartyUInfo != nil { + pairs = append(pairs, &HeaderPair{Key: AgreementPartyUInfoKey, Value: h.agreementPartyUInfo}) + } + if h.agreementPartyVInfo != nil { + pairs = append(pairs, &HeaderPair{Key: AgreementPartyVInfoKey, Value: h.agreementPartyVInfo}) + } + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.compression != nil { + pairs = append(pairs, &HeaderPair{Key: CompressionKey, Value: *(h.compression)}) + } + if h.contentEncryption != nil { + pairs = append(pairs, &HeaderPair{Key: ContentEncryptionKey, Value: 
*(h.contentEncryption)}) + } + if h.contentType != nil { + pairs = append(pairs, &HeaderPair{Key: ContentTypeKey, Value: *(h.contentType)}) + } + if h.critical != nil { + pairs = append(pairs, &HeaderPair{Key: CriticalKey, Value: h.critical}) + } + if h.ephemeralPublicKey != nil { + pairs = append(pairs, &HeaderPair{Key: EphemeralPublicKeyKey, Value: h.ephemeralPublicKey}) + } + if h.jwk != nil { + pairs = append(pairs, &HeaderPair{Key: JWKKey, Value: h.jwk}) + } + if h.jwkSetURL != nil { + pairs = append(pairs, &HeaderPair{Key: JWKSetURLKey, Value: *(h.jwkSetURL)}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.typ != nil { + pairs = append(pairs, &HeaderPair{Key: TypeKey, Value: *(h.typ)}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *stdHeaders) PrivateParams() map[string]interface{} { + h.mu.RLock() + defer h.mu.RUnlock() + return h.privateParams +} + +func (h *stdHeaders) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case AgreementPartyUInfoKey: + if h.agreementPartyUInfo == nil { + return nil, false + } + return h.agreementPartyUInfo, true + case AgreementPartyVInfoKey: + if h.agreementPartyVInfo == nil { + return nil, false + } + return h.agreementPartyVInfo, true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case 
CompressionKey: + if h.compression == nil { + return nil, false + } + return *(h.compression), true + case ContentEncryptionKey: + if h.contentEncryption == nil { + return nil, false + } + return *(h.contentEncryption), true + case ContentTypeKey: + if h.contentType == nil { + return nil, false + } + return *(h.contentType), true + case CriticalKey: + if h.critical == nil { + return nil, false + } + return h.critical, true + case EphemeralPublicKeyKey: + if h.ephemeralPublicKey == nil { + return nil, false + } + return h.ephemeralPublicKey, true + case JWKKey: + if h.jwk == nil { + return nil, false + } + return h.jwk, true + case JWKSetURLKey: + if h.jwkSetURL == nil { + return nil, false + } + return *(h.jwkSetURL), true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case TypeKey: + if h.typ == nil { + return nil, false + } + return *(h.typ), true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *stdHeaders) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *stdHeaders) setNoLock(name string, value interface{}) error { + switch name { + case AgreementPartyUInfoKey: + if v, ok := value.([]byte); ok { + h.agreementPartyUInfo = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, AgreementPartyUInfoKey, value) + case AgreementPartyVInfoKey: + if v, ok := value.([]byte); ok { + h.agreementPartyVInfo = v + return nil + } + return 
fmt.Errorf(`invalid value for %s key: %T`, AgreementPartyVInfoKey, value) + case AlgorithmKey: + if v, ok := value.(jwa.KeyEncryptionAlgorithm); ok { + h.algorithm = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, AlgorithmKey, value) + case CompressionKey: + if v, ok := value.(jwa.CompressionAlgorithm); ok { + h.compression = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, CompressionKey, value) + case ContentEncryptionKey: + if v, ok := value.(jwa.ContentEncryptionAlgorithm); ok { + if v == "" { + return fmt.Errorf(`"enc" field cannot be an empty string`) + } + h.contentEncryption = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ContentEncryptionKey, value) + case ContentTypeKey: + if v, ok := value.(string); ok { + h.contentType = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value) + case CriticalKey: + if v, ok := value.([]string); ok { + h.critical = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value) + case EphemeralPublicKeyKey: + if v, ok := value.(jwk.Key); ok { + h.ephemeralPublicKey = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, EphemeralPublicKeyKey, value) + case JWKKey: + if v, ok := value.(jwk.Key); ok { + h.jwk = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value) + case JWKSetURLKey: + if v, ok := value.(string); ok { + h.jwkSetURL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case TypeKey: + if v, ok := value.(string); ok { + h.typ = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return 
fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (h *stdHeaders) Remove(key string) error { + h.mu.Lock() + defer h.mu.Unlock() + switch key { + case AgreementPartyUInfoKey: + h.agreementPartyUInfo = nil + case AgreementPartyVInfoKey: + h.agreementPartyVInfo = nil + case AlgorithmKey: + h.algorithm = nil + case CompressionKey: + h.compression = nil + case ContentEncryptionKey: + h.contentEncryption = nil + case ContentTypeKey: + h.contentType = nil + case CriticalKey: + h.critical = nil + case EphemeralPublicKeyKey: + h.ephemeralPublicKey = nil + case JWKKey: + h.jwk = nil + case JWKSetURLKey: + h.jwkSetURL = nil + case KeyIDKey: + h.keyID = nil + case TypeKey: + h.typ = nil + case X509CertChainKey: + h.x509CertChain = nil + case X509CertThumbprintKey: + h.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + h.x509CertThumbprintS256 = nil + case X509URLKey: + h.x509URL = nil + default: + delete(h.privateParams, key) + } + return nil +} + +func (h *stdHeaders) UnmarshalJSON(buf []byte) error { + h.agreementPartyUInfo = nil + h.agreementPartyVInfo = nil + h.algorithm = nil + h.compression = nil + h.contentEncryption = nil + h.contentType = nil + h.critical = nil + h.ephemeralPublicKey = nil + h.jwk = nil + h.jwkSetURL = nil + h.keyID = nil + h.typ = 
nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case AgreementPartyUInfoKey: + if err := json.AssignNextBytesToken(&h.agreementPartyUInfo, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AgreementPartyUInfoKey, err) + } + case AgreementPartyVInfoKey: + if err := json.AssignNextBytesToken(&h.agreementPartyVInfo, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AgreementPartyVInfoKey, err) + } + case AlgorithmKey: + var decoded jwa.KeyEncryptionAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &decoded + case CompressionKey: + var decoded jwa.CompressionAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, CompressionKey, err) + } + h.compression = &decoded + case ContentEncryptionKey: + var decoded jwa.ContentEncryptionAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ContentEncryptionKey, err) + } + h.contentEncryption = &decoded + case ContentTypeKey: + if err := json.AssignNextStringToken(&h.contentType, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err) + } + case CriticalKey: + var decoded []string + if err := dec.Decode(&decoded); err != nil { + return 
fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err) + } + h.critical = decoded + case EphemeralPublicKeyKey: + var buf json.RawMessage + if err := dec.Decode(&buf); err != nil { + return fmt.Errorf(`failed to decode value for key %s:%w`, EphemeralPublicKeyKey, err) + } + key, err := jwk.ParseKey(buf) + if err != nil { + return fmt.Errorf(`failed to parse JWK for key %s: %w`, EphemeralPublicKeyKey, err) + } + h.ephemeralPublicKey = key + case JWKKey: + var buf json.RawMessage + if err := dec.Decode(&buf); err != nil { + return fmt.Errorf(`failed to decode value for key %s:%w`, JWKKey, err) + } + key, err := jwk.ParseKey(buf) + if err != nil { + return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err) + } + h.jwk = key + case JWKSetURLKey: + if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case TypeKey: + if err := json.AssignNextStringToken(&h.typ, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return 
fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + decoded, err := registry.Decode(dec, tok) + if err != nil { + return err + } + h.setNoLock(tok, decoded) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + return nil +} + +func (h stdHeaders) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 16) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s`, f) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/interface.go new file mode 100644 index 0000000000..d1044ce1d4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/interface.go @@ -0,0 +1,160 @@ +package jwe + +import ( + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keygen" +) + +// Recipient holds the encrypted key and hints to decrypt the key +type Recipient interface { + Headers() Headers + EncryptedKey() []byte + SetHeaders(Headers) error + SetEncryptedKey([]byte) error +} + +type stdRecipient struct { + // Comments on each field are taken from https://datatracker.ietf.org/doc/html/rfc7516 + // + // header + // The 
"header" member MUST be present and contain the value JWE Per- + // Recipient Unprotected Header when the JWE Per-Recipient + // Unprotected Header value is non-empty; otherwise, it MUST be + // absent. This value is represented as an unencoded JSON object, + // rather than as a string. These Header Parameter values are not + // integrity protected. + // + // At least one of the "header", "protected", and "unprotected" members + // MUST be present so that "alg" and "enc" Header Parameter values are + // conveyed for each recipient computation. + // + // JWX note: see Message.unprotectedHeaders + headers Headers + + // encrypted_key + // The "encrypted_key" member MUST be present and contain the value + // BASE64URL(JWE Encrypted Key) when the JWE Encrypted Key value is + // non-empty; otherwise, it MUST be absent. + encryptedKey []byte +} + +// Message contains the entire encrypted JWE message. You should not +// expect to use Message for anything other than inspecting the +// state of an encrypted message. This is because encryption is +// highly context sensitive, and once we parse the original payload +// into an object, we may not always be able to recreate the exact +// context in which the encryption happened. +// +// For example, it is totally valid for if the protected header's +// integrity was calculated using a non-standard line breaks: +// +// {"a dummy": +// "protected header"} +// +// Once parsed, though, we can only serialize the protected header as: +// +// {"a dummy":"protected header"} +// +// which would obviously result in a contradicting integrity value +// if we tried to re-calculate it from a parsed message. 
+// +//nolint:govet +type Message struct { + // Comments on each field are taken from https://datatracker.ietf.org/doc/html/rfc7516 + // + // protected + // The "protected" member MUST be present and contain the value + // BASE64URL(UTF8(JWE Protected Header)) when the JWE Protected + // Header value is non-empty; otherwise, it MUST be absent. These + // Header Parameter values are integrity protected. + protectedHeaders Headers + + // unprotected + // The "unprotected" member MUST be present and contain the value JWE + // Shared Unprotected Header when the JWE Shared Unprotected Header + // value is non-empty; otherwise, it MUST be absent. This value is + // represented as an unencoded JSON object, rather than as a string. + // These Header Parameter values are not integrity protected. + // + // JWX note: This field is NOT mutually exclusive with per-recipient + // headers within the implmentation because... it's too much work. + // It is _never_ populated (we don't provide a way to do this) upon encryption. + // When decrypting, if present its values are always merged with + // per-recipient header. + unprotectedHeaders Headers + + // iv + // The "iv" member MUST be present and contain the value + // BASE64URL(JWE Initialization Vector) when the JWE Initialization + // Vector value is non-empty; otherwise, it MUST be absent. + initializationVector []byte + + // aad + // The "aad" member MUST be present and contain the value + // BASE64URL(JWE AAD)) when the JWE AAD value is non-empty; + // otherwise, it MUST be absent. A JWE AAD value can be included to + // supply a base64url-encoded value to be integrity protected but not + // encrypted. + authenticatedData []byte + + // ciphertext + // The "ciphertext" member MUST be present and contain the value + // BASE64URL(JWE Ciphertext). 
+ cipherText []byte + + // tag + // The "tag" member MUST be present and contain the value + // BASE64URL(JWE Authentication Tag) when the JWE Authentication Tag + // value is non-empty; otherwise, it MUST be absent. + tag []byte + + // recipients + // The "recipients" member value MUST be an array of JSON objects. + // Each object contains information specific to a single recipient. + // This member MUST be present with exactly one array element per + // recipient, even if some or all of the array element values are the + // empty JSON object "{}" (which can happen when all Header Parameter + // values are shared between all recipients and when no encrypted key + // is used, such as when doing Direct Encryption). + // + // Some Header Parameters, including the "alg" parameter, can be shared + // among all recipient computations. Header Parameters in the JWE + // Protected Header and JWE Shared Unprotected Header values are shared + // among all recipients. + // + // The Header Parameter values used when creating or validating per- + // recipient ciphertext and Authentication Tag values are the union of + // the three sets of Header Parameter values that may be present: (1) + // the JWE Protected Header represented in the "protected" member, (2) + // the JWE Shared Unprotected Header represented in the "unprotected" + // member, and (3) the JWE Per-Recipient Unprotected Header represented + // in the "header" member of the recipient's array element. The union + // of these sets of Header Parameters comprises the JOSE Header. The + // Header Parameter names in the three locations MUST be disjoint. + recipients []Recipient + + // TODO: Additional members can be present in both the JSON objects defined + // above; if not understood by implementations encountering them, they + // MUST be ignored. + // privateParams map[string]interface{} + + // These two fields below are not available for the public consumers of this object. 
+ // rawProtectedHeaders stores the original protected header buffer + rawProtectedHeaders []byte + // storeProtectedHeaders is a hint to be used in UnmarshalJSON(). + // When this flag is true, UnmarshalJSON() will populate the + // rawProtectedHeaders field + storeProtectedHeaders bool +} + +// populater is an interface for things that may modify the +// JWE header. e.g. ByteWithECPrivateKey +type populater interface { + Populate(keygen.Setter) error +} + +type Visitor = iter.MapVisitor +type VisitorFunc = iter.MapVisitorFunc +type HeaderPair = mapiter.Pair +type Iterator = mapiter.Iterator diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/aescbc/aescbc.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/aescbc/aescbc.go new file mode 100644 index 0000000000..d38245ff6a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/aescbc/aescbc.go @@ -0,0 +1,218 @@ +package aescbc + +import ( + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "fmt" + "hash" +) + +const ( + NonceSize = 16 +) + +func pad(buf []byte, n int) []byte { + rem := n - len(buf)%n + if rem == 0 { + return buf + } + + newbuf := make([]byte, len(buf)+rem) + copy(newbuf, buf) + + for i := len(buf); i < len(newbuf); i++ { + newbuf[i] = byte(rem) + } + return newbuf +} + +func unpad(buf []byte, n int) ([]byte, error) { + lbuf := len(buf) + rem := lbuf % n + + // First, `buf` must be a multiple of `n` + if rem != 0 { + return nil, fmt.Errorf("input buffer must be multiple of block size %d", n) + } + + // Find the last byte, which is the encoded padding + // i.e. 0x1 == 1 byte worth of padding + last := buf[lbuf-1] + + // This is the number of padding bytes that we expect + expected := int(last) + + if expected == 0 || /* we _have_ to have padding here. 
therefore, 0x0 is not an option */ + expected > n || /* we also must make sure that we don't go over the block size (n) */ + expected > lbuf /* finally, it can't be more than the buffer itself. unlikely, but could happen */ { + return nil, fmt.Errorf(`invalid padding byte at the end of buffer`) + } + + // start i = 1 because we have already established that expected == int(last) where + // last = buf[lbuf-1]. + // + // we also don't check against lbuf-i in range, because we have established expected <= lbuf + for i := 1; i < expected; i++ { + if buf[lbuf-i] != last { + return nil, fmt.Errorf(`invalid padding`) + } + } + + return buf[:lbuf-expected], nil +} + +type Hmac struct { + blockCipher cipher.Block + hash func() hash.Hash + keysize int + tagsize int + integrityKey []byte +} + +type BlockCipherFunc func([]byte) (cipher.Block, error) + +func New(key []byte, f BlockCipherFunc) (hmac *Hmac, err error) { + keysize := len(key) / 2 + ikey := key[:keysize] + ekey := key[keysize:] + + bc, ciphererr := f(ekey) + if ciphererr != nil { + err = fmt.Errorf(`failed to execute block cipher function: %w`, ciphererr) + return + } + + var hfunc func() hash.Hash + switch keysize { + case 16: + hfunc = sha256.New + case 24: + hfunc = sha512.New384 + case 32: + hfunc = sha512.New + default: + return nil, fmt.Errorf("unsupported key size %d", keysize) + } + + return &Hmac{ + blockCipher: bc, + hash: hfunc, + integrityKey: ikey, + keysize: keysize, + tagsize: keysize, // NonceSize, + // While investigating GH #207, I stumbled upon another problem where + // the computed tags don't match on decrypt. After poking through the + // code using a bunch of debug statements, I've finally found out that + // tagsize = keysize makes the whole thing work. 
+ }, nil +} + +// NonceSize fulfills the crypto.AEAD interface +func (c Hmac) NonceSize() int { + return NonceSize +} + +// Overhead fulfills the crypto.AEAD interface +func (c Hmac) Overhead() int { + return c.blockCipher.BlockSize() + c.tagsize +} + +func (c Hmac) ComputeAuthTag(aad, nonce, ciphertext []byte) ([]byte, error) { + buf := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8) + n := 0 + n += copy(buf, aad) + n += copy(buf[n:], nonce) + n += copy(buf[n:], ciphertext) + binary.BigEndian.PutUint64(buf[n:], uint64(len(aad)*8)) + + h := hmac.New(c.hash, c.integrityKey) + if _, err := h.Write(buf); err != nil { + return nil, fmt.Errorf(`failed to write ComputeAuthTag using Hmac: %w`, err) + } + s := h.Sum(nil) + return s[:c.tagsize], nil +} + +func ensureSize(dst []byte, n int) []byte { + // if the dst buffer has enough length just copy the relevant parts to it. + // Otherwise create a new slice that's big enough, and operate on that + // Note: I think go-jose has a bug in that it checks for cap(), but not len(). + ret := dst + if diff := n - len(dst); diff > 0 { + // dst is not big enough + ret = make([]byte, n) + copy(ret, dst) + } + return ret +} + +// Seal fulfills the crypto.AEAD interface +func (c Hmac) Seal(dst, nonce, plaintext, data []byte) []byte { + ctlen := len(plaintext) + ciphertext := make([]byte, ctlen+c.Overhead())[:ctlen] + copy(ciphertext, plaintext) + ciphertext = pad(ciphertext, c.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(c.blockCipher, nonce) + cbc.CryptBlocks(ciphertext, ciphertext) + + authtag, err := c.ComputeAuthTag(data, nonce, ciphertext) + if err != nil { + // Hmac implements cipher.AEAD interface. Seal can't return error. + // But currently it never reach here because of Hmac.ComputeAuthTag doesn't return error. 
+ panic(fmt.Errorf("failed to seal on hmac: %v", err)) + } + + retlen := len(dst) + len(ciphertext) + len(authtag) + + ret := ensureSize(dst, retlen) + out := ret[len(dst):] + n := copy(out, ciphertext) + copy(out[n:], authtag) + + return ret +} + +// Open fulfills the crypto.AEAD interface +func (c Hmac) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < c.keysize { + return nil, fmt.Errorf(`invalid ciphertext (too short)`) + } + + tagOffset := len(ciphertext) - c.tagsize + if tagOffset%c.blockCipher.BlockSize() != 0 { + return nil, fmt.Errorf( + "invalid ciphertext (invalid length: %d %% %d != 0)", + tagOffset, + c.blockCipher.BlockSize(), + ) + } + tag := ciphertext[tagOffset:] + ciphertext = ciphertext[:tagOffset] + + expectedTag, err := c.ComputeAuthTag(data, nonce, ciphertext[:tagOffset]) + if err != nil { + return nil, fmt.Errorf(`failed to compute auth tag: %w`, err) + } + + if subtle.ConstantTimeCompare(expectedTag, tag) != 1 { + return nil, fmt.Errorf(`invalid ciphertext (tag mismatch)`) + } + + cbc := cipher.NewCBCDecrypter(c.blockCipher, nonce) + buf := make([]byte, tagOffset) + cbc.CryptBlocks(buf, ciphertext) + + plaintext, err := unpad(buf, c.blockCipher.BlockSize()) + if err != nil { + return nil, fmt.Errorf(`failed to generate plaintext from decrypted blocks: %w`, err) + } + ret := ensureSize(dst, len(plaintext)) + out := ret[len(dst):] + copy(out, plaintext) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/cipher/cipher.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/cipher/cipher.go new file mode 100644 index 0000000000..23f437e8bf --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/cipher/cipher.go @@ -0,0 +1,161 @@ +package cipher + +import ( + "crypto/aes" + "crypto/cipher" + "fmt" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/aescbc" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keygen" +) + +var gcm = 
&gcmFetcher{} +var cbc = &cbcFetcher{} + +func (f gcmFetcher) Fetch(key []byte) (cipher.AEAD, error) { + aescipher, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf(`cipher: failed to create AES cipher for GCM: %w`, err) + } + + aead, err := cipher.NewGCM(aescipher) + if err != nil { + return nil, fmt.Errorf(`failed to create GCM for cipher: %w`, err) + } + return aead, nil +} + +func (f cbcFetcher) Fetch(key []byte) (cipher.AEAD, error) { + aead, err := aescbc.New(key, aes.NewCipher) + if err != nil { + return nil, fmt.Errorf(`cipher: failed to create AES cipher for CBC: %w`, err) + } + return aead, nil +} + +func (c AesContentCipher) KeySize() int { + return c.keysize +} + +func (c AesContentCipher) TagSize() int { + return c.tagsize +} + +func NewAES(alg jwa.ContentEncryptionAlgorithm) (*AesContentCipher, error) { + var keysize int + var tagsize int + var fetcher Fetcher + switch alg { + case jwa.A128GCM: + keysize = 16 + tagsize = 16 + fetcher = gcm + case jwa.A192GCM: + keysize = 24 + tagsize = 16 + fetcher = gcm + case jwa.A256GCM: + keysize = 32 + tagsize = 16 + fetcher = gcm + case jwa.A128CBC_HS256: + tagsize = 16 + keysize = tagsize * 2 + fetcher = cbc + case jwa.A192CBC_HS384: + tagsize = 24 + keysize = tagsize * 2 + fetcher = cbc + case jwa.A256CBC_HS512: + tagsize = 32 + keysize = tagsize * 2 + fetcher = cbc + default: + return nil, fmt.Errorf("failed to create AES content cipher: invalid algorithm (%s)", alg) + } + + return &AesContentCipher{ + keysize: keysize, + tagsize: tagsize, + fetch: fetcher, + }, nil +} + +func (c AesContentCipher) Encrypt(cek, plaintext, aad []byte) (iv, ciphertxt, tag []byte, err error) { + var aead cipher.AEAD + aead, err = c.fetch.Fetch(cek) + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to fetch AEAD: %w`, err) + } + + // Seal may panic (argh!), so protect ourselves from that + defer func() { + if e := recover(); e != nil { + switch e := e.(type) { + case error: + err = e + default: + err 
= fmt.Errorf("%s", e) + } + err = fmt.Errorf(`failed to encrypt: %w`, err) + } + }() + + var bs keygen.ByteSource + if c.NonceGenerator == nil { + bs, err = keygen.NewRandom(aead.NonceSize()).Generate() + } else { + bs, err = c.NonceGenerator.Generate() + } + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to generate nonce: %w`, err) + } + iv = bs.Bytes() + + combined := aead.Seal(nil, iv, plaintext, aad) + tagoffset := len(combined) - c.TagSize() + + if tagoffset < 0 { + panic(fmt.Sprintf("tag offset is less than 0 (combined len = %d, tagsize = %d)", len(combined), c.TagSize())) + } + + tag = combined[tagoffset:] + ciphertxt = make([]byte, tagoffset) + copy(ciphertxt, combined[:tagoffset]) + + return +} + +func (c AesContentCipher) Decrypt(cek, iv, ciphertxt, tag, aad []byte) (plaintext []byte, err error) { + aead, err := c.fetch.Fetch(cek) + if err != nil { + return nil, fmt.Errorf(`failed to fetch AEAD data: %w`, err) + } + + // Open may panic (argh!), so protect ourselves from that + defer func() { + if e := recover(); e != nil { + switch e := e.(type) { + case error: + err = e + default: + err = fmt.Errorf(`%s`, e) + } + err = fmt.Errorf(`failed to decrypt: %w`, err) + return + } + }() + + combined := make([]byte, len(ciphertxt)+len(tag)) + copy(combined, ciphertxt) + copy(combined[len(ciphertxt):], tag) + + buf, aeaderr := aead.Open(nil, iv, combined, aad) + if aeaderr != nil { + err = fmt.Errorf(`aead.Open failed: %w`, aeaderr) + return + } + plaintext = buf + return +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/cipher/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/cipher/interface.go new file mode 100644 index 0000000000..88b50073f8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/cipher/interface.go @@ -0,0 +1,34 @@ +package cipher + +import ( + "crypto/cipher" + + "github.com/lestrrat-go/jwx/v2/jwe/internal/keygen" +) + +const ( + TagSize = 16 +) + +// ContentCipher knows how to 
encrypt/decrypt the content given a content +// encryption key and other data +type ContentCipher interface { + KeySize() int + Encrypt(cek, aad, plaintext []byte) ([]byte, []byte, []byte, error) + Decrypt(cek, iv, aad, ciphertext, tag []byte) ([]byte, error) +} + +type Fetcher interface { + Fetch([]byte) (cipher.AEAD, error) +} + +type gcmFetcher struct{} +type cbcFetcher struct{} + +// AesContentCipher represents a cipher based on AES +type AesContentCipher struct { + NonceGenerator keygen.Generator + fetch Fetcher + keysize int + tagsize int +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/concatkdf/concatkdf.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/concatkdf/concatkdf.go new file mode 100644 index 0000000000..3691830a63 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/concatkdf/concatkdf.go @@ -0,0 +1,66 @@ +package concatkdf + +import ( + "crypto" + "encoding/binary" + "fmt" +) + +type KDF struct { + buf []byte + otherinfo []byte + z []byte + hash crypto.Hash +} + +func ndata(src []byte) []byte { + buf := make([]byte, 4+len(src)) + binary.BigEndian.PutUint32(buf, uint32(len(src))) + copy(buf[4:], src) + return buf +} + +func New(hash crypto.Hash, alg, Z, apu, apv, pubinfo, privinfo []byte) *KDF { + algbuf := ndata(alg) + apubuf := ndata(apu) + apvbuf := ndata(apv) + + concat := make([]byte, len(algbuf)+len(apubuf)+len(apvbuf)+len(pubinfo)+len(privinfo)) + n := copy(concat, algbuf) + n += copy(concat[n:], apubuf) + n += copy(concat[n:], apvbuf) + n += copy(concat[n:], pubinfo) + copy(concat[n:], privinfo) + + return &KDF{ + hash: hash, + otherinfo: concat, + z: Z, + } +} + +func (k *KDF) Read(out []byte) (int, error) { + var round uint32 = 1 + h := k.hash.New() + + for len(out) > len(k.buf) { + h.Reset() + + if err := binary.Write(h, binary.BigEndian, round); err != nil { + return 0, fmt.Errorf(`failed to write round using kdf: %w`, err) + } + if _, err := h.Write(k.z); err != nil { + return 0, 
fmt.Errorf(`failed to write z using kdf: %w`, err) + } + if _, err := h.Write(k.otherinfo); err != nil { + return 0, fmt.Errorf(`failed to write other info using kdf: %w`, err) + } + + k.buf = append(k.buf, h.Sum(nil)...) + round++ + } + + n := copy(out, k.buf[:len(out)]) + k.buf = k.buf[len(out):] + return n, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt/content_crypt.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt/content_crypt.go new file mode 100644 index 0000000000..722e8489c9 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt/content_crypt.go @@ -0,0 +1,43 @@ +package content_crypt //nolint:golint + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/cipher" +) + +func (c Generic) Algorithm() jwa.ContentEncryptionAlgorithm { + return c.alg +} + +func (c Generic) Encrypt(cek, plaintext, aad []byte) ([]byte, []byte, []byte, error) { + iv, encrypted, tag, err := c.cipher.Encrypt(cek, plaintext, aad) + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to crypt content: %w`, err) + } + + return iv, encrypted, tag, nil +} + +func (c Generic) Decrypt(cek, iv, ciphertext, tag, aad []byte) ([]byte, error) { + return c.cipher.Decrypt(cek, iv, ciphertext, tag, aad) +} + +func NewGeneric(alg jwa.ContentEncryptionAlgorithm) (*Generic, error) { + c, err := cipher.NewAES(alg) + if err != nil { + return nil, fmt.Errorf(`aes crypt: failed to create content cipher: %w`, err) + } + + return &Generic{ + alg: alg, + cipher: c, + keysize: c.KeySize(), + tagsize: 16, + }, nil +} + +func (c Generic) KeySize() int { + return c.keysize +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt/interface.go new file mode 100644 index 0000000000..abfaff343a --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt/interface.go @@ -0,0 +1,20 @@ +package content_crypt //nolint:golint + +import ( + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/cipher" +) + +// Generic encrypts a message by applying all the necessary +// modifications to the keys and the contents +type Generic struct { + alg jwa.ContentEncryptionAlgorithm + keysize int + tagsize int + cipher cipher.ContentCipher +} + +type Cipher interface { + Decrypt([]byte, []byte, []byte, []byte, []byte) ([]byte, error) + KeySize() int +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc/interface.go new file mode 100644 index 0000000000..70fe7301ec --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc/interface.go @@ -0,0 +1,106 @@ +package keyenc + +import ( + "crypto/rsa" + "hash" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keygen" +) + +// Encrypter is an interface for things that can encrypt keys +type Encrypter interface { + Algorithm() jwa.KeyEncryptionAlgorithm + Encrypt([]byte) (keygen.ByteSource, error) + // KeyID returns the key id for this Encrypter. This exists so that + // you can pass in a Encrypter to MultiEncrypt, you can rest assured + // that the generated key will have the proper key ID. + KeyID() string + + SetKeyID(string) +} + +// Decrypter is an interface for things that can decrypt keys +type Decrypter interface { + Algorithm() jwa.KeyEncryptionAlgorithm + Decrypt([]byte) ([]byte, error) +} + +type Noop struct { + alg jwa.KeyEncryptionAlgorithm + keyID string + sharedkey []byte +} + +// AES encrypts content encryption keys using AES key wrap. 
+// Contrary to what the name implies, it also decrypt encrypted keys +type AES struct { + alg jwa.KeyEncryptionAlgorithm + keyID string + sharedkey []byte +} + +// AESGCM encrypts content encryption keys using AES-GCM key wrap. +type AESGCMEncrypt struct { + algorithm jwa.KeyEncryptionAlgorithm + keyID string + sharedkey []byte +} + +// ECDHESEncrypt encrypts content encryption keys using ECDH-ES. +type ECDHESEncrypt struct { + algorithm jwa.KeyEncryptionAlgorithm + keyID string + generator keygen.Generator +} + +// ECDHESDecrypt decrypts keys using ECDH-ES. +type ECDHESDecrypt struct { + keyalg jwa.KeyEncryptionAlgorithm + contentalg jwa.ContentEncryptionAlgorithm + apu []byte + apv []byte + privkey interface{} + pubkey interface{} +} + +// RSAOAEPEncrypt encrypts keys using RSA OAEP algorithm +type RSAOAEPEncrypt struct { + alg jwa.KeyEncryptionAlgorithm + pubkey *rsa.PublicKey + keyID string +} + +// RSAOAEPDecrypt decrypts keys using RSA OAEP algorithm +type RSAOAEPDecrypt struct { + alg jwa.KeyEncryptionAlgorithm + privkey *rsa.PrivateKey +} + +// RSAPKCS15Decrypt decrypts keys using RSA PKCS1v15 algorithm +type RSAPKCS15Decrypt struct { + alg jwa.KeyEncryptionAlgorithm + privkey *rsa.PrivateKey + generator keygen.Generator +} + +// RSAPKCSEncrypt encrypts keys using RSA PKCS1v15 algorithm +type RSAPKCSEncrypt struct { + alg jwa.KeyEncryptionAlgorithm + pubkey *rsa.PublicKey + keyID string +} + +// DirectDecrypt does no encryption (Note: Unimplemented) +type DirectDecrypt struct { + Key []byte +} + +// PBES2Encrypt encrypts keys with PBES2 / PBKDF2 password +type PBES2Encrypt struct { + algorithm jwa.KeyEncryptionAlgorithm + hashFunc func() hash.Hash + keylen int + keyID string + password []byte +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc/keyenc.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc/keyenc.go new file mode 100644 index 0000000000..3e19e62b04 --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc/keyenc.go @@ -0,0 +1,660 @@ +package keyenc + +import ( + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "fmt" + "hash" + "io" + + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/pbkdf2" + + "github.com/lestrrat-go/jwx/v2/internal/ecutil" + "github.com/lestrrat-go/jwx/v2/jwa" + contentcipher "github.com/lestrrat-go/jwx/v2/jwe/internal/cipher" + "github.com/lestrrat-go/jwx/v2/jwe/internal/concatkdf" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keygen" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +func NewNoop(alg jwa.KeyEncryptionAlgorithm, sharedkey []byte) (*Noop, error) { + return &Noop{ + alg: alg, + sharedkey: sharedkey, + }, nil +} + +func (kw *Noop) Algorithm() jwa.KeyEncryptionAlgorithm { + return kw.alg +} + +func (kw *Noop) SetKeyID(v string) { + kw.keyID = v +} + +func (kw *Noop) KeyID() string { + return kw.keyID +} + +func (kw *Noop) Encrypt(cek []byte) (keygen.ByteSource, error) { + return keygen.ByteKey(kw.sharedkey), nil +} + +// NewAES creates a key-wrap encrypter using AES. +// Although the name suggests otherwise, this does the decryption as well. 
+func NewAES(alg jwa.KeyEncryptionAlgorithm, sharedkey []byte) (*AES, error) { + return &AES{ + alg: alg, + sharedkey: sharedkey, + }, nil +} + +// Algorithm returns the key encryption algorithm being used +func (kw *AES) Algorithm() jwa.KeyEncryptionAlgorithm { + return kw.alg +} + +func (kw *AES) SetKeyID(v string) { + kw.keyID = v +} + +// KeyID returns the key ID associated with this encrypter +func (kw *AES) KeyID() string { + return kw.keyID +} + +// Decrypt decrypts the encrypted key using AES key unwrap +func (kw *AES) Decrypt(enckey []byte) ([]byte, error) { + block, err := aes.NewCipher(kw.sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err) + } + + cek, err := Unwrap(block, enckey) + if err != nil { + return nil, fmt.Errorf(`failed to unwrap data: %w`, err) + } + return cek, nil +} + +// KeyEncrypt encrypts the given content encryption key +func (kw *AES) Encrypt(cek []byte) (keygen.ByteSource, error) { + block, err := aes.NewCipher(kw.sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err) + } + encrypted, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`keywrap: failed to wrap key: %w`, err) + } + return keygen.ByteKey(encrypted), nil +} + +func NewAESGCMEncrypt(alg jwa.KeyEncryptionAlgorithm, sharedkey []byte) (*AESGCMEncrypt, error) { + return &AESGCMEncrypt{ + algorithm: alg, + sharedkey: sharedkey, + }, nil +} + +func (kw AESGCMEncrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return kw.algorithm +} + +func (kw *AESGCMEncrypt) SetKeyID(v string) { + kw.keyID = v +} + +func (kw AESGCMEncrypt) KeyID() string { + return kw.keyID +} + +func (kw AESGCMEncrypt) Encrypt(cek []byte) (keygen.ByteSource, error) { + block, err := aes.NewCipher(kw.sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err) + } + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return nil, 
fmt.Errorf(`failed to create gcm from cipher: %w`, err) + } + + iv := make([]byte, aesgcm.NonceSize()) + _, err = io.ReadFull(rand.Reader, iv) + if err != nil { + return nil, fmt.Errorf(`failed to get random iv: %w`, err) + } + + encrypted := aesgcm.Seal(nil, iv, cek, nil) + tag := encrypted[len(encrypted)-aesgcm.Overhead():] + ciphertext := encrypted[:len(encrypted)-aesgcm.Overhead()] + return keygen.ByteWithIVAndTag{ + ByteKey: ciphertext, + IV: iv, + Tag: tag, + }, nil +} + +func NewPBES2Encrypt(alg jwa.KeyEncryptionAlgorithm, password []byte) (*PBES2Encrypt, error) { + var hashFunc func() hash.Hash + var keylen int + switch alg { + case jwa.PBES2_HS256_A128KW: + hashFunc = sha256.New + keylen = 16 + case jwa.PBES2_HS384_A192KW: + hashFunc = sha512.New384 + keylen = 24 + case jwa.PBES2_HS512_A256KW: + hashFunc = sha512.New + keylen = 32 + default: + return nil, fmt.Errorf("unexpected key encryption algorithm %s", alg) + } + return &PBES2Encrypt{ + algorithm: alg, + password: password, + hashFunc: hashFunc, + keylen: keylen, + }, nil +} + +func (kw PBES2Encrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return kw.algorithm +} + +func (kw *PBES2Encrypt) SetKeyID(v string) { + kw.keyID = v +} + +func (kw PBES2Encrypt) KeyID() string { + return kw.keyID +} + +func (kw PBES2Encrypt) Encrypt(cek []byte) (keygen.ByteSource, error) { + count := 10000 + salt := make([]byte, kw.keylen) + _, err := io.ReadFull(rand.Reader, salt) + if err != nil { + return nil, fmt.Errorf(`failed to get random salt: %w`, err) + } + + fullsalt := []byte(kw.algorithm) + fullsalt = append(fullsalt, byte(0)) + fullsalt = append(fullsalt, salt...) 
+ sharedkey := pbkdf2.Key(kw.password, fullsalt, count, kw.keylen, kw.hashFunc) + + block, err := aes.NewCipher(sharedkey) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher from shared key: %w`, err) + } + encrypted, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`keywrap: failed to wrap key: %w`, err) + } + return keygen.ByteWithSaltAndCount{ + ByteKey: encrypted, + Salt: salt, + Count: count, + }, nil +} + +// NewECDHESEncrypt creates a new key encrypter based on ECDH-ES +func NewECDHESEncrypt(alg jwa.KeyEncryptionAlgorithm, enc jwa.ContentEncryptionAlgorithm, keysize int, keyif interface{}, apu, apv []byte) (*ECDHESEncrypt, error) { + var generator keygen.Generator + var err error + switch key := keyif.(type) { + case *ecdsa.PublicKey: + generator, err = keygen.NewEcdhes(alg, enc, keysize, key, apu, apv) + case x25519.PublicKey: + generator, err = keygen.NewX25519(alg, enc, keysize, key) + default: + return nil, fmt.Errorf("unexpected key type %T", keyif) + } + if err != nil { + return nil, fmt.Errorf(`failed to create key generator: %w`, err) + } + return &ECDHESEncrypt{ + algorithm: alg, + generator: generator, + }, nil +} + +// Algorithm returns the key encryption algorithm being used +func (kw ECDHESEncrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return kw.algorithm +} + +func (kw *ECDHESEncrypt) SetKeyID(v string) { + kw.keyID = v +} + +// KeyID returns the key ID associated with this encrypter +func (kw ECDHESEncrypt) KeyID() string { + return kw.keyID +} + +// KeyEncrypt encrypts the content encryption key using ECDH-ES +func (kw ECDHESEncrypt) Encrypt(cek []byte) (keygen.ByteSource, error) { + kg, err := kw.generator.Generate() + if err != nil { + return nil, fmt.Errorf(`failed to create key generator: %w`, err) + } + + bwpk, ok := kg.(keygen.ByteWithECPublicKey) + if !ok { + return nil, fmt.Errorf(`key generator generated invalid key (expected ByteWithECPrivateKey)`) + } + + if kw.algorithm == jwa.ECDH_ES { + 
return bwpk, nil + } + + block, err := aes.NewCipher(bwpk.Bytes()) + if err != nil { + return nil, fmt.Errorf(`failed to generate cipher from generated key: %w`, err) + } + + jek, err := Wrap(block, cek) + if err != nil { + return nil, fmt.Errorf(`failed to wrap data: %w`, err) + } + + bwpk.ByteKey = keygen.ByteKey(jek) + + return bwpk, nil +} + +// NewECDHESDecrypt creates a new key decrypter using ECDH-ES +func NewECDHESDecrypt(keyalg jwa.KeyEncryptionAlgorithm, contentalg jwa.ContentEncryptionAlgorithm, pubkey interface{}, apu, apv []byte, privkey interface{}) *ECDHESDecrypt { + return &ECDHESDecrypt{ + keyalg: keyalg, + contentalg: contentalg, + apu: apu, + apv: apv, + privkey: privkey, + pubkey: pubkey, + } +} + +// Algorithm returns the key encryption algorithm being used +func (kw ECDHESDecrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return kw.keyalg +} + +func DeriveZ(privkeyif interface{}, pubkeyif interface{}) ([]byte, error) { + switch privkeyif.(type) { + case x25519.PrivateKey: + privkey, ok := privkeyif.(x25519.PrivateKey) + if !ok { + return nil, fmt.Errorf(`private key must be x25519.PrivateKey, was: %T`, privkeyif) + } + pubkey, ok := pubkeyif.(x25519.PublicKey) + if !ok { + return nil, fmt.Errorf(`public key must be x25519.PublicKey, was: %T`, pubkeyif) + } + return curve25519.X25519(privkey.Seed(), pubkey) + default: + privkey, ok := privkeyif.(*ecdsa.PrivateKey) + if !ok { + return nil, fmt.Errorf(`private key must be *ecdsa.PrivateKey, was: %T`, privkeyif) + } + pubkey, ok := pubkeyif.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf(`public key must be *ecdsa.PublicKey, was: %T`, pubkeyif) + } + if !privkey.PublicKey.Curve.IsOnCurve(pubkey.X, pubkey.Y) { + return nil, fmt.Errorf(`public key must be on the same curve as private key`) + } + + z, _ := privkey.PublicKey.Curve.ScalarMult(pubkey.X, pubkey.Y, privkey.D.Bytes()) + zBytes := ecutil.AllocECPointBuffer(z, privkey.Curve) + defer ecutil.ReleaseECPointBuffer(zBytes) + zCopy := 
make([]byte, len(zBytes)) + copy(zCopy, zBytes) + return zCopy, nil + } +} + +func DeriveECDHES(alg, apu, apv []byte, privkey interface{}, pubkey interface{}, keysize uint32) ([]byte, error) { + pubinfo := make([]byte, 4) + binary.BigEndian.PutUint32(pubinfo, keysize*8) + zBytes, err := DeriveZ(privkey, pubkey) + if err != nil { + return nil, fmt.Errorf(`unable to determine Z: %w`, err) + } + kdf := concatkdf.New(crypto.SHA256, alg, zBytes, apu, apv, pubinfo, []byte{}) + key := make([]byte, keysize) + if _, err := kdf.Read(key); err != nil { + return nil, fmt.Errorf(`failed to read kdf: %w`, err) + } + + return key, nil +} + +// Decrypt decrypts the encrypted key using ECDH-ES +func (kw ECDHESDecrypt) Decrypt(enckey []byte) ([]byte, error) { + var algBytes []byte + var keysize uint32 + + // Use keyalg except for when jwa.ECDH_ES + algBytes = []byte(kw.keyalg.String()) + + switch kw.keyalg { + case jwa.ECDH_ES: + // Create a content cipher from the content encryption algorithm + c, err := contentcipher.NewAES(kw.contentalg) + if err != nil { + return nil, fmt.Errorf(`failed to create content cipher for %s: %w`, kw.contentalg, err) + } + keysize = uint32(c.KeySize()) + algBytes = []byte(kw.contentalg.String()) + case jwa.ECDH_ES_A128KW: + keysize = 16 + case jwa.ECDH_ES_A192KW: + keysize = 24 + case jwa.ECDH_ES_A256KW: + keysize = 32 + default: + return nil, fmt.Errorf("invalid ECDH-ES key wrap algorithm (%s)", kw.keyalg) + } + + key, err := DeriveECDHES(algBytes, kw.apu, kw.apv, kw.privkey, kw.pubkey, keysize) + if err != nil { + return nil, fmt.Errorf(`failed to derive ECDHES encryption key: %w`, err) + } + + // ECDH-ES does not wrap keys + if kw.keyalg == jwa.ECDH_ES { + return key, nil + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf(`failed to create cipher for ECDH-ES key wrap: %w`, err) + } + + return Unwrap(block, enckey) +} + +// NewRSAOAEPEncrypt creates a new key encrypter using RSA OAEP +func NewRSAOAEPEncrypt(alg 
jwa.KeyEncryptionAlgorithm, pubkey *rsa.PublicKey) (*RSAOAEPEncrypt, error) { + switch alg { + case jwa.RSA_OAEP, jwa.RSA_OAEP_256: + default: + return nil, fmt.Errorf("invalid RSA OAEP encrypt algorithm (%s)", alg) + } + return &RSAOAEPEncrypt{ + alg: alg, + pubkey: pubkey, + }, nil +} + +// NewRSAPKCSEncrypt creates a new key encrypter using PKCS1v15 +func NewRSAPKCSEncrypt(alg jwa.KeyEncryptionAlgorithm, pubkey *rsa.PublicKey) (*RSAPKCSEncrypt, error) { + switch alg { + case jwa.RSA1_5: + default: + return nil, fmt.Errorf("invalid RSA PKCS encrypt algorithm (%s)", alg) + } + + return &RSAPKCSEncrypt{ + alg: alg, + pubkey: pubkey, + }, nil +} + +// Algorithm returns the key encryption algorithm being used +func (e RSAPKCSEncrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return e.alg +} + +func (e *RSAPKCSEncrypt) SetKeyID(v string) { + e.keyID = v +} + +// KeyID returns the key ID associated with this encrypter +func (e RSAPKCSEncrypt) KeyID() string { + return e.keyID +} + +// Algorithm returns the key encryption algorithm being used +func (e RSAOAEPEncrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return e.alg +} + +func (e *RSAOAEPEncrypt) SetKeyID(v string) { + e.keyID = v +} + +// KeyID returns the key ID associated with this encrypter +func (e RSAOAEPEncrypt) KeyID() string { + return e.keyID +} + +// KeyEncrypt encrypts the content encryption key using RSA PKCS1v15 +func (e RSAPKCSEncrypt) Encrypt(cek []byte) (keygen.ByteSource, error) { + if e.alg != jwa.RSA1_5 { + return nil, fmt.Errorf("invalid RSA PKCS encrypt algorithm (%s)", e.alg) + } + encrypted, err := rsa.EncryptPKCS1v15(rand.Reader, e.pubkey, cek) + if err != nil { + return nil, fmt.Errorf(`failed to encrypt using PKCS1v15: %w`, err) + } + return keygen.ByteKey(encrypted), nil +} + +// KeyEncrypt encrypts the content encryption key using RSA OAEP +func (e RSAOAEPEncrypt) Encrypt(cek []byte) (keygen.ByteSource, error) { + var hash hash.Hash + switch e.alg { + case jwa.RSA_OAEP: + hash = 
sha1.New() + case jwa.RSA_OAEP_256: + hash = sha256.New() + default: + return nil, fmt.Errorf(`failed to generate key encrypter for RSA-OAEP: RSA_OAEP/RSA_OAEP_256 required`) + } + encrypted, err := rsa.EncryptOAEP(hash, rand.Reader, e.pubkey, cek, []byte{}) + if err != nil { + return nil, fmt.Errorf(`failed to OAEP encrypt: %w`, err) + } + return keygen.ByteKey(encrypted), nil +} + +// NewRSAPKCS15Decrypt creates a new decrypter using RSA PKCS1v15 +func NewRSAPKCS15Decrypt(alg jwa.KeyEncryptionAlgorithm, privkey *rsa.PrivateKey, keysize int) *RSAPKCS15Decrypt { + generator := keygen.NewRandom(keysize * 2) + return &RSAPKCS15Decrypt{ + alg: alg, + privkey: privkey, + generator: generator, + } +} + +// Algorithm returns the key encryption algorithm being used +func (d RSAPKCS15Decrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return d.alg +} + +// Decrypt decrypts the encrypted key using RSA PKCS1v1.5 +func (d RSAPKCS15Decrypt) Decrypt(enckey []byte) ([]byte, error) { + // Hey, these notes and workarounds were stolen from go-jose + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + expectedlen := d.privkey.PublicKey.N.BitLen() / 8 + if expectedlen != len(enckey) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. 
+ return nil, fmt.Errorf( + "input size for key decrypt is incorrect (expected %d, got %d)", + expectedlen, + len(enckey), + ) + } + + var err error + + bk, err := d.generator.Generate() + if err != nil { + return nil, fmt.Errorf(`failed to generate key`) + } + cek := bk.Bytes() + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. + err = rsa.DecryptPKCS1v15SessionKey(rand.Reader, d.privkey, enckey, cek) + if err != nil { + return nil, fmt.Errorf(`failed to decrypt via PKCS1v15: %w`, err) + } + + return cek, nil +} + +// NewRSAOAEPDecrypt creates a new key decrypter using RSA OAEP +func NewRSAOAEPDecrypt(alg jwa.KeyEncryptionAlgorithm, privkey *rsa.PrivateKey) (*RSAOAEPDecrypt, error) { + switch alg { + case jwa.RSA_OAEP, jwa.RSA_OAEP_256: + default: + return nil, fmt.Errorf("invalid RSA OAEP decrypt algorithm (%s)", alg) + } + + return &RSAOAEPDecrypt{ + alg: alg, + privkey: privkey, + }, nil +} + +// Algorithm returns the key encryption algorithm being used +func (d RSAOAEPDecrypt) Algorithm() jwa.KeyEncryptionAlgorithm { + return d.alg +} + +// Decrypt decrypts the encrypted key using RSA OAEP +func (d RSAOAEPDecrypt) Decrypt(enckey []byte) ([]byte, error) { + var hash hash.Hash + switch d.alg { + case jwa.RSA_OAEP: + hash = sha1.New() + case jwa.RSA_OAEP_256: + hash = sha256.New() + default: + return nil, fmt.Errorf(`failed to generate key encrypter for RSA-OAEP: RSA_OAEP/RSA_OAEP_256 required`) + } + return rsa.DecryptOAEP(hash, rand.Reader, d.privkey, enckey, []byte{}) +} + +// Decrypt for DirectDecrypt does not do anything other than +// return a copy of the embedded key +func (d DirectDecrypt) Decrypt() ([]byte, error) { + cek := make([]byte, len(d.Key)) + copy(cek, d.Key) + return cek, nil +} + +var keywrapDefaultIV = []byte{0xa6, 0xa6, 
0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6} + +const keywrapChunkLen = 8 + +func Wrap(kek cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, fmt.Errorf(`keywrap input must be 8 byte blocks`) + } + + n := len(cek) / keywrapChunkLen + r := make([][]byte, n) + + for i := 0; i < n; i++ { + r[i] = make([]byte, keywrapChunkLen) + copy(r[i], cek[i*keywrapChunkLen:]) + } + + buffer := make([]byte, keywrapChunkLen*2) + tBytes := make([]byte, keywrapChunkLen) + copy(buffer, keywrapDefaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[keywrapChunkLen:], r[t%n]) + + kek.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < keywrapChunkLen; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[keywrapChunkLen:]) + } + + out := make([]byte, (n+1)*keywrapChunkLen) + copy(out, buffer[:keywrapChunkLen]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +func Unwrap(block cipher.Block, ciphertxt []byte) ([]byte, error) { + if len(ciphertxt)%keywrapChunkLen != 0 { + return nil, fmt.Errorf(`keyunwrap input must be %d byte blocks`, keywrapChunkLen) + } + + n := (len(ciphertxt) / keywrapChunkLen) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, keywrapChunkLen) + copy(r[i], ciphertxt[(i+1)*keywrapChunkLen:]) + } + + buffer := make([]byte, keywrapChunkLen*2) + tBytes := make([]byte, keywrapChunkLen) + copy(buffer[:keywrapChunkLen], ciphertxt[:keywrapChunkLen]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < keywrapChunkLen; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[keywrapChunkLen:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[keywrapChunkLen:]) + } + + if subtle.ConstantTimeCompare(buffer[:keywrapChunkLen], keywrapDefaultIV) == 0 { + return nil, fmt.Errorf(`key unwrap: failed to unwrap key`) + } + + out := make([]byte, n*keywrapChunkLen) + for i := range r { + 
copy(out[i*keywrapChunkLen:], r[i]) + } + + return out, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keygen/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keygen/interface.go new file mode 100644 index 0000000000..10543c0566 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keygen/interface.go @@ -0,0 +1,75 @@ +package keygen + +import ( + "crypto/ecdsa" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +type Generator interface { + Size() int + Generate() (ByteSource, error) +} + +// StaticKeyGenerate uses a static byte buffer to provide keys. +type Static []byte + +// RandomKeyGenerate generates random keys +type Random struct { + keysize int +} + +// EcdhesKeyGenerate generates keys using ECDH-ES algorithm / EC-DSA curve +type Ecdhes struct { + pubkey *ecdsa.PublicKey + keysize int + algorithm jwa.KeyEncryptionAlgorithm + enc jwa.ContentEncryptionAlgorithm + apu []byte + apv []byte +} + +// X25519KeyGenerate generates keys using ECDH-ES algorithm / X25519 curve +type X25519 struct { + algorithm jwa.KeyEncryptionAlgorithm + enc jwa.ContentEncryptionAlgorithm + keysize int + pubkey x25519.PublicKey +} + +// ByteKey is a generated key that only has the key's byte buffer +// as its instance data. If a key needs to do more, such as providing +// values to be set in a JWE header, that key type wraps a ByteKey +type ByteKey []byte + +// ByteWithECPublicKey holds the EC private key that generated +// the key along with the key itself. This is required to set the +// proper values in the JWE headers +type ByteWithECPublicKey struct { + ByteKey + PublicKey interface{} +} + +type ByteWithIVAndTag struct { + ByteKey + IV []byte + Tag []byte +} + +type ByteWithSaltAndCount struct { + ByteKey + Salt []byte + Count int +} + +// ByteSource is an interface for things that return a byte sequence. 
+// This is used for KeyGenerator so that the result of computations can +// carry more than just the generate byte sequence. +type ByteSource interface { + Bytes() []byte +} + +type Setter interface { + Set(string, interface{}) error +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keygen/keygen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keygen/keygen.go new file mode 100644 index 0000000000..0d9c7ece9b --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/internal/keygen/keygen.go @@ -0,0 +1,197 @@ +package keygen + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "encoding/binary" + "fmt" + "io" + + "golang.org/x/crypto/curve25519" + + "github.com/lestrrat-go/jwx/v2/internal/ecutil" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/concatkdf" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +// Bytes returns the byte from this ByteKey +func (k ByteKey) Bytes() []byte { + return []byte(k) +} + +// Size returns the size of the key +func (g Static) Size() int { + return len(g) +} + +// Generate returns the key +func (g Static) Generate() (ByteSource, error) { + buf := make([]byte, g.Size()) + copy(buf, g) + return ByteKey(buf), nil +} + +// NewRandom creates a new Generator that returns +// random bytes +func NewRandom(n int) Random { + return Random{keysize: n} +} + +// Size returns the key size +func (g Random) Size() int { + return g.keysize +} + +// Generate generates a random new key +func (g Random) Generate() (ByteSource, error) { + buf := make([]byte, g.keysize) + if _, err := io.ReadFull(rand.Reader, buf); err != nil { + return nil, fmt.Errorf(`failed to read from rand.Reader: %w`, err) + } + return ByteKey(buf), nil +} + +// NewEcdhes creates a new key generator using ECDH-ES +func NewEcdhes(alg jwa.KeyEncryptionAlgorithm, enc jwa.ContentEncryptionAlgorithm, keysize int, pubkey *ecdsa.PublicKey, apu, apv []byte) (*Ecdhes, error) { + return 
&Ecdhes{ + algorithm: alg, + enc: enc, + keysize: keysize, + pubkey: pubkey, + apu: apu, + apv: apv, + }, nil +} + +// Size returns the key size associated with this generator +func (g Ecdhes) Size() int { + return g.keysize +} + +// Generate generates new keys using ECDH-ES +func (g Ecdhes) Generate() (ByteSource, error) { + priv, err := ecdsa.GenerateKey(g.pubkey.Curve, rand.Reader) + if err != nil { + return nil, fmt.Errorf(`failed to generate key for ECDH-ES: %w`, err) + } + + var algorithm string + if g.algorithm == jwa.ECDH_ES { + algorithm = g.enc.String() + } else { + algorithm = g.algorithm.String() + } + + pubinfo := make([]byte, 4) + binary.BigEndian.PutUint32(pubinfo, uint32(g.keysize)*8) + + if !priv.PublicKey.Curve.IsOnCurve(g.pubkey.X, g.pubkey.Y) { + return nil, fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`) + } + z, _ := priv.PublicKey.Curve.ScalarMult(g.pubkey.X, g.pubkey.Y, priv.D.Bytes()) + zBytes := ecutil.AllocECPointBuffer(z, priv.PublicKey.Curve) + defer ecutil.ReleaseECPointBuffer(zBytes) + kdf := concatkdf.New(crypto.SHA256, []byte(algorithm), zBytes, g.apu, g.apv, pubinfo, []byte{}) + kek := make([]byte, g.keysize) + if _, err := kdf.Read(kek); err != nil { + return nil, fmt.Errorf(`failed to read kdf: %w`, err) + } + + return ByteWithECPublicKey{ + PublicKey: &priv.PublicKey, + ByteKey: ByteKey(kek), + }, nil +} + +// NewX25519 creates a new key generator using ECDH-ES +func NewX25519(alg jwa.KeyEncryptionAlgorithm, enc jwa.ContentEncryptionAlgorithm, keysize int, pubkey x25519.PublicKey) (*X25519, error) { + return &X25519{ + algorithm: alg, + enc: enc, + keysize: keysize, + pubkey: pubkey, + }, nil +} + +// Size returns the key size associated with this generator +func (g X25519) Size() int { + return g.keysize +} + +// Generate generates new keys using ECDH-ES +func (g X25519) Generate() (ByteSource, error) { + pub, priv, err := x25519.GenerateKey(rand.Reader) + if err != nil { + return nil, 
fmt.Errorf(`failed to generate key for X25519: %w`, err) + } + + var algorithm string + if g.algorithm == jwa.ECDH_ES { + algorithm = g.enc.String() + } else { + algorithm = g.algorithm.String() + } + + pubinfo := make([]byte, 4) + binary.BigEndian.PutUint32(pubinfo, uint32(g.keysize)*8) + + zBytes, err := curve25519.X25519(priv.Seed(), g.pubkey) + if err != nil { + return nil, fmt.Errorf(`failed to compute Z: %w`, err) + } + kdf := concatkdf.New(crypto.SHA256, []byte(algorithm), zBytes, []byte{}, []byte{}, pubinfo, []byte{}) + kek := make([]byte, g.keysize) + if _, err := kdf.Read(kek); err != nil { + return nil, fmt.Errorf(`failed to read kdf: %w`, err) + } + + return ByteWithECPublicKey{ + PublicKey: pub, + ByteKey: ByteKey(kek), + }, nil +} + +// HeaderPopulate populates the header with the required EC-DSA public key +// information ('epk' key) +func (k ByteWithECPublicKey) Populate(h Setter) error { + key, err := jwk.FromRaw(k.PublicKey) + if err != nil { + return fmt.Errorf(`failed to create JWK: %w`, err) + } + + if err := h.Set("epk", key); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + return nil +} + +// HeaderPopulate populates the header with the required AES GCM +// parameters ('iv' and 'tag') +func (k ByteWithIVAndTag) Populate(h Setter) error { + if err := h.Set("iv", k.IV); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + if err := h.Set("tag", k.Tag); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + return nil +} + +// HeaderPopulate populates the header with the required PBES2 +// parameters ('p2s' and 'p2c') +func (k ByteWithSaltAndCount) Populate(h Setter) error { + if err := h.Set("p2c", k.Count); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + if err := h.Set("p2s", k.Salt); err != nil { + return fmt.Errorf(`failed to write header: %w`, err) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/io.go 
b/vendor/github.com/lestrrat-go/jwx/v2/jwe/io.go new file mode 100644 index 0000000000..e101199847 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/io.go @@ -0,0 +1,42 @@ +// Automatically generated by internal/cmd/genreadfile/main.go. DO NOT EDIT + +package jwe + +import ( + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (*Message, error) { + var parseOptions []ParseOption + var readFileOptions []ReadFileOption + for _, option := range options { + if po, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, po) + } else { + readFileOptions = append(readFileOptions, option) + } + } + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + srcFS = option.Value().(fs.FS) + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/jwe.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/jwe.go new file mode 100644 index 0000000000..dfd86132ed --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/jwe.go @@ -0,0 +1,805 @@ +//go:generate ../tools/cmd/genjwe.sh + +// Package jwe implements JWE as described in https://tools.ietf.org/html/rfc7516 +package jwe + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "io" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/keyconv" + "github.com/lestrrat-go/jwx/v2/jwk" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc" + "github.com/lestrrat-go/jwx/v2/jwe/internal/keygen" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +const ( + fmtInvalid = iota + 
fmtCompact + fmtJSON + fmtJSONPretty + fmtMax +) + +var _ = fmtInvalid +var _ = fmtMax + +var registry = json.NewRegistry() + +type recipientBuilder struct { + alg jwa.KeyEncryptionAlgorithm + key interface{} + headers Headers +} + +func (b *recipientBuilder) Build(cek []byte, calg jwa.ContentEncryptionAlgorithm, cc *content_crypt.Generic) (Recipient, []byte, error) { + // we need the raw key + rawKey := b.key + + var keyID string + if jwkKey, ok := b.key.(jwk.Key); ok { + // Meanwhile, grab the kid as well + keyID = jwkKey.KeyID() + + var raw interface{} + if err := jwkKey.Raw(&raw); err != nil { + return nil, nil, fmt.Errorf(`failed to retrieve raw key out of %T: %w`, b.key, err) + } + + rawKey = raw + } + + // First, create a key encryptor + var enc keyenc.Encrypter + switch b.alg { + case jwa.RSA1_5: + var pubkey rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, rawKey); err != nil { + return nil, nil, fmt.Errorf(`failed to generate public key from key (%T): %w`, rawKey, err) + } + + v, err := keyenc.NewRSAPKCSEncrypt(b.alg, &pubkey) + if err != nil { + return nil, nil, fmt.Errorf(`failed to create RSA PKCS encrypter: %w`, err) + } + enc = v + case jwa.RSA_OAEP, jwa.RSA_OAEP_256: + var pubkey rsa.PublicKey + if err := keyconv.RSAPublicKey(&pubkey, rawKey); err != nil { + return nil, nil, fmt.Errorf(`failed to generate public key from key (%T): %w`, rawKey, err) + } + + v, err := keyenc.NewRSAOAEPEncrypt(b.alg, &pubkey) + if err != nil { + return nil, nil, fmt.Errorf(`failed to create RSA OAEP encrypter: %w`, err) + } + enc = v + case jwa.A128KW, jwa.A192KW, jwa.A256KW, + jwa.A128GCMKW, jwa.A192GCMKW, jwa.A256GCMKW, + jwa.PBES2_HS256_A128KW, jwa.PBES2_HS384_A192KW, jwa.PBES2_HS512_A256KW: + sharedkey, ok := rawKey.([]byte) + if !ok { + return nil, nil, fmt.Errorf(`invalid key: []byte required (%T)`, rawKey) + } + + var err error + switch b.alg { + case jwa.A128KW, jwa.A192KW, jwa.A256KW: + enc, err = keyenc.NewAES(b.alg, sharedkey) + case 
jwa.PBES2_HS256_A128KW, jwa.PBES2_HS384_A192KW, jwa.PBES2_HS512_A256KW: + enc, err = keyenc.NewPBES2Encrypt(b.alg, sharedkey) + default: + enc, err = keyenc.NewAESGCMEncrypt(b.alg, sharedkey) + } + if err != nil { + return nil, nil, fmt.Errorf(`failed to create key wrap encrypter: %w`, err) + } + // NOTE: there was formerly a restriction, introduced + // in PR #26, which disallowed certain key/content + // algorithm combinations. This seemed bogus, and + // interop with the jose tool demonstrates it. + case jwa.ECDH_ES, jwa.ECDH_ES_A128KW, jwa.ECDH_ES_A192KW, jwa.ECDH_ES_A256KW: + var keysize int + switch b.alg { + case jwa.ECDH_ES: + // https://tools.ietf.org/html/rfc7518#page-15 + // In Direct Key Agreement mode, the output of the Concat KDF MUST be a + // key of the same length as that used by the "enc" algorithm. + keysize = cc.KeySize() + case jwa.ECDH_ES_A128KW: + keysize = 16 + case jwa.ECDH_ES_A192KW: + keysize = 24 + case jwa.ECDH_ES_A256KW: + keysize = 32 + } + + switch key := rawKey.(type) { + case x25519.PublicKey: + var apu, apv []byte + if hdrs := b.headers; hdrs != nil { + apu = hdrs.AgreementPartyUInfo() + apv = hdrs.AgreementPartyVInfo() + } + + v, err := keyenc.NewECDHESEncrypt(b.alg, calg, keysize, rawKey, apu, apv) + if err != nil { + return nil, nil, fmt.Errorf(`failed to create ECDHS key wrap encrypter: %w`, err) + } + enc = v + default: + var pubkey ecdsa.PublicKey + if err := keyconv.ECDSAPublicKey(&pubkey, rawKey); err != nil { + return nil, nil, fmt.Errorf(`failed to generate public key from key (%T): %w`, key, err) + } + + var apu, apv []byte + if hdrs := b.headers; hdrs != nil { + apu = hdrs.AgreementPartyUInfo() + apv = hdrs.AgreementPartyVInfo() + } + + v, err := keyenc.NewECDHESEncrypt(b.alg, calg, keysize, &pubkey, apu, apv) + if err != nil { + return nil, nil, fmt.Errorf(`failed to create ECDHS key wrap encrypter: %w`, err) + } + enc = v + } + case jwa.DIRECT: + sharedkey, ok := rawKey.([]byte) + if !ok { + return nil, nil, 
fmt.Errorf("invalid key: []byte required") + } + enc, _ = keyenc.NewNoop(b.alg, sharedkey) + default: + return nil, nil, fmt.Errorf(`invalid key encryption algorithm (%s)`, b.alg) + } + + if keyID != "" { + enc.SetKeyID(keyID) + } + + r := NewRecipient() + if hdrs := b.headers; hdrs != nil { + _ = r.SetHeaders(hdrs) + } + + if err := r.Headers().Set(AlgorithmKey, b.alg); err != nil { + return nil, nil, fmt.Errorf(`failed to set header: %w`, err) + } + if v := enc.KeyID(); v != "" { + if err := r.Headers().Set(KeyIDKey, v); err != nil { + return nil, nil, fmt.Errorf(`failed to set header: %w`, err) + } + } + + var rawCEK []byte + enckey, err := enc.Encrypt(cek) + if err != nil { + return nil, nil, fmt.Errorf(`failed to encrypt key: %w`, err) + } + if enc.Algorithm() == jwa.ECDH_ES || enc.Algorithm() == jwa.DIRECT { + rawCEK = enckey.Bytes() + } else { + if err := r.SetEncryptedKey(enckey.Bytes()); err != nil { + return nil, nil, fmt.Errorf(`failed to set encrypted key: %w`, err) + } + } + + if hp, ok := enckey.(populater); ok { + if err := hp.Populate(r.Headers()); err != nil { + return nil, nil, fmt.Errorf(`failed to populate: %w`, err) + } + } + + return r, rawCEK, nil +} + +// Encrypt generates a JWE message for the given payload and returns +// it in serialized form, which can be in either compact or +// JSON format. Default is compact. +// +// You must pass at least one key to `jwe.Encrypt()` by using `jwe.WithKey()` +// option. +// +// jwe.Encrypt(payload, jwe.WithKey(alg, key)) +// jwe.Encrypt(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2)) +// +// Note that in the second example the `jws.WithJSON()` option is +// specified as well. This is because the compact serialization +// format does not support multiple recipients, and users must +// specifically ask for the JSON serialization format. +// +// Read the documentation for `jwe.WithKey()` to learn more about the +// possible values that can be used for `alg` and `key`. 
+// +// Look for options that return `jwe.EncryptOption` or `jws.EncryptDecryptOption` +// for a complete list of options that can be passed to this function. +func Encrypt(payload []byte, options ...EncryptOption) ([]byte, error) { + // default content encryption algorithm + calg := jwa.A256GCM + + // default compression is "none" + compression := jwa.NoCompress + + format := fmtCompact + + // builds each "recipient" with encrypted_key and headers + var builders []*recipientBuilder + + var protected Headers + var mergeProtected bool + var useRawCEK bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identKey{}: + data := option.Value().(*withKey) + v, ok := data.alg.(jwa.KeyEncryptionAlgorithm) + if !ok { + return nil, fmt.Errorf(`jwe.Encrypt: expected alg to be jwa.KeyEncryptionAlgorithm, but got %T`, data.alg) + } + + switch v { + case jwa.DIRECT, jwa.ECDH_ES: + useRawCEK = true + } + + builders = append(builders, &recipientBuilder{ + alg: v, + key: data.key, + headers: data.headers, + }) + case identContentEncryptionAlgorithm{}: + calg = option.Value().(jwa.ContentEncryptionAlgorithm) + case identCompress{}: + compression = option.Value().(jwa.CompressionAlgorithm) + case identMergeProtectedHeaders{}: + mergeProtected = option.Value().(bool) + case identProtectedHeaders{}: + v := option.Value().(Headers) + if !mergeProtected || protected == nil { + protected = v + } else { + ctx := context.TODO() + merged, err := protected.Merge(ctx, v) + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to merge headers: %w`, err) + } + protected = merged + } + case identSerialization{}: + format = option.Value().(int) + } + } + + // We need to have at least one builder + switch l := len(builders); { + case l == 0: + return nil, fmt.Errorf(`jwe.Encrypt: missing key encryption builders: use jwe.WithKey() to specify one`) + case l > 1: + if format == fmtCompact { + return nil, fmt.Errorf(`jwe.Encrypt: cannot use compact 
serialization when multiple recipients exist (check the number of WithKey() argument, or use WithJSON())`) + } + } + + if useRawCEK { + if len(builders) != 1 { + return nil, fmt.Errorf(`jwe.Encrypt: multiple recipients for ECDH-ES/DIRECT mode supported`) + } + } + + // There is exactly one content encrypter. + contentcrypt, err := content_crypt.NewGeneric(calg) + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to create AES encrypter: %w`, err) + } + + generator := keygen.NewRandom(contentcrypt.KeySize()) + bk, err := generator.Generate() + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to generate key: %w`, err) + } + cek := bk.Bytes() + + recipients := make([]Recipient, len(builders)) + for i, builder := range builders { + // some builders require hint from the contentcrypt object + r, rawCEK, err := builder.Build(cek, calg, contentcrypt) + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to create recipient #%d: %w`, i, err) + } + recipients[i] = r + + // Kinda feels weird, but if useRawCEK == true, we asserted earlier + // that len(builders) == 1, so this is OK + if useRawCEK { + cek = rawCEK + } + } + + if protected == nil { + protected = NewHeaders() + } + + if err := protected.Set(ContentEncryptionKey, calg); err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to set "enc" in protected header: %w`, err) + } + + if compression != jwa.NoCompress { + payload, err = compress(payload) + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to compress payload before encryption: %w`, err) + } + if err := protected.Set(CompressionKey, compression); err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to set "zip" in protected header: %w`, err) + } + } + + // If there's only one recipient, you want to include that in the + // protected header + if len(recipients) == 1 { + h, err := protected.Merge(context.TODO(), recipients[0].Headers()) + if err != nil { + return nil, fmt.Errorf(`jwe.Encrypt: failed to 
merge protected headers: %w`, err) + } + protected = h + } + + aad, err := protected.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to base64 encode protected headers: %w`, err) + } + + iv, ciphertext, tag, err := contentcrypt.Encrypt(cek, payload, aad) + if err != nil { + return nil, fmt.Errorf(`failed to encrypt payload: %w`, err) + } + + msg := NewMessage() + + if err := msg.Set(CipherTextKey, ciphertext); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, CipherTextKey, err) + } + if err := msg.Set(InitializationVectorKey, iv); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, InitializationVectorKey, err) + } + if err := msg.Set(ProtectedHeadersKey, protected); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, ProtectedHeadersKey, err) + } + if err := msg.Set(RecipientsKey, recipients); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, RecipientsKey, err) + } + if err := msg.Set(TagKey, tag); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, TagKey, err) + } + + switch format { + case fmtCompact: + return Compact(msg) + case fmtJSON: + return json.Marshal(msg) + case fmtJSONPretty: + return json.MarshalIndent(msg, "", " ") + default: + return nil, fmt.Errorf(`jwe.Encrypt: invalid serialization`) + } +} + +type decryptCtx struct { + msg *Message + aad []byte + computedAad []byte + keyProviders []KeyProvider + protectedHeaders Headers +} + +// Decrypt takes the key encryption algorithm and the corresponding +// key to decrypt the JWE message, and returns the decrypted payload. +// The JWE message can be either compact or full JSON format. +// +// `alg` accepts a `jwa.KeyAlgorithm` for convenience so you can directly pass +// the result of `(jwk.Key).Algorithm()`, but in practice it must be of type +// `jwa.KeyEncryptionAlgorithm` or otherwise it will cause an error. +// +// `key` must be a private key. It can be either in its raw format (e.g. 
*rsa.PrivateKey) or a jwk.Key +func Decrypt(buf []byte, options ...DecryptOption) ([]byte, error) { + var keyProviders []KeyProvider + var keyUsed interface{} + + var dst *Message + //nolint:forcetypeassert + for _, option := range options { + switch option.Ident() { + case identMessage{}: + dst = option.Value().(*Message) + case identKeyProvider{}: + keyProviders = append(keyProviders, option.Value().(KeyProvider)) + case identKeyUsed{}: + keyUsed = option.Value() + case identKey{}: + pair := option.Value().(*withKey) + alg, ok := pair.alg.(jwa.KeyEncryptionAlgorithm) + if !ok { + return nil, fmt.Errorf(`WithKey() option must be specified using jwa.KeyEncryptionAlgorithm (got %T)`, pair.alg) + } + keyProviders = append(keyProviders, &staticKeyProvider{ + alg: alg, + key: pair.key, + }) + } + } + + if len(keyProviders) < 1 { + return nil, fmt.Errorf(`jwe.Decrypt: no key providers have been provided (see jwe.WithKey(), jwe.WithKeySet(), and jwe.WithKeyProvider()`) + } + + msg, err := parseJSONOrCompact(buf, true) + if err != nil { + return nil, fmt.Errorf(`failed to parse buffer for Decrypt: %w`, err) + } + + // Process things that are common to the message + ctx := context.TODO() + h, err := msg.protectedHeaders.Clone(ctx) + if err != nil { + return nil, fmt.Errorf(`failed to copy protected headers: %w`, err) + } + h, err = h.Merge(ctx, msg.unprotectedHeaders) + if err != nil { + return nil, fmt.Errorf(`failed to merge headers for message decryption: %w`, err) + } + + var aad []byte + if aadContainer := msg.authenticatedData; aadContainer != nil { + aad = base64.Encode(aadContainer) + } + + var computedAad []byte + if len(msg.rawProtectedHeaders) > 0 { + computedAad = msg.rawProtectedHeaders + } else { + // this is probably not required once msg.Decrypt is deprecated + var err error + computedAad, err = msg.protectedHeaders.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to encode protected headers: %w`, err) + } + } + + // for each recipient, attempt 
to match the key providers + // if we have no recipients, pretend like we only have one + recipients := msg.recipients + if len(recipients) == 0 { + r := NewRecipient() + if err := r.SetHeaders(msg.protectedHeaders); err != nil { + return nil, fmt.Errorf(`failed to set headers to recipient: %w`, err) + } + recipients = append(recipients, r) + } + + var dctx decryptCtx + + dctx.aad = aad + dctx.computedAad = computedAad + dctx.msg = msg + dctx.keyProviders = keyProviders + dctx.protectedHeaders = h + + var lastError error + for _, recipient := range recipients { + decrypted, err := dctx.try(ctx, recipient, keyUsed) + if err != nil { + lastError = err + continue + } + if dst != nil { + *dst = *msg + dst.rawProtectedHeaders = nil + dst.storeProtectedHeaders = false + } + return decrypted, nil + } + return nil, fmt.Errorf(`jwe.Decrypt: failed to decrypt any of the recipients (last error = %w)`, lastError) +} + +func (dctx *decryptCtx) try(ctx context.Context, recipient Recipient, keyUsed interface{}) ([]byte, error) { + var tried int + var lastError error + for i, kp := range dctx.keyProviders { + var sink algKeySink + if err := kp.FetchKeys(ctx, &sink, recipient, dctx.msg); err != nil { + return nil, fmt.Errorf(`key provider %d failed: %w`, i, err) + } + + for _, pair := range sink.list { + tried++ + // alg is converted here because pair.alg is of type jwa.KeyAlgorithm. 
+ // this may seem ugly, but we're trying to avoid declaring separate + // structs for `alg jwa.KeyAlgorithm` and `alg jwa.SignatureAlgorithm` + //nolint:forcetypeassert + alg := pair.alg.(jwa.KeyEncryptionAlgorithm) + key := pair.key + + decrypted, err := dctx.decryptKey(ctx, alg, key, recipient) + if err != nil { + lastError = err + continue + } + + if keyUsed != nil { + if err := blackmagic.AssignIfCompatible(keyUsed, key); err != nil { + return nil, fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, keyUsed, err) + } + } + return decrypted, nil + } + } + return nil, fmt.Errorf(`jwe.Decrypt: tried %d keys, but failed to match any of the keys with recipient (last error = %s)`, tried, lastError) +} + +func (dctx *decryptCtx) decryptKey(ctx context.Context, alg jwa.KeyEncryptionAlgorithm, key interface{}, recipient Recipient) ([]byte, error) { + if jwkKey, ok := key.(jwk.Key); ok { + var raw interface{} + if err := jwkKey.Raw(&raw); err != nil { + return nil, fmt.Errorf(`failed to retrieve raw key from %T: %w`, key, err) + } + key = raw + } + + dec := newDecrypter(alg, dctx.msg.protectedHeaders.ContentEncryption(), key). + AuthenticatedData(dctx.aad). + ComputedAuthenticatedData(dctx.computedAad). + InitializationVector(dctx.msg.initializationVector). 
+ Tag(dctx.msg.tag) + + if recipient.Headers().Algorithm() != alg { + // algorithms don't match + return nil, fmt.Errorf(`jwe.Decrypt: key and recipient algorithms do not match`) + } + + h2, err := dctx.protectedHeaders.Clone(ctx) + if err != nil { + return nil, fmt.Errorf(`jwe.Decrypt: failed to copy headers (1): %w`, err) + } + + h2, err = h2.Merge(ctx, recipient.Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to copy headers (2): %w`, err) + } + + switch alg { + case jwa.ECDH_ES, jwa.ECDH_ES_A128KW, jwa.ECDH_ES_A192KW, jwa.ECDH_ES_A256KW: + epkif, ok := h2.Get(EphemeralPublicKeyKey) + if !ok { + return nil, fmt.Errorf(`failed to get 'epk' field`) + } + switch epk := epkif.(type) { + case jwk.ECDSAPublicKey: + var pubkey ecdsa.PublicKey + if err := epk.Raw(&pubkey); err != nil { + return nil, fmt.Errorf(`failed to get public key: %w`, err) + } + dec.PublicKey(&pubkey) + case jwk.OKPPublicKey: + var pubkey interface{} + if err := epk.Raw(&pubkey); err != nil { + return nil, fmt.Errorf(`failed to get public key: %w`, err) + } + dec.PublicKey(pubkey) + default: + return nil, fmt.Errorf("unexpected 'epk' type %T for alg %s", epkif, alg) + } + + if apu := h2.AgreementPartyUInfo(); len(apu) > 0 { + dec.AgreementPartyUInfo(apu) + } + if apv := h2.AgreementPartyVInfo(); len(apv) > 0 { + dec.AgreementPartyVInfo(apv) + } + case jwa.A128GCMKW, jwa.A192GCMKW, jwa.A256GCMKW: + ivB64, ok := h2.Get(InitializationVectorKey) + if !ok { + return nil, fmt.Errorf(`failed to get 'iv' field`) + } + ivB64Str, ok := ivB64.(string) + if !ok { + return nil, fmt.Errorf("unexpected type for 'iv': %T", ivB64) + } + tagB64, ok := h2.Get(TagKey) + if !ok { + return nil, fmt.Errorf(`failed to get 'tag' field`) + } + tagB64Str, ok := tagB64.(string) + if !ok { + return nil, fmt.Errorf("unexpected type for 'tag': %T", tagB64) + } + iv, err := base64.DecodeString(ivB64Str) + if err != nil { + return nil, fmt.Errorf(`failed to b64-decode 'iv': %w`, err) + } + tag, err := 
base64.DecodeString(tagB64Str) + if err != nil { + return nil, fmt.Errorf(`failed to b64-decode 'tag': %w`, err) + } + dec.KeyInitializationVector(iv) + dec.KeyTag(tag) + case jwa.PBES2_HS256_A128KW, jwa.PBES2_HS384_A192KW, jwa.PBES2_HS512_A256KW: + saltB64, ok := h2.Get(SaltKey) + if !ok { + return nil, fmt.Errorf(`failed to get 'p2s' field`) + } + saltB64Str, ok := saltB64.(string) + if !ok { + return nil, fmt.Errorf("unexpected type for 'p2s': %T", saltB64) + } + + count, ok := h2.Get(CountKey) + if !ok { + return nil, fmt.Errorf(`failed to get 'p2c' field`) + } + countFlt, ok := count.(float64) + if !ok { + return nil, fmt.Errorf("unexpected type for 'p2c': %T", count) + } + salt, err := base64.DecodeString(saltB64Str) + if err != nil { + return nil, fmt.Errorf(`failed to b64-decode 'salt': %w`, err) + } + dec.KeySalt(salt) + dec.KeyCount(int(countFlt)) + } + + plaintext, err := dec.Decrypt(recipient.EncryptedKey(), dctx.msg.cipherText) + if err != nil { + return nil, fmt.Errorf(`jwe.Decrypt: decryption failed: %w`, err) + } + + if h2.Compression() == jwa.Deflate { + buf, err := uncompress(plaintext) + if err != nil { + return nil, fmt.Errorf(`jwe.Derypt: failed to uncompress payload: %w`, err) + } + plaintext = buf + } + + if plaintext == nil { + return nil, fmt.Errorf(`failed to find matching recipient`) + } + + return plaintext, nil +} + +// Parse parses the JWE message into a Message object. The JWE message +// can be either compact or full JSON format. +// +// Parse() currently does not take any options, but the API accepts it +// in anticipation of future addition. 
+func Parse(buf []byte, _ ...ParseOption) (*Message, error) { + return parseJSONOrCompact(buf, false) +} + +func parseJSONOrCompact(buf []byte, storeProtectedHeaders bool) (*Message, error) { + buf = bytes.TrimSpace(buf) + if len(buf) == 0 { + return nil, fmt.Errorf(`empty buffer`) + } + + if buf[0] == '{' { + return parseJSON(buf, storeProtectedHeaders) + } + return parseCompact(buf, storeProtectedHeaders) +} + +// ParseString is the same as Parse, but takes a string. +func ParseString(s string) (*Message, error) { + return Parse([]byte(s)) +} + +// ParseReader is the same as Parse, but takes an io.Reader. +func ParseReader(src io.Reader) (*Message, error) { + buf, err := io.ReadAll(src) + if err != nil { + return nil, fmt.Errorf(`failed to read from io.Reader: %w`, err) + } + return Parse(buf) +} + +func parseJSON(buf []byte, storeProtectedHeaders bool) (*Message, error) { + m := NewMessage() + m.storeProtectedHeaders = storeProtectedHeaders + if err := json.Unmarshal(buf, &m); err != nil { + return nil, fmt.Errorf(`failed to parse JSON: %w`, err) + } + return m, nil +} + +func parseCompact(buf []byte, storeProtectedHeaders bool) (*Message, error) { + parts := bytes.Split(buf, []byte{'.'}) + if len(parts) != 5 { + return nil, fmt.Errorf(`compact JWE format must have five parts (%d)`, len(parts)) + } + + hdrbuf, err := base64.Decode(parts[0]) + if err != nil { + return nil, fmt.Errorf(`failed to parse first part of compact form: %w`, err) + } + + protected := NewHeaders() + if err := json.Unmarshal(hdrbuf, protected); err != nil { + return nil, fmt.Errorf(`failed to parse header JSON: %w`, err) + } + + ivbuf, err := base64.Decode(parts[2]) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode iv: %w`, err) + } + + ctbuf, err := base64.Decode(parts[3]) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode content: %w`, err) + } + + tagbuf, err := base64.Decode(parts[4]) + if err != nil { + return nil, fmt.Errorf(`failed to base64 decode 
tag: %w`, err) + } + + m := NewMessage() + if err := m.Set(CipherTextKey, ctbuf); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, CipherTextKey, err) + } + if err := m.Set(InitializationVectorKey, ivbuf); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, InitializationVectorKey, err) + } + if err := m.Set(ProtectedHeadersKey, protected); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, ProtectedHeadersKey, err) + } + + if err := m.makeDummyRecipient(string(parts[1]), protected); err != nil { + return nil, fmt.Errorf(`failed to setup recipient: %w`, err) + } + + if err := m.Set(TagKey, tagbuf); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, TagKey, err) + } + + if storeProtectedHeaders { + // This is later used for decryption. + m.rawProtectedHeaders = parts[0] + } + + return m, nil +} + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. 
+// +// In that case you would register a custom field as follows +// +// jwe.RegisterCustomField(`x-birthday`, timeT) +// +// Then `hdr.Get("x-birthday")` will still return an `interface{}`, +// but you can convert its type to `time.Time` +// +// bdayif, _ := hdr.Get(`x-birthday`) +// bday := bdayif.(time.Time) +func RegisterCustomField(name string, object interface{}) { + registry.Register(name, object) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/key_provider.go new file mode 100644 index 0000000000..746980fca1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/key_provider.go @@ -0,0 +1,161 @@ +package jwe + +import ( + "context" + "fmt" + "sync" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +// KeyProvider is responsible for providing key(s) to encrypt or decrypt a payload. +// Multiple `jwe.KeyProvider`s can be passed to `jwe.Encrypt()` or `jwe.Decrypt()` +// +// `jwe.Encrypt()` can only accept static key providers via `jwe.WithKey()`, +// while `jwe.Derypt()` can accept `jwe.WithKey()`, `jwe.WithKeySet()`, +// and `jwe.WithKeyProvider()`. +// +// Understanding how this works is crucial to learn how this package works. +// Here we will use `jwe.Decrypt()` as an example to show how the `KeyProvider` +// works. +// +// `jwe.Encrypt()` is straightforward: the content encryption key is encrypted +// using the provided keys, and JWS recipient objects are created for each. +// +// `jwe.Decrypt()` is a bit more involved, because there are cases you +// will want to compute/deduce/guess the keys that you would like to +// use for decryption. +// +// The first thing that `jwe.Decrypt()` needs to do is to collect the +// KeyProviders from the option list that the user provided (presented in pseudocode): +// +// keyProviders := filterKeyProviders(options) +// +// Then, remember that a JWE message may contain multiple recipients in the +// message. 
For each recipient, we call on the KeyProviders to give us +// the key(s) to use on this signature: +// +// for r in msg.Recipients { +// for kp in keyProviders { +// kp.FetcKeys(ctx, sink, r, msg) +// ... +// } +// } +// +// The `sink` argument passed to the KeyProvider is a temporary storage +// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider` +// is responsible for sending keys into the `sink`. +// +// When called, the `KeyProvider` created by `jwe.WithKey()` sends the same key, +// `jwe.WithKeySet()` sends keys that matches a particular `kid` and `alg`, +// and finally `jwe.WithKeyProvider()` allows you to execute arbitrary +// logic to provide keys. If you are providing a custom `KeyProvider`, +// you should execute the necessary checks or retrieval of keys, and +// then send the key(s) to the sink: +// +// sink.Key(alg, key) +// +// These keys are then retrieved and tried for each signature, until +// a match is found: +// +// keys := sink.Keys() +// for key in keys { +// if decryptJWEKey(recipient.EncryptedKey(), key) { +// return OK +// } +// } +type KeyProvider interface { + FetchKeys(context.Context, KeySink, Recipient, *Message) error +} + +// KeySink is a data storage where `jwe.KeyProvider` objects should +// send their keys to. 
+type KeySink interface { + Key(jwa.KeyEncryptionAlgorithm, interface{}) +} + +type algKeyPair struct { + alg jwa.KeyAlgorithm + key interface{} +} + +type algKeySink struct { + mu sync.Mutex + list []algKeyPair +} + +func (s *algKeySink) Key(alg jwa.KeyEncryptionAlgorithm, key interface{}) { + s.mu.Lock() + s.list = append(s.list, algKeyPair{alg, key}) + s.mu.Unlock() +} + +type staticKeyProvider struct { + alg jwa.KeyEncryptionAlgorithm + key interface{} +} + +func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ Recipient, _ *Message) error { + sink.Key(kp.alg, kp.key) + return nil +} + +type keySetProvider struct { + set jwk.Set + requireKid bool +} + +func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, _ Recipient, _ *Message) error { + if usage := key.KeyUsage(); usage != "" && usage != jwk.ForEncryption.String() { + return nil + } + + if v := key.Algorithm(); v.String() != "" { + var alg jwa.KeyEncryptionAlgorithm + if err := alg.Accept(v); err != nil { + return fmt.Errorf(`invalid key encryption algorithm %s: %w`, key.Algorithm(), err) + } + + sink.Key(alg, key) + return nil + } + + return nil +} + +func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, r Recipient, msg *Message) error { + if kp.requireKid { + var key jwk.Key + + wantedKid := r.Headers().KeyID() + if wantedKid == "" { + return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`) + } + // Otherwise we better be able to look up the key, baby. + v, ok := kp.set.LookupKeyID(wantedKid) + if !ok { + return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid) + } + key = v + + return kp.selectKey(sink, key, r, msg) + } + + for i := 0; i < kp.set.Len(); i++ { + key, _ := kp.set.Key(i) + if err := kp.selectKey(sink, key, r, msg); err != nil { + continue + } + } + return nil +} + +// KeyProviderFunc is a type of KeyProvider that is implemented by +// a single function. 
You can use this to create ad-hoc `KeyProvider` +// instances. +type KeyProviderFunc func(context.Context, KeySink, Recipient, *Message) error + +func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, r Recipient, msg *Message) error { + return kp(ctx, sink, r, msg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/message.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/message.go new file mode 100644 index 0000000000..0088082d92 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/message.go @@ -0,0 +1,547 @@ +package jwe + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" +) + +// NewRecipient creates a Recipient object +func NewRecipient() Recipient { + return &stdRecipient{ + headers: NewHeaders(), + } +} + +func (r *stdRecipient) SetHeaders(h Headers) error { + r.headers = h + return nil +} + +func (r *stdRecipient) SetEncryptedKey(v []byte) error { + r.encryptedKey = v + return nil +} + +func (r *stdRecipient) Headers() Headers { + return r.headers +} + +func (r *stdRecipient) EncryptedKey() []byte { + return r.encryptedKey +} + +type recipientMarshalProxy struct { + Headers Headers `json:"header"` + EncryptedKey string `json:"encrypted_key"` +} + +func (r *stdRecipient) UnmarshalJSON(buf []byte) error { + var proxy recipientMarshalProxy + proxy.Headers = NewHeaders() + if err := json.Unmarshal(buf, &proxy); err != nil { + return fmt.Errorf(`failed to unmarshal json into recipient: %w`, err) + } + + r.headers = proxy.Headers + decoded, err := base64.DecodeString(proxy.EncryptedKey) + if err != nil { + return fmt.Errorf(`failed to decode "encrypted_key": %w`, err) + } + r.encryptedKey = decoded + return nil +} + +func (r *stdRecipient) MarshalJSON() ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + buf.WriteString(`{"header":`) + hdrbuf, err := 
r.headers.MarshalJSON() + if err != nil { + return nil, fmt.Errorf(`failed to marshal recipient header: %w`, err) + } + buf.Write(hdrbuf) + buf.WriteString(`,"encrypted_key":"`) + buf.WriteString(base64.EncodeToString(r.encryptedKey)) + buf.WriteString(`"}`) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +// NewMessage creates a new message +func NewMessage() *Message { + return &Message{} +} + +func (m *Message) AuthenticatedData() []byte { + return m.authenticatedData +} + +func (m *Message) CipherText() []byte { + return m.cipherText +} + +func (m *Message) InitializationVector() []byte { + return m.initializationVector +} + +func (m *Message) Tag() []byte { + return m.tag +} + +func (m *Message) ProtectedHeaders() Headers { + return m.protectedHeaders +} + +func (m *Message) Recipients() []Recipient { + return m.recipients +} + +func (m *Message) UnprotectedHeaders() Headers { + return m.unprotectedHeaders +} + +const ( + AuthenticatedDataKey = "aad" + CipherTextKey = "ciphertext" + CountKey = "p2c" + InitializationVectorKey = "iv" + ProtectedHeadersKey = "protected" + RecipientsKey = "recipients" + SaltKey = "p2s" + TagKey = "tag" + UnprotectedHeadersKey = "unprotected" + HeadersKey = "header" + EncryptedKeyKey = "encrypted_key" +) + +func (m *Message) Set(k string, v interface{}) error { + switch k { + case AuthenticatedDataKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, AuthenticatedDataKey) + } + m.authenticatedData = buf + case CipherTextKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, CipherTextKey) + } + m.cipherText = buf + case InitializationVectorKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, InitializationVectorKey) + } + m.initializationVector = buf + case ProtectedHeadersKey: + cv, ok := v.(Headers) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, 
ProtectedHeadersKey) + } + m.protectedHeaders = cv + case RecipientsKey: + cv, ok := v.([]Recipient) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, RecipientsKey) + } + m.recipients = cv + case TagKey: + buf, ok := v.([]byte) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, TagKey) + } + m.tag = buf + case UnprotectedHeadersKey: + cv, ok := v.(Headers) + if !ok { + return fmt.Errorf(`invalid value %T for %s key`, v, UnprotectedHeadersKey) + } + m.unprotectedHeaders = cv + default: + if m.unprotectedHeaders == nil { + m.unprotectedHeaders = NewHeaders() + } + return m.unprotectedHeaders.Set(k, v) + } + return nil +} + +type messageMarshalProxy struct { + AuthenticatedData string `json:"aad,omitempty"` + CipherText string `json:"ciphertext"` + InitializationVector string `json:"iv,omitempty"` + ProtectedHeaders json.RawMessage `json:"protected"` + Recipients []json.RawMessage `json:"recipients,omitempty"` + Tag string `json:"tag,omitempty"` + UnprotectedHeaders Headers `json:"unprotected,omitempty"` + + // For flattened structure. 
Headers is NOT a Headers type, + // so that we can detect its presence by checking proxy.Headers != nil + Headers json.RawMessage `json:"header,omitempty"` + EncryptedKey string `json:"encrypted_key,omitempty"` +} + +type jsonKV struct { + Key string + Value string +} + +func (m *Message) MarshalJSON() ([]byte, error) { + // This is slightly convoluted, but we need to encode the + // protected headers, so we do it by hand + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + enc := json.NewEncoder(buf) + + var fields []jsonKV + + if cipherText := m.CipherText(); len(cipherText) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(cipherText)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, CipherTextKey, err) + } + fields = append(fields, jsonKV{ + Key: CipherTextKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if iv := m.InitializationVector(); len(iv) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(iv)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, InitializationVectorKey, err) + } + fields = append(fields, jsonKV{ + Key: InitializationVectorKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + var encodedProtectedHeaders []byte + if h := m.ProtectedHeaders(); h != nil { + v, err := h.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to encode protected headers: %w`, err) + } + + encodedProtectedHeaders = v + if len(encodedProtectedHeaders) <= 2 { // '{}' + encodedProtectedHeaders = nil + } else { + fields = append(fields, jsonKV{ + Key: ProtectedHeadersKey, + Value: fmt.Sprintf("%q", encodedProtectedHeaders), + }) + } + } + + if aad := m.AuthenticatedData(); len(aad) > 0 { + aad = base64.Encode(aad) + if encodedProtectedHeaders != nil { + tmp := append(encodedProtectedHeaders, '.') + aad = append(tmp, aad...) 
+ } + + buf.Reset() + if err := enc.Encode(aad); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, AuthenticatedDataKey, err) + } + fields = append(fields, jsonKV{ + Key: AuthenticatedDataKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if recipients := m.Recipients(); len(recipients) > 0 { + if len(recipients) == 1 { // Use flattened format + if hdrs := recipients[0].Headers(); hdrs != nil { + buf.Reset() + if err := enc.Encode(hdrs); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, HeadersKey, err) + } + fields = append(fields, jsonKV{ + Key: HeadersKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if ek := recipients[0].EncryptedKey(); len(ek) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(ek)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, EncryptedKeyKey, err) + } + fields = append(fields, jsonKV{ + Key: EncryptedKeyKey, + Value: strings.TrimSpace(buf.String()), + }) + } + } else { + buf.Reset() + if err := enc.Encode(recipients); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, RecipientsKey, err) + } + fields = append(fields, jsonKV{ + Key: RecipientsKey, + Value: strings.TrimSpace(buf.String()), + }) + } + } + + if tag := m.Tag(); len(tag) > 0 { + buf.Reset() + if err := enc.Encode(base64.EncodeToString(tag)); err != nil { + return nil, fmt.Errorf(`failed to encode %s field: %w`, TagKey, err) + } + fields = append(fields, jsonKV{ + Key: TagKey, + Value: strings.TrimSpace(buf.String()), + }) + } + + if h := m.UnprotectedHeaders(); h != nil { + unprotected, err := json.Marshal(h) + if err != nil { + return nil, fmt.Errorf(`failed to encode unprotected headers: %w`, err) + } + + if len(unprotected) > 2 { + fields = append(fields, jsonKV{ + Key: UnprotectedHeadersKey, + Value: fmt.Sprintf("%q", unprotected), + }) + } + } + + sort.Slice(fields, func(i, j int) bool { + return fields[i].Key < fields[j].Key + }) + buf.Reset() + 
fmt.Fprintf(buf, `{`) + for i, kv := range fields { + if i > 0 { + fmt.Fprintf(buf, `,`) + } + fmt.Fprintf(buf, `%q:%s`, kv.Key, kv.Value) + } + fmt.Fprintf(buf, `}`) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (m *Message) UnmarshalJSON(buf []byte) error { + var proxy messageMarshalProxy + proxy.UnprotectedHeaders = NewHeaders() + + if err := json.Unmarshal(buf, &proxy); err != nil { + return fmt.Errorf(`failed to unmashal JSON into message: %w`, err) + } + + // Get the string value + var protectedHeadersStr string + if err := json.Unmarshal(proxy.ProtectedHeaders, &protectedHeadersStr); err != nil { + return fmt.Errorf(`failed to decode protected headers (1): %w`, err) + } + + // It's now in _quoted_ base64 string. Decode it + protectedHeadersRaw, err := base64.DecodeString(protectedHeadersStr) + if err != nil { + return fmt.Errorf(`failed to base64 decoded protected headers buffer: %w`, err) + } + + h := NewHeaders() + if err := json.Unmarshal(protectedHeadersRaw, h); err != nil { + return fmt.Errorf(`failed to decode protected headers (2): %w`, err) + } + + // if this were a flattened message, we would see a "header" and "ciphertext" + // field. TODO: do both of these conditions need to meet, or just one? 
+ if proxy.Headers != nil || len(proxy.EncryptedKey) > 0 { + recipient := NewRecipient() + hdrs := NewHeaders() + if err := json.Unmarshal(proxy.Headers, hdrs); err != nil { + return fmt.Errorf(`failed to decode headers field: %w`, err) + } + + if err := recipient.SetHeaders(hdrs); err != nil { + return fmt.Errorf(`failed to set new headers: %w`, err) + } + + if v := proxy.EncryptedKey; len(v) > 0 { + buf, err := base64.DecodeString(v) + if err != nil { + return fmt.Errorf(`failed to decode encrypted key: %w`, err) + } + if err := recipient.SetEncryptedKey(buf); err != nil { + return fmt.Errorf(`failed to set encrypted key: %w`, err) + } + } + + m.recipients = append(m.recipients, recipient) + } else { + for i, recipientbuf := range proxy.Recipients { + recipient := NewRecipient() + if err := json.Unmarshal(recipientbuf, recipient); err != nil { + return fmt.Errorf(`failed to decode recipient at index %d: %w`, i, err) + } + + m.recipients = append(m.recipients, recipient) + } + } + + if src := proxy.AuthenticatedData; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "aad": %w`, err) + } + m.authenticatedData = v + } + + if src := proxy.CipherText; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "ciphertext": %w`, err) + } + m.cipherText = v + } + + if src := proxy.InitializationVector; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "iv": %w`, err) + } + m.initializationVector = v + } + + if src := proxy.Tag; len(src) > 0 { + v, err := base64.DecodeString(src) + if err != nil { + return fmt.Errorf(`failed to decode "tag": %w`, err) + } + m.tag = v + } + + m.protectedHeaders = h + if m.storeProtectedHeaders { + // this is later used for decryption + m.rawProtectedHeaders = base64.Encode(protectedHeadersRaw) + } + + if iz, ok := proxy.UnprotectedHeaders.(isZeroer); ok { + if !iz.isZero() { + 
m.unprotectedHeaders = proxy.UnprotectedHeaders + } + } + + if len(m.recipients) == 0 { + if err := m.makeDummyRecipient(proxy.EncryptedKey, m.protectedHeaders); err != nil { + return fmt.Errorf(`failed to setup recipient: %w`, err) + } + } + + return nil +} + +func (m *Message) makeDummyRecipient(enckeybuf string, protected Headers) error { + // Recipients in this case should not contain the content encryption key, + // so move that out + hdrs, err := protected.Clone(context.TODO()) + if err != nil { + return fmt.Errorf(`failed to clone headers: %w`, err) + } + + if err := hdrs.Remove(ContentEncryptionKey); err != nil { + return fmt.Errorf(`failed to remove %#v from public header: %w`, ContentEncryptionKey, err) + } + + enckey, err := base64.DecodeString(enckeybuf) + if err != nil { + return fmt.Errorf(`failed to decode encrypted key: %w`, err) + } + + if err := m.Set(RecipientsKey, []Recipient{ + &stdRecipient{ + headers: hdrs, + encryptedKey: enckey, + }, + }); err != nil { + return fmt.Errorf(`failed to set %s: %w`, RecipientsKey, err) + } + return nil +} + +// Compact generates a JWE message in compact serialization format from a +// `*jwe.Message` object. The object contain exactly one recipient, or +// an error is returned. 
+// +// This function currently does not take any options, but the function +// signature contains `options` for possible future expansion of the API +func Compact(m *Message, _ ...CompactOption) ([]byte, error) { + if len(m.recipients) != 1 { + return nil, fmt.Errorf(`wrong number of recipients for compact serialization`) + } + + recipient := m.recipients[0] + + // The protected header must be a merge between the message-wide + // protected header AND the recipient header + + // There's something wrong if m.protectedHeaders is nil, but + // it could happen + if m.protectedHeaders == nil { + return nil, fmt.Errorf(`invalid protected header`) + } + + ctx := context.TODO() + hcopy, err := m.protectedHeaders.Clone(ctx) + if err != nil { + return nil, fmt.Errorf(`failed to copy protected header: %w`, err) + } + hcopy, err = hcopy.Merge(ctx, m.unprotectedHeaders) + if err != nil { + return nil, fmt.Errorf(`failed to merge unprotected header: %w`, err) + } + hcopy, err = hcopy.Merge(ctx, recipient.Headers()) + if err != nil { + return nil, fmt.Errorf(`failed to merge recipient header: %w`, err) + } + + protected, err := hcopy.Encode() + if err != nil { + return nil, fmt.Errorf(`failed to encode header: %w`, err) + } + + encryptedKey := base64.Encode(recipient.EncryptedKey()) + iv := base64.Encode(m.initializationVector) + cipher := base64.Encode(m.cipherText) + tag := base64.Encode(m.tag) + + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + buf.Grow(len(protected) + len(encryptedKey) + len(iv) + len(cipher) + len(tag) + 4) + buf.Write(protected) + buf.WriteByte('.') + buf.Write(encryptedKey) + buf.WriteByte('.') + buf.Write(iv) + buf.WriteByte('.') + buf.Write(cipher) + buf.WriteByte('.') + buf.Write(tag) + + result := make([]byte, buf.Len()) + copy(result, buf.Bytes()) + return result, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/options.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/options.go new file mode 100644 index 
0000000000..f31c635189 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/options.go @@ -0,0 +1,107 @@ +package jwe + +import ( + "context" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/option" +) + +// Specify contents of the protected header. Some fields such as +// "enc" and "zip" will be overwritten when encryption is performed. +// +// There is no equivalent for unprotected headers in this implementation +func WithProtectedHeaders(h Headers) EncryptOption { + cloned, _ := h.Clone(context.Background()) + return &encryptOption{option.New(identProtectedHeaders{}, cloned)} +} + +type withKey struct { + alg jwa.KeyAlgorithm + key interface{} + headers Headers +} + +type WithKeySuboption interface { + Option + withKeySuboption() +} + +type withKeySuboption struct { + Option +} + +func (*withKeySuboption) withKeySuboption() {} + +// WithPerRecipientHeaders is used to pass header values for each recipient. +// Note that these headers are by definition _unprotected_. +func WithPerRecipientHeaders(hdr Headers) WithKeySuboption { + return &withKeySuboption{option.New(identPerRecipientHeaders{}, hdr)} +} + +// WithKey is used to pass a static algorithm/key pair to either `jwe.Encrypt()` or `jwe.Decrypt()`. +// either a raw key or `jwk.Key` may be passed as `key`. +// +// The `alg` parameter is the identifier for the key encryption algorithm that should be used. +// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.SignatureAlgorithm` +// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly +// passed to the option. If you specify other algorithm types such as `jwa.ContentEncryptionAlgorithm`, +// then you will get an error when `jwe.Encrypt()` or `jwe.Decrypt()` is executed. +// +// Unlike `jwe.WithKeySet()`, the `kid` field does not need to match for the key +// to be tried. 
+func WithKey(alg jwa.KeyAlgorithm, key interface{}, options ...WithKeySuboption) EncryptDecryptOption { + var hdr Headers + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identPerRecipientHeaders{}: + hdr = option.Value().(Headers) + } + } + + return &encryptDecryptOption{option.New(identKey{}, &withKey{ + alg: alg, + key: key, + headers: hdr, + })} +} + +func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) DecryptOption { + requireKid := true + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identRequireKid{}: + requireKid = option.Value().(bool) + } + } + + return WithKeyProvider(&keySetProvider{ + set: set, + requireKid: requireKid, + }) +} + +// WithJSON specifies that the result of `jwe.Encrypt()` is serialized in +// JSON format. +// +// If you pass multiple keys to `jwe.Encrypt()`, it will fail unless +// you also pass this option. +func WithJSON(options ...WithJSONSuboption) EncryptOption { + var pretty bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identPretty{}: + pretty = option.Value().(bool) + } + } + + format := fmtJSON + if pretty { + format = fmtJSONPretty + } + return &encryptOption{option.New(identSerialization{}, format)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/options.yaml b/vendor/github.com/lestrrat-go/jwx/v2/jwe/options.yaml new file mode 100644 index 0000000000..84f89666d0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/options.yaml @@ -0,0 +1,122 @@ +package_name: jwe +output: jwe/options_gen.go +interfaces: + - name: CompactOption + comment: | + CompactOption describes options that can be passed to `jwe.Compact` + - name: DecryptOption + comment: | + DecryptOption describes options that can be passed to `jwe.Decrypt` + - name: EncryptOption + comment: | + EncryptOption describes options that can be passed to `jwe.Encrypt` + - name: EncryptDecryptOption + 
methods: + - encryptOption + - decryptOption + comment: | + EncryptDecryptOption describes options that can be passed to either `jwe.Encrypt` or `jwe.Decrypt` + - name: WithJSONSuboption + concrete_type: withJSONSuboption + comment: | + JSONSuboption describes suboptions that can be passed to `jwe.WithJSON()` option + - name: WithKeySetSuboption + comment: | + WithKeySetSuboption is a suboption passed to the WithKeySet() option + - name: ParseOption + methods: + - readFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwe.ReadFile` +options: + - ident: Key + skip_option: true + - ident: Pretty + skip_option: true + - ident: ProtectedHeaders + skip_option: true + - ident: PerRecipientHeaders + skip_option: true + - ident: KeyProvider + interface: DecryptOption + argument_type: KeyProvider + - ident: Serialization + option_name: WithCompact + interface: EncryptOption + constant_value: fmtCompact + comment: | + WithCompact specifies that the result of `jwe.Encrypt()` is serialized in + compact format. + + By default `jwe.Encrypt()` will opt to use compact format, so you usually + do not need to specify this option other than to be explicit about it + - ident: Compress + interface: EncryptOption + argument_type: jwa.CompressionAlgorithm + comment: | + WithCompress specifies the compression algorithm to use when encrypting + a payload using `jwe.Encrypt` (Yes, we know it can only be "" or "DEF", + but the way the specification is written it could allow for more options, + and therefore this option takes an argument) + - ident: ContentEncryptionAlgorithm + interface: EncryptOption + option_name: WithContentEncryption + argument_type: jwa.ContentEncryptionAlgorithm + comment: | + WithContentEncryptionAlgorithm specifies the algorithm to encrypt the + JWE message content with. If not provided, `jwa.A256GCM` is used. 
+  - ident: Message +    interface: DecryptOption +    argument_type: '*Message' +    comment: | +      WithMessage provides a message object to be populated by `jwe.Decrypt` +      Using this option allows you to decrypt AND obtain the `jwe.Message` +      in one go. + +      Note that you should NOT be using the message object for anything other +      than inspecting its contents. Particularly, do not expect the message +      reliable when you call `Decrypt` on it. `(jwe.Message).Decrypt` is +      slated to be deprecated in the next major version. +  - ident: RequireKid +    interface: WithKeySetSuboption +    argument_type: bool +    comment: | +      WithRequireKid specifies whether the keys in the jwk.Set should +      only be matched if the target JWE message's Key ID and the Key ID +      in the given key matches. +  - ident: Pretty +    interface: WithJSONSuboption +    argument_type: bool +    comment: | +      WithPretty specifies whether the JSON output should be formatted and +      indented +  - ident: MergeProtectedHeaders +    interface: EncryptOption +    argument_type: bool +    comment: | +      WithMergeProtectedHeaders specify that when given multiple headers +      as options to `jwe.Encrypt`, these headers should be merged instead +      of overwritten +  - ident: FS +    interface: ReadFileOption +    argument_type: fs.FS +    comment: | +      WithFS specifies the source `fs.FS` object to read the file from. +  - ident: KeyUsed +    interface: DecryptOption +    argument_type: 'interface{}' +    comment: | +      WithKeyUsed allows you to specify the `jwe.Decrypt()` function to +      return the key used for decryption. This may be useful when +      you specify multiple key sources or if you pass a `jwk.Set` +      and you want to know which key was successful at decrypting the +      signature. + +      `v` must be a pointer to an empty `interface{}`. 
Do not use + `jwk.Key` here unless you are 100% sure that all keys that you + have provided are instances of `jwk.Key` (remember that the + jwx API allows users to specify a raw key such as *rsa.PublicKey) + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwe/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwe/options_gen.go new file mode 100644 index 0000000000..9adb9753e4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwe/options_gen.go @@ -0,0 +1,255 @@ +// This file is auto-generated by internal/cmd/genoptions/main.go. DO NOT EDIT + +package jwe + +import ( + "io/fs" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/option" +) + +type Option = option.Interface + +// CompactOption describes options that can be passed to `jwe.Compact` +type CompactOption interface { + Option + compactOption() +} + +type compactOption struct { + Option +} + +func (*compactOption) compactOption() {} + +// DecryptOption describes options that can be passed to `jwe.Decrypt` +type DecryptOption interface { + Option + decryptOption() +} + +type decryptOption struct { + Option +} + +func (*decryptOption) decryptOption() {} + +// EncryptDecryptOption describes options that can be passed to either `jwe.Encrypt` or `jwe.Decrypt` +type EncryptDecryptOption interface { + Option + encryptOption() + decryptOption() +} + +type encryptDecryptOption struct { + Option +} + +func (*encryptDecryptOption) encryptOption() {} + +func (*encryptDecryptOption) decryptOption() {} + +// EncryptOption describes options that can be passed to `jwe.Encrypt` +type EncryptOption interface { + Option + encryptOption() +} + +type encryptOption struct { + Option +} + +func (*encryptOption) encryptOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` +type ParseOption interface { + Option + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) readFileOption() {} + +// ReadFileOption is a type of `Option` that can be 
passed to `jwe.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// JSONSuboption describes suboptions that can be passed to `jwe.WithJSON()` option +type WithJSONSuboption interface { + Option + withJSONSuboption() +} + +type withJSONSuboption struct { + Option +} + +func (*withJSONSuboption) withJSONSuboption() {} + +// WithKeySetSuboption is a suboption passed to the WithKeySet() option +type WithKeySetSuboption interface { + Option + withKeySetSuboption() +} + +type withKeySetSuboption struct { + Option +} + +func (*withKeySetSuboption) withKeySetSuboption() {} + +type identCompress struct{} +type identContentEncryptionAlgorithm struct{} +type identFS struct{} +type identKey struct{} +type identKeyProvider struct{} +type identKeyUsed struct{} +type identMergeProtectedHeaders struct{} +type identMessage struct{} +type identPerRecipientHeaders struct{} +type identPretty struct{} +type identProtectedHeaders struct{} +type identRequireKid struct{} +type identSerialization struct{} + +func (identCompress) String() string { + return "WithCompress" +} + +func (identContentEncryptionAlgorithm) String() string { + return "WithContentEncryption" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identKey) String() string { + return "WithKey" +} + +func (identKeyProvider) String() string { + return "WithKeyProvider" +} + +func (identKeyUsed) String() string { + return "WithKeyUsed" +} + +func (identMergeProtectedHeaders) String() string { + return "WithMergeProtectedHeaders" +} + +func (identMessage) String() string { + return "WithMessage" +} + +func (identPerRecipientHeaders) String() string { + return "WithPerRecipientHeaders" +} + +func (identPretty) String() string { + return "WithPretty" +} + +func (identProtectedHeaders) String() string { + return "WithProtectedHeaders" +} + +func (identRequireKid) String() string { + return 
"WithRequireKid" +} + +func (identSerialization) String() string { + return "WithSerialization" +} + +// WithCompress specifies the compression algorithm to use when encrypting +// a payload using `jwe.Encrypt` (Yes, we know it can only be "" or "DEF", +// but the way the specification is written it could allow for more options, +// and therefore this option takes an argument) +func WithCompress(v jwa.CompressionAlgorithm) EncryptOption { + return &encryptOption{option.New(identCompress{}, v)} +} + +// WithContentEncryptionAlgorithm specifies the algorithm to encrypt the +// JWE message content with. If not provided, `jwa.A256GCM` is used. +func WithContentEncryption(v jwa.ContentEncryptionAlgorithm) EncryptOption { + return &encryptOption{option.New(identContentEncryptionAlgorithm{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +func WithKeyProvider(v KeyProvider) DecryptOption { + return &decryptOption{option.New(identKeyProvider{}, v)} +} + +// WithKeyUsed allows you to specify the `jwe.Decrypt()` function to +// return the key used for decryption. This may be useful when +// you specify multiple key sources or if you pass a `jwk.Set` +// and you want to know which key was successful at decrypting the +// signature. +// +// `v` must be a pointer to an empty `interface{}`. 
Do not use +// `jwk.Key` here unless you are 100% sure that all keys that you +// have provided are instances of `jwk.Key` (remember that the +// jwx API allows users to specify a raw key such as *rsa.PublicKey) +func WithKeyUsed(v interface{}) DecryptOption { + return &decryptOption{option.New(identKeyUsed{}, v)} +} + +// WithMergeProtectedHeaders specify that when given multiple headers +// as options to `jwe.Encrypt`, these headers should be merged instead +// of overwritten +func WithMergeProtectedHeaders(v bool) EncryptOption { + return &encryptOption{option.New(identMergeProtectedHeaders{}, v)} +} + +// WithMessage provides a message object to be populated by `jwe.Decrpt` +// Using this option allows you to decrypt AND obtain the `jwe.Message` +// in one go. +// +// Note that you should NOT be using the message object for anything other +// than inspecting its contents. Particularly, do not expect the message +// reliable when you call `Decrypt` on it. `(jwe.Message).Decrypt` is +// slated to be deprecated in the next major version. +func WithMessage(v *Message) DecryptOption { + return &decryptOption{option.New(identMessage{}, v)} +} + +// WithPretty specifies whether the JSON output should be formatted and +// indented +func WithPretty(v bool) WithJSONSuboption { + return &withJSONSuboption{option.New(identPretty{}, v)} +} + +// WithrequiredKid specifies whether the keys in the jwk.Set should +// only be matched if the target JWE message's Key ID and the Key ID +// in the given key matches. +func WithRequireKid(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identRequireKid{}, v)} +} + +// WithCompact specifies that the result of `jwe.Encrypt()` is serialized in +// compact format. 
+// +// By default `jwe.Encrypt()` will opt to use compact format, so you usually +// do not need to specify this option other than to be explicit about it +func WithCompact() EncryptOption { + return &encryptOption{option.New(identSerialization{}, fmtCompact)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jwk/README.md new file mode 100644 index 0000000000..a5ded403d1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/README.md @@ -0,0 +1,223 @@ +# JWK [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jwk.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwk) + +Package jwk implements JWK as described in [RFC7517](https://tools.ietf.org/html/rfc7517). +If you are looking to use JWT wit JWKs, look no further than [github.com/lestrrat-go/jwx](../jwt). + +* Parse and work with RSA/EC/Symmetric/OKP JWK types + * Convert to and from JSON + * Convert to and from raw key types (e.g. *rsa.PrivateKey) +* Ability to keep a JWKS fresh using *jwk.AutoRefersh + +## Supported key types: + +| kty | Curve | Go Key Type | +|:----|:------------------------|:----------------------------------------------| +| RSA | N/A | rsa.PrivateKey / rsa.PublicKey (2) | +| EC | P-256
P-384
P-521
secp256k1 (1) | ecdsa.PrivateKey / ecdsa.PublicKey (2) | +| oct | N/A | []byte | +| OKP | Ed25519 (1) | ed25519.PrivateKey / ed25519.PublicKey (2) | +| | X25519 (1) | (jwx/)x25519.PrivateKey / x25519.PublicKey (2)| + +* Note 1: Experimental +* Note 2: Either value or pointers accepted (e.g. rsa.PrivateKey or *rsa.PrivateKey) + +# Documentation + +Please read the [API reference](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwk), or +the how-to style documentation on how to use JWK can be found in the [docs directory](../docs/04-jwk.md). + +# Auto-Refresh a key during a long running process + + +```go +package examples_test + +import ( + "context" + "fmt" + "time" + + "github.com/lestrrat-go/jwx/v2/jwk" +) + +func ExampleJWK_Cache() { + ctx, cancel := context.WithCancel(context.Background()) + + const googleCerts = `https://www.googleapis.com/oauth2/v3/certs` + + // First, set up the `jwk.Cache` object. You need to pass it a + // `context.Context` object to control the lifecycle of the background fetching goroutine. + // + // Note that by default refreshes only happen very 15 minutes at the + // earliest. If you need to control this, use `jwk.WithRefreshWindow()` + c := jwk.NewCache(ctx) + + // Tell *jwk.Cache that we only want to refresh this JWKS + // when it needs to (based on Cache-Control or Expires header from + // the HTTP response). If the calculated minimum refresh interval is less + // than 15 minutes, don't go refreshing any earlier than 15 minutes. + c.Register(googleCerts, jwk.WithMinRefreshInterval(15*time.Minute)) + + // Refresh the JWKS once before getting into the main loop. 
+ // This allows you to check if the JWKS is available before we start + // a long-running program + _, err := c.Refresh(ctx, googleCerts) + if err != nil { + fmt.Printf("failed to refresh google JWKS: %s\n", err) + return + } + + // Pretend that this is your program's main loop +MAIN: + for { + select { + case <-ctx.Done(): + break MAIN + default: + } + keyset, err := c.Get(ctx, googleCerts) + if err != nil { + fmt.Printf("failed to fetch google JWKS: %s\n", err) + return + } + _ = keyset + // The returned `keyset` will always be "reasonably" new. It is important that + // you always call `ar.Fetch()` before using the `keyset` as this is where the refreshing occurs. + // + // By "reasonably" we mean that we cannot guarantee that the keys will be refreshed + // immediately after it has been rotated in the remote source. But it should be close\ + // enough, and should you need to forcefully refresh the token using the `(jwk.Cache).Refresh()` method. + // + // If re-fetching the keyset fails, a cached version will be returned from the previous successful + // fetch upon calling `(jwk.Cache).Fetch()`. + + // Do interesting stuff with the keyset... but here, we just + // sleep for a bit + time.Sleep(time.Second) + + // Because we're a dummy program, we just cancel the loop now. 
+ // If this were a real program, you prosumably loop forever + cancel() + } + // OUTPUT: +} +``` +source: [examples/jwk_cache_example_test.go](https://github.com/lestrrat-go/jwx/blob/v2/examples/jwk_cache_example_test.go) + + +Parse and use a JWK key: + + +```go +package examples_test + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "log" + + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +func ExampleJWK_Usage() { + // Use jwk.Cache if you intend to keep reuse the JWKS over and over + set, err := jwk.Fetch(context.Background(), "https://www.googleapis.com/oauth2/v3/certs") + if err != nil { + log.Printf("failed to parse JWK: %s", err) + return + } + + // Key sets can be serialized back to JSON + { + jsonbuf, err := json.Marshal(set) + if err != nil { + log.Printf("failed to marshal key set into JSON: %s", err) + return + } + log.Printf("%s", jsonbuf) + } + + for it := set.Iterate(context.Background()); it.Next(context.Background()); { + pair := it.Pair() + key := pair.Value.(jwk.Key) + + var rawkey interface{} // This is the raw key, like *rsa.PrivateKey or *ecdsa.PrivateKey + if err := key.Raw(&rawkey); err != nil { + log.Printf("failed to create public key: %s", err) + return + } + // Use rawkey for jws.Verify() or whatever. 
+ _ = rawkey + + // You can create jwk.Key from a raw key, too + fromRawKey, err := jwk.FromRaw(rawkey) + if err != nil { + log.Printf("failed to acquire raw key from jwk.Key: %s", err) + return + } + + // Keys can be serialized back to JSON + jsonbuf, err := json.Marshal(key) + if err != nil { + log.Printf("failed to marshal key into JSON: %s", err) + return + } + + fromJSONKey, err := jwk.Parse(jsonbuf) + if err != nil { + log.Printf("failed to parse json: %s", err) + return + } + _ = fromJSONKey + _ = fromRawKey + } + // OUTPUT: +} + +//nolint:govet +func ExampleJWK_MarshalJSON() { + // to get the same values every time, we need to create a static source + // of "randomness" + rdr := bytes.NewReader([]byte("01234567890123456789012345678901234567890123456789ABCDEF")) + raw, err := ecdsa.GenerateKey(elliptic.P384(), rdr) + if err != nil { + fmt.Printf("failed to generate new ECDSA private key: %s\n", err) + return + } + + key, err := jwk.FromRaw(raw) + if err != nil { + fmt.Printf("failed to create ECDSA key: %s\n", err) + return + } + if _, ok := key.(jwk.ECDSAPrivateKey); !ok { + fmt.Printf("expected jwk.ECDSAPrivateKey, got %T\n", key) + return + } + + key.Set(jwk.KeyIDKey, "mykey") + + buf, err := json.MarshalIndent(key, "", " ") + if err != nil { + fmt.Printf("failed to marshal key into JSON: %s\n", err) + return + } + fmt.Printf("%s\n", buf) + + // OUTPUT: + // { + // "crv": "P-384", + // "d": "ODkwMTIzNDU2Nzg5MDEyMz7deMbyLt8g4cjcxozuIoygLLlAeoQ1AfM9TSvxkFHJ", + // "kid": "mykey", + // "kty": "EC", + // "x": "gvvRMqm1w5aHn7sVNA2QUJeOVcedUnmiug6VhU834gzS9k87crVwu9dz7uLOdoQl", + // "y": "7fVF7b6J_6_g6Wu9RuJw8geWxEi5ja9Gp2TSdELm5u2E-M7IF-bsxqcdOj3n1n7N" + // } +} +``` +source: [examples/jwk_example_test.go](https://github.com/lestrrat-go/jwx/blob/v2/examples/jwk_example_test.go) + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/cache.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/cache.go new file mode 100644 index 0000000000..2230505d51 --- /dev/null +++ 
b/vendor/github.com/lestrrat-go/jwx/v2/jwk/cache.go @@ -0,0 +1,407 @@ +package jwk + +import ( + "context" + "fmt" + "io" + "net/http" + "time" + + "github.com/lestrrat-go/httprc" + "github.com/lestrrat-go/iter/arrayiter" + "github.com/lestrrat-go/iter/mapiter" +) + +type Transformer = httprc.Transformer +type HTTPClient = httprc.HTTPClient +type ErrSink = httprc.ErrSink + +// Whitelist describes a set of rules that allows users to access +// a particular URL. By default all URLs are blocked for security +// reasons. You will HAVE to provide some sort of whitelist. See +// the documentation for github.com/lestrrat-go/httprc for more details. +type Whitelist = httprc.Whitelist + +// Cache is a container that keeps track of Set object by their source URLs. +// The Set objects are stored in memory, and are refreshed automatically +// behind the scenes. +// +// Before retrieving the Set objects, the user must pre-register the +// URLs they intend to use by calling `Register()` +// +// c := jwk.NewCache(ctx) +// c.Register(url, options...) +// +// Once registered, you can call `Get()` to retrieve the Set object. +// +// All JWKS objects that are retrieved via this mechanism should be +// treated read-only, as they are shared among all consumers, as well +// as the `jwk.Cache` object. +// +// There are cases where `jwk.Cache` and `jwk.CachedSet` should and +// should not be used. +// +// First and foremost, do NOT use a cache for those JWKS objects that +// need constant checking. For example, unreliable or user-provided JWKS (i.e. those +// JWKS that are not from a well-known provider) should not be fetched +// through a `jwk.Cache` or `jwk.CachedSet`. +// +// For example, if you have a flaky JWKS server for development +// that can go down often, you should consider alternatives such as +// providing `http.Client` with a caching `http.RoundTripper` configured +// (see `jwk.WithHTTPClient`), setting up a reverse proxy, etc. 
+// These techniques allow you to setup a more robust way to both cache +// and report precise causes of the problems than using `jwk.Cache` or +// `jwk.CachedSet`. If you handle the caching at the HTTP level like this, +// you will be able to use a simple `jwk.Fetch` call and not worry about the cache. +// +// User-provided JWKS objects may also be problematic, as it may go down +// unexpectedly (and frequently!), and it will be hard to detect when +// the URLs or its contents are swapped. +// +// A good use-case for `jwk.Cache` and `jwk.CachedSet` are for "stable" +// JWKS objects. +// +// When we say "stable", we are thinking of JWKS that should mostly be +// ALWAYS available. A good example are those JWKS objects provided by +// major cloud providers such as Google Cloud, AWS, or Azure. +// Stable JWKS may still experience intermittent network connectivity problems, +// but you can expect that they will eventually recover in relatively +// short period of time. They rarely change URLs, and the contents are +// expected to be valid or otherwise it would cause havoc to those providers +// +// We also know that these stable JWKS objects are rotated periodically, +// which is a perfect use for `jwk.Cache` and `jwk.CachedSet`. The caches +// can be configured to perodically refresh the JWKS thereby keeping them +// fresh without extra intervention from the developer. +// +// Notice that for these recommended use-cases the requirement to check +// the validity or the availability of the JWKS objects are non-existent, +// as it is expected that they will be available and will be valid. The +// caching mechanism can hide intermittent connectivity problems as well +// as keep the objects mostly fresh. +type Cache struct { + cache *httprc.Cache +} + +// PostFetcher is an interface for objects that want to perform +// operations on the `Set` that was fetched. 
+type PostFetcher interface { + // PostFetch receives the URL and the JWKS, after a successful + // fetch and parse. + // + // It should return a `Set`, optionally modified, to be stored + // in the cache for subsequent use + PostFetch(string, Set) (Set, error) +} + +// PostFetchFunc is a PostFetcher based on a function. +type PostFetchFunc func(string, Set) (Set, error) + +func (f PostFetchFunc) PostFetch(u string, set Set) (Set, error) { + return f(u, set) +} + +// httprc.Transformer that transforms the response into a JWKS +type jwksTransform struct { + postFetch PostFetcher + parseOptions []ParseOption +} + +// Default transform has no postFetch. This can be shared +// by multiple fetchers +var defaultTransform = &jwksTransform{} + +func (t *jwksTransform) Transform(u string, res *http.Response) (interface{}, error) { + buf, err := io.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf(`failed to read response body status: %w`, err) + } + + set, err := Parse(buf, t.parseOptions...) + if err != nil { + return nil, fmt.Errorf(`failed to parse JWK set at %q: %w`, u, err) + } + + if pf := t.postFetch; pf != nil { + v, err := pf.PostFetch(u, set) + if err != nil { + return nil, fmt.Errorf(`failed to execute PostFetch: %w`, err) + } + set = v + } + + return set, nil +} + +// NewCache creates a new `jwk.Cache` object. +// +// Please refer to the documentation for `httprc.New` for more +// details. +func NewCache(ctx context.Context, options ...CacheOption) *Cache { + var hrcopts []httprc.CacheOption + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identRefreshWindow{}: + hrcopts = append(hrcopts, httprc.WithRefreshWindow(option.Value().(time.Duration))) + case identErrSink{}: + hrcopts = append(hrcopts, httprc.WithErrSink(option.Value().(ErrSink))) + } + } + + return &Cache{ + cache: httprc.NewCache(ctx, hrcopts...), + } +} + +// Register registers a URL to be managed by the cache. 
URLs must +// be registered before issuing `Get` +// +// This method is almost identical to `(httprc.Cache).Register`, except +// it accepts some extra options. +// +// Use `jwk.WithParser` to configure how the JWKS should be parsed, +// such as passing it extra options. +// +// Please refer to the documentation for `(httprc.Cache).Register` for more +// details. +// +// Register does not check for the validity of the url being registered. +// If you need to make sure that a url is valid before entering your main +// loop, call `Refresh` once to make sure the JWKS is available. +// +// _ = cache.Register(url) +// if _, err := cache.Refresh(ctx, url); err != nil { +// // url is not a valid JWKS +// panic(err) +// } +func (c *Cache) Register(u string, options ...RegisterOption) error { + var hrropts []httprc.RegisterOption + var pf PostFetcher + var parseOptions []ParseOption + + // Note: we do NOT accept Transform option + for _, option := range options { + if parseOpt, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, parseOpt) + continue + } + + //nolint:forcetypeassert + switch option.Ident() { + case identHTTPClient{}: + hrropts = append(hrropts, httprc.WithHTTPClient(option.Value().(HTTPClient))) + case identRefreshInterval{}: + hrropts = append(hrropts, httprc.WithRefreshInterval(option.Value().(time.Duration))) + case identMinRefreshInterval{}: + hrropts = append(hrropts, httprc.WithMinRefreshInterval(option.Value().(time.Duration))) + case identFetchWhitelist{}: + hrropts = append(hrropts, httprc.WithWhitelist(option.Value().(httprc.Whitelist))) + case identPostFetcher{}: + pf = option.Value().(PostFetcher) + } + } + + var t *jwksTransform + if pf == nil && len(parseOptions) == 0 { + t = defaultTransform + } else { + // User-supplied PostFetcher is attached to the transformer + t = &jwksTransform{ + postFetch: pf, + parseOptions: parseOptions, + } + } + + // Set the transfomer at the end so that nobody can override it + hrropts = 
append(hrropts, httprc.WithTransformer(t)) + return c.cache.Register(u, hrropts...) +} + +// Get returns the stored JWK set (`Set`) from the cache. +// +// Please refer to the documentation for `(httprc.Cache).Get` for more +// details. +func (c *Cache) Get(ctx context.Context, u string) (Set, error) { + v, err := c.cache.Get(ctx, u) + if err != nil { + return nil, err + } + + set, ok := v.(Set) + if !ok { + return nil, fmt.Errorf(`cached object is not a Set (was %T)`, v) + } + return set, nil +} + +// Refresh is identical to Get(), except it always fetches the +// specified resource anew, and updates the cached content +// +// Please refer to the documentation for `(httprc.Cache).Refresh` for +// more details +func (c *Cache) Refresh(ctx context.Context, u string) (Set, error) { + v, err := c.cache.Refresh(ctx, u) + if err != nil { + return nil, err + } + + set, ok := v.(Set) + if !ok { + return nil, fmt.Errorf(`cached object is not a Set (was %T)`, v) + } + return set, nil +} + +// IsRegistered returns true if the given URL `u` has already been registered +// in the cache. +// +// Please refer to the documentation for `(httprc.Cache).IsRegistered` for more +// details. +func (c *Cache) IsRegistered(u string) bool { + return c.cache.IsRegistered(u) +} + +// Unregister removes the given URL `u` from the cache. +// +// Please refer to the documentation for `(httprc.Cache).Unregister` for more +// details. +func (c *Cache) Unregister(u string) error { + return c.cache.Unregister(u) +} + +func (c *Cache) Snapshot() *httprc.Snapshot { + return c.cache.Snapshot() +} + +// CachedSet is a thin shim over jwk.Cache that allows the user to cloack +// jwk.Cache as if it's a `jwk.Set`. Behind the scenes, the `jwk.Set` is +// retrieved from the `jwk.Cache` for every operation. +// +// Since `jwk.CachedSet` always deals with a cached version of the `jwk.Set`, +// all operations that mutate the object (such as AddKey(), RemoveKey(), et. al) +// are no-ops and return an error. 
+// +// Note that since this is a utility shim over `jwk.Cache`, you _will_ lose +// the ability to control the finer details (such as controlling how long to +// wait for in case of a fetch failure using `context.Context`) +// +// Make sure that you read the documentation for `jwk.Cache` as well. +type CachedSet struct { + cache *Cache + url string +} + +var _ Set = &CachedSet{} + +func NewCachedSet(cache *Cache, url string) Set { + return &CachedSet{ + cache: cache, + url: url, + } +} + +func (cs *CachedSet) cached() (Set, error) { + return cs.cache.Get(context.Background(), cs.url) +} + +// Add is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*CachedSet) AddKey(_ Key) error { + return fmt.Errorf(`(jwk.Cachedset).AddKey: jwk.CachedSet is immutable`) +} + +// Clear is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*CachedSet) Clear() error { + return fmt.Errorf(`(jwk.CachedSet).Clear: jwk.CachedSet is immutable`) +} + +// Set is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*CachedSet) Set(_ string, _ interface{}) error { + return fmt.Errorf(`(jwk.CachedSet).Set: jwk.CachedSet is immutable`) +} + +// Remove is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*CachedSet) Remove(_ string) error { + // TODO: Remove() should be renamed to Remove(string) error + return fmt.Errorf(`(jwk.CachedSet).Remove: jwk.CachedSet is immutable`) +} + +// RemoveKey is a no-op for `jwk.CachedSet`, as the `jwk.Set` should be treated read-only +func (*CachedSet) RemoveKey(_ Key) error { + return fmt.Errorf(`(jwk.CachedSet).RemoveKey: jwk.CachedSet is immutable`) +} + +func (cs *CachedSet) Clone() (Set, error) { + set, err := cs.cached() + if err != nil { + return nil, fmt.Errorf(`failed to get cached jwk.Set: %w`, err) + } + + return set.Clone() +} + +// Get returns the value of non-Key field stored in the jwk.Set +func (cs *CachedSet) Get(name string) 
(interface{}, bool) { + set, err := cs.cached() + if err != nil { + return nil, false + } + + return set.Get(name) +} + +// Key returns the Key at the specified index +func (cs *CachedSet) Key(idx int) (Key, bool) { + set, err := cs.cached() + if err != nil { + return nil, false + } + + return set.Key(idx) +} + +func (cs *CachedSet) Index(key Key) int { + set, err := cs.cached() + if err != nil { + return -1 + } + + return set.Index(key) +} + +func (cs *CachedSet) Keys(ctx context.Context) KeyIterator { + set, err := cs.cached() + if err != nil { + return arrayiter.New(nil) + } + + return set.Keys(ctx) +} + +func (cs *CachedSet) Iterate(ctx context.Context) HeaderIterator { + set, err := cs.cached() + if err != nil { + return mapiter.New(nil) + } + + return set.Iterate(ctx) +} + +func (cs *CachedSet) Len() int { + set, err := cs.cached() + if err != nil { + return -1 + } + + return set.Len() +} + +func (cs *CachedSet) LookupKeyID(kid string) (Key, bool) { + set, err := cs.cached() + if err != nil { + return nil, false + } + + return set.LookupKeyID(kid) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa.go new file mode 100644 index 0000000000..67a14ba63e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa.go @@ -0,0 +1,228 @@ +package jwk + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "fmt" + "math/big" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/ecutil" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +func init() { + ecutil.RegisterCurve(elliptic.P256(), jwa.P256) + ecutil.RegisterCurve(elliptic.P384(), jwa.P384) + ecutil.RegisterCurve(elliptic.P521(), jwa.P521) +} + +func (k *ecdsaPublicKey) FromRaw(rawKey *ecdsa.PublicKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + if rawKey.X == nil { + return fmt.Errorf(`invalid ecdsa.PublicKey`) + } + + if rawKey.Y == nil { + return 
fmt.Errorf(`invalid ecdsa.PublicKey`) + } + + xbuf := ecutil.AllocECPointBuffer(rawKey.X, rawKey.Curve) + ybuf := ecutil.AllocECPointBuffer(rawKey.Y, rawKey.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + + k.x = make([]byte, len(xbuf)) + copy(k.x, xbuf) + k.y = make([]byte, len(ybuf)) + copy(k.y, ybuf) + + var crv jwa.EllipticCurveAlgorithm + if tmp, ok := ecutil.AlgorithmForCurve(rawKey.Curve); ok { + crv = tmp + } else { + return fmt.Errorf(`invalid elliptic curve %s`, rawKey.Curve) + } + k.crv = &crv + + return nil +} + +func (k *ecdsaPrivateKey) FromRaw(rawKey *ecdsa.PrivateKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + if rawKey.PublicKey.X == nil { + return fmt.Errorf(`invalid ecdsa.PrivateKey`) + } + if rawKey.PublicKey.Y == nil { + return fmt.Errorf(`invalid ecdsa.PrivateKey`) + } + if rawKey.D == nil { + return fmt.Errorf(`invalid ecdsa.PrivateKey`) + } + + xbuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.X, rawKey.Curve) + ybuf := ecutil.AllocECPointBuffer(rawKey.PublicKey.Y, rawKey.Curve) + dbuf := ecutil.AllocECPointBuffer(rawKey.D, rawKey.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + defer ecutil.ReleaseECPointBuffer(dbuf) + + k.x = make([]byte, len(xbuf)) + copy(k.x, xbuf) + k.y = make([]byte, len(ybuf)) + copy(k.y, ybuf) + k.d = make([]byte, len(dbuf)) + copy(k.d, dbuf) + + var crv jwa.EllipticCurveAlgorithm + if tmp, ok := ecutil.AlgorithmForCurve(rawKey.Curve); ok { + crv = tmp + } else { + return fmt.Errorf(`invalid elliptic curve %s`, rawKey.Curve) + } + k.crv = &crv + + return nil +} + +func buildECDSAPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf, ybuf []byte) (*ecdsa.PublicKey, error) { + var crv elliptic.Curve + if tmp, ok := ecutil.CurveForAlgorithm(alg); ok { + crv = tmp + } else { + return nil, fmt.Errorf(`invalid curve algorithm %s`, alg) + } + + var x, y big.Int + x.SetBytes(xbuf) + y.SetBytes(ybuf) + + return &ecdsa.PublicKey{Curve: crv, 
X: &x, Y: &y}, nil +} + +// Raw returns the EC-DSA public key represented by this JWK +func (k *ecdsaPublicKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + + pubk, err := buildECDSAPublicKey(k.Crv(), k.x, k.y) + if err != nil { + return fmt.Errorf(`failed to build public key: %w`, err) + } + + return blackmagic.AssignIfCompatible(v, pubk) +} + +func (k *ecdsaPrivateKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + + pubk, err := buildECDSAPublicKey(k.Crv(), k.x, k.y) + if err != nil { + return fmt.Errorf(`failed to build public key: %w`, err) + } + + var key ecdsa.PrivateKey + var d big.Int + d.SetBytes(k.d) + key.D = &d + key.PublicKey = *pubk + + return blackmagic.AssignIfCompatible(v, &key) +} + +func makeECDSAPublicKey(v interface { + makePairs() []*HeaderPair +}) (Key, error) { + newKey := newECDSAPublicKey() + + // Iterate and copy everything except for the bits that should not be in the public key + for _, pair := range v.makePairs() { + switch pair.Key { + case ECDSADKey: + continue + default: + //nolint:forcetypeassert + key := pair.Key.(string) + if err := newKey.Set(key, pair.Value); err != nil { + return nil, fmt.Errorf(`failed to set field %q: %w`, key, err) + } + } + } + + return newKey, nil +} + +func (k *ecdsaPrivateKey) PublicKey() (Key, error) { + return makeECDSAPublicKey(k) +} + +func (k *ecdsaPublicKey) PublicKey() (Key, error) { + return makeECDSAPublicKey(k) +} + +func ecdsaThumbprint(hash crypto.Hash, crv, x, y string) []byte { + h := hash.New() + fmt.Fprint(h, `{"crv":"`) + fmt.Fprint(h, crv) + fmt.Fprint(h, `","kty":"EC","x":"`) + fmt.Fprint(h, x) + fmt.Fprint(h, `","y":"`) + fmt.Fprint(h, y) + fmt.Fprint(h, `"}`) + return h.Sum(nil) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k ecdsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key ecdsa.PublicKey + if err := 
k.Raw(&key); err != nil { + return nil, fmt.Errorf(`failed to materialize ecdsa.PublicKey for thumbprint generation: %w`, err) + } + + xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve) + ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + + return ecdsaThumbprint( + hash, + key.Curve.Params().Name, + base64.EncodeToString(xbuf), + base64.EncodeToString(ybuf), + ), nil +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k ecdsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key ecdsa.PrivateKey + if err := k.Raw(&key); err != nil { + return nil, fmt.Errorf(`failed to materialize ecdsa.PrivateKey for thumbprint generation: %w`, err) + } + + xbuf := ecutil.AllocECPointBuffer(key.X, key.Curve) + ybuf := ecutil.AllocECPointBuffer(key.Y, key.Curve) + defer ecutil.ReleaseECPointBuffer(xbuf) + defer ecutil.ReleaseECPointBuffer(ybuf) + + return ecdsaThumbprint( + hash, + key.Curve.Params().Name, + base64.EncodeToString(xbuf), + base64.EncodeToString(ybuf), + ), nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa_gen.go new file mode 100644 index 0000000000..92f56f5a6e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/ecdsa_gen.go @@ -0,0 +1,1181 @@ +// This file is auto-generated by jwk/internal/cmd/genheader/main.go. 
DO NOT EDIT + +package jwk + +import ( + "bytes" + "context" + "crypto/ecdsa" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +const ( + ECDSACrvKey = "crv" + ECDSADKey = "d" + ECDSAXKey = "x" + ECDSAYKey = "y" +) + +type ECDSAPublicKey interface { + Key + FromRaw(*ecdsa.PublicKey) error + Crv() jwa.EllipticCurveAlgorithm + X() []byte + Y() []byte +} + +type ecdsaPublicKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + y []byte + privateParams map[string]interface{} + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ ECDSAPublicKey = &ecdsaPublicKey{} +var _ Key = &ecdsaPublicKey{} + +func newECDSAPublicKey() *ecdsaPublicKey { + return &ecdsaPublicKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h ecdsaPublicKey) KeyType() jwa.KeyType { + return jwa.EC +} + +func (h *ecdsaPublicKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + } + return jwa.InvalidKeyAlgorithm("") +} + +func (h *ecdsaPublicKey) Crv() jwa.EllipticCurveAlgorithm { + if h.crv != nil { + return *(h.crv) + } + return 
jwa.InvalidEllipticCurve +} + +func (h *ecdsaPublicKey) KeyID() string { + if h.keyID != nil { + return *(h.keyID) + } + return "" +} + +func (h *ecdsaPublicKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *ecdsaPublicKey) KeyUsage() string { + if h.keyUsage != nil { + return *(h.keyUsage) + } + return "" +} + +func (h *ecdsaPublicKey) X() []byte { + return h.x +} + +func (h *ecdsaPublicKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *ecdsaPublicKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *ecdsaPublicKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *ecdsaPublicKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *ecdsaPublicKey) Y() []byte { + return h.y +} + +func (h *ecdsaPublicKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.EC}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.crv != nil { + pairs = append(pairs, &HeaderPair{Key: ECDSACrvKey, Value: *(h.crv)}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage != nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.x != nil { + pairs = append(pairs, &HeaderPair{Key: ECDSAXKey, Value: h.x}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: 
*(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + if h.y != nil { + pairs = append(pairs, &HeaderPair{Key: ECDSAYKey, Value: h.y}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *ecdsaPublicKey) PrivateParams() map[string]interface{} { + return h.privateParams +} + +func (h *ecdsaPublicKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case ECDSACrvKey: + if h.crv == nil { + return nil, false + } + return *(h.crv), true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case ECDSAXKey: + if h.x == nil { + return nil, false + } + return h.x, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + case ECDSAYKey: + if h.y == nil { + return nil, false + } + return h.y, true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *ecdsaPublicKey) Set(name string, value interface{}) error { + h.mu.Lock() + 
defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *ecdsaPublicKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case ECDSACrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case ECDSAXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := 
value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + case ECDSAYKey: + if v, ok := value.([]byte); ok { + h.y = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *ecdsaPublicKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case ECDSACrvKey: + k.crv = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case ECDSAXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + case ECDSAYKey: + k.y = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *ecdsaPublicKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *ecdsaPublicKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *ecdsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *ecdsaPublicKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.y = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) 
+ } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.EC.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case ECDSACrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err) + } + h.crv = &decoded + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case ECDSAXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := 
json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + case ECDSAYKey: + if err := json.AssignNextBytesToken(&h.y, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + if h.y == nil { + return fmt.Errorf(`required field y is missing`) + } + return nil +} + +func (h ecdsaPublicKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 11) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + 
buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *ecdsaPublicKey) Iterate(ctx context.Context) HeaderIterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *ecdsaPublicKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *ecdsaPublicKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} + +type ECDSAPrivateKey interface { + Key + FromRaw(*ecdsa.PrivateKey) error + Crv() jwa.EllipticCurveAlgorithm + D() []byte + X() []byte + Y() []byte +} + +type ecdsaPrivateKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + d []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + y []byte + privateParams map[string]interface{} + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ ECDSAPrivateKey = &ecdsaPrivateKey{} +var _ Key = 
&ecdsaPrivateKey{} + +func newECDSAPrivateKey() *ecdsaPrivateKey { + return &ecdsaPrivateKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h ecdsaPrivateKey) KeyType() jwa.KeyType { + return jwa.EC +} + +func (h *ecdsaPrivateKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + } + return jwa.InvalidKeyAlgorithm("") +} + +func (h *ecdsaPrivateKey) Crv() jwa.EllipticCurveAlgorithm { + if h.crv != nil { + return *(h.crv) + } + return jwa.InvalidEllipticCurve +} + +func (h *ecdsaPrivateKey) D() []byte { + return h.d +} + +func (h *ecdsaPrivateKey) KeyID() string { + if h.keyID != nil { + return *(h.keyID) + } + return "" +} + +func (h *ecdsaPrivateKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *ecdsaPrivateKey) KeyUsage() string { + if h.keyUsage != nil { + return *(h.keyUsage) + } + return "" +} + +func (h *ecdsaPrivateKey) X() []byte { + return h.x +} + +func (h *ecdsaPrivateKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *ecdsaPrivateKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *ecdsaPrivateKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *ecdsaPrivateKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *ecdsaPrivateKey) Y() []byte { + return h.y +} + +func (h *ecdsaPrivateKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.EC}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.crv != nil { + pairs = append(pairs, &HeaderPair{Key: ECDSACrvKey, Value: *(h.crv)}) + } + if h.d != nil { + pairs = append(pairs, 
&HeaderPair{Key: ECDSADKey, Value: h.d}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage != nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.x != nil { + pairs = append(pairs, &HeaderPair{Key: ECDSAXKey, Value: h.x}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + if h.y != nil { + pairs = append(pairs, &HeaderPair{Key: ECDSAYKey, Value: h.y}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *ecdsaPrivateKey) PrivateParams() map[string]interface{} { + return h.privateParams +} + +func (h *ecdsaPrivateKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case ECDSACrvKey: + if h.crv == nil { + return nil, false + } + return *(h.crv), true + case ECDSADKey: + if h.d == nil { + return nil, false + } + return h.d, true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case ECDSAXKey: + if h.x == nil { + return 
nil, false + } + return h.x, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + case ECDSAYKey: + if h.y == nil { + return nil, false + } + return h.y, true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *ecdsaPrivateKey) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *ecdsaPrivateKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case ECDSACrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSACrvKey, value) + case ECDSADKey: + if v, ok := value.([]byte); ok { + h.d = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSADKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case 
KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case ECDSAXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + case ECDSAYKey: + if v, ok := value.([]byte); ok { + h.y = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ECDSAYKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *ecdsaPrivateKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case ECDSACrvKey: + k.crv = nil + case ECDSADKey: + k.d = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case ECDSAXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + 
k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + case ECDSAYKey: + k.y = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *ecdsaPrivateKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *ecdsaPrivateKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *ecdsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *ecdsaPrivateKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.d = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.y = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. 
+ if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.EC.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case ECDSACrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSACrvKey, err) + } + h.crv = &decoded + case ECDSADKey: + if err := json.AssignNextBytesToken(&h.d, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSADKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case ECDSAXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := 
json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + case ECDSAYKey: + if err := json.AssignNextBytesToken(&h.y, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ECDSAYKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.d == nil { + return fmt.Errorf(`required field d is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + if h.y == nil { + return fmt.Errorf(`required field y is missing`) + } + return nil +} + +func (h ecdsaPrivateKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 12) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) 
+ v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *ecdsaPrivateKey) Iterate(ctx context.Context) HeaderIterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *ecdsaPrivateKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *ecdsaPrivateKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/es256k.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/es256k.go new file mode 100644 index 0000000000..1a9d2346a4 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/es256k.go @@ -0,0 +1,14 @@ +//go:build jwx_es256k +// +build jwx_es256k + +package jwk + +import ( + "github.com/decred/dcrd/dcrec/secp256k1/v4" + "github.com/lestrrat-go/jwx/v2/internal/ecutil" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +func init() { + ecutil.RegisterCurve(secp256k1.S256(), jwa.Secp256k1) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/fetch.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/fetch.go new file mode 100644 index 0000000000..daca17734c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/fetch.go @@ -0,0 +1,76 @@ +package jwk + +import ( + "context" + "fmt" + "io" + "os" + "strconv" + + "github.com/lestrrat-go/httprc" +) + +type Fetcher interface { + Fetch(context.Context, 
string, ...FetchOption) (Set, error) +} + +type FetchFunc func(context.Context, string, ...FetchOption) (Set, error) + +func (f FetchFunc) Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) { + return f(ctx, u, options...) +} + +var globalFetcher httprc.Fetcher + +func init() { + var nworkers int + v := os.Getenv(`JWK_FETCHER_WORKER_COUNT`) + if c, err := strconv.ParseInt(v, 10, 64); err == nil { + nworkers = int(c) + } + if nworkers < 1 { + nworkers = 3 + } + + globalFetcher = httprc.NewFetcher(context.Background(), httprc.WithFetcherWorkerCount(nworkers)) +} + +// Fetch fetches a JWK resource specified by a URL. The url must be +// pointing to a resource that is supported by `net/http`. +// +// If you are using the same `jwk.Set` for long periods of time during +// the lifecycle of your program, and would like to periodically refresh the +// contents of the object with the data at the remote resource, +// consider using `jwk.Cache`, which automatically refreshes +// jwk.Set objects asynchronously. +func Fetch(ctx context.Context, u string, options ...FetchOption) (Set, error) { + var hrfopts []httprc.FetchOption + var parseOptions []ParseOption + for _, option := range options { + if parseOpt, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, parseOpt) + continue + } + + //nolint:forcetypeassert + switch option.Ident() { + case identHTTPClient{}: + hrfopts = append(hrfopts, httprc.WithHTTPClient(option.Value().(HTTPClient))) + case identFetchWhitelist{}: + hrfopts = append(hrfopts, httprc.WithWhitelist(option.Value().(httprc.Whitelist))) + } + } + + res, err := globalFetcher.Fetch(ctx, u, hrfopts...) + if err != nil { + return nil, fmt.Errorf(`failed to fetch %q: %w`, u, err) + } + + buf, err := io.ReadAll(res.Body) + defer res.Body.Close() + if err != nil { + return nil, fmt.Errorf(`failed to read response body for %q: %w`, u, err) + } + + return Parse(buf, parseOptions...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface.go new file mode 100644 index 0000000000..729a0ec6c5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface.go @@ -0,0 +1,137 @@ +package jwk + +import ( + "context" + "sync" + + "github.com/lestrrat-go/iter/arrayiter" + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" +) + +// KeyUsageType is used to denote what this key should be used for +type KeyUsageType string + +const ( + // ForSignature is the value used in the headers to indicate that + // this key should be used for signatures + ForSignature KeyUsageType = "sig" + // ForEncryption is the value used in the headers to indicate that + // this key should be used for encrypting + ForEncryption KeyUsageType = "enc" +) + +type KeyOperation string +type KeyOperationList []KeyOperation + +const ( + KeyOpSign KeyOperation = "sign" // (compute digital signature or MAC) + KeyOpVerify KeyOperation = "verify" // (verify digital signature or MAC) + KeyOpEncrypt KeyOperation = "encrypt" // (encrypt content) + KeyOpDecrypt KeyOperation = "decrypt" // (decrypt content and validate decryption, if applicable) + KeyOpWrapKey KeyOperation = "wrapKey" // (encrypt key) + KeyOpUnwrapKey KeyOperation = "unwrapKey" // (decrypt key and validate decryption, if applicable) + KeyOpDeriveKey KeyOperation = "deriveKey" // (derive key) + KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key) +) + +// Set represents JWKS object, a collection of jwk.Key objects. +// +// Sets can be safely converted to and from JSON using the standard +// `"encoding/json".Marshal` and `"encoding/json".Unmarshal`. However, +// if you do not know if the payload contains a single JWK or a JWK set, +// consider using `jwk.Parse()` to always get a `jwk.Set` out of it. 
+// +// Since v1.2.12, JWK sets with private parameters can be parsed as well. +// Such private parameters can be accessed via the `Field()` method. +// If a resource contains a single JWK instead of a JWK set, private parameters +// are stored in _both_ the resulting `jwk.Set` object and the `jwk.Key` object . +// +//nolint:interfacebloat +type Set interface { + // AddKey adds the specified key. If the key already exists in the set, + // an error is returned. + AddKey(Key) error + + // Clear resets the list of keys associated with this set, emptying the + // internal list of `jwk.Key`s, as well as clearing any other non-key + // fields + Clear() error + + // Get returns the key at index `idx`. If the index is out of range, + // then the second return value is false. + Key(int) (Key, bool) + + // Get returns the value of a private field in the key set. + // + // For the purposes of a key set, any field other than the "keys" field is + // considered to be a private field. In other words, you cannot use this + // method to directly access the list of keys in the set + Get(string) (interface{}, bool) + + // Set sets the value of a single field. + // + // This method, which takes an `interface{}`, exists because + // these objects can contain extra _arbitrary_ fields that users can + // specify, and there is no way of knowing what type they could be. + Set(string, interface{}) error + + // RemoveKey removes the specified non-key field from the set. + // Keys may not be removed using this method. + Remove(string) error + + // Index returns the index where the given key exists, -1 otherwise + Index(Key) int + + // Len returns the number of keys in the set + Len() int + + // LookupKeyID returns the first key matching the given key id. + // The second return value is false if there are no keys matching the key id. + // The set *may* contain multiple keys with the same key id. 
If you + // need all of them, use `Iterate()` + LookupKeyID(string) (Key, bool) + + // RemoveKey removes the key from the set. + RemoveKey(Key) error + + // Keys creates an iterator to iterate through all keys in the set. + Keys(context.Context) KeyIterator + + // Iterate creates an iterator to iterate through all fields other than the keys + Iterate(context.Context) HeaderIterator + + // Clone create a new set with identical keys. Keys themselves are not cloned. + Clone() (Set, error) +} + +type set struct { + keys []Key + mu sync.RWMutex + dc DecodeCtx + privateParams map[string]interface{} +} + +type HeaderVisitor = iter.MapVisitor +type HeaderVisitorFunc = iter.MapVisitorFunc +type HeaderPair = mapiter.Pair +type HeaderIterator = mapiter.Iterator +type KeyPair = arrayiter.Pair +type KeyIterator = arrayiter.Iterator + +type PublicKeyer interface { + // PublicKey creates the corresponding PublicKey type for this object. + // All fields are copied onto the new public key, except for those that are not allowed. + // Returned value must not be the receiver itself. + PublicKey() (Key, error) +} + +type DecodeCtx interface { + json.DecodeCtx + IgnoreParseError() bool +} +type KeyWithDecodeCtx interface { + SetDecodeCtx(DecodeCtx) + DecodeCtx() DecodeCtx +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface_gen.go new file mode 100644 index 0000000000..ee765127d6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/interface_gen.go @@ -0,0 +1,116 @@ +// This file is auto-generated. 
DO NOT EDIT + +package jwk + +import ( + "context" + "crypto" + + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +const ( + KeyTypeKey = "kty" + KeyUsageKey = "use" + KeyOpsKey = "key_ops" + AlgorithmKey = "alg" + KeyIDKey = "kid" + X509URLKey = "x5u" + X509CertChainKey = "x5c" + X509CertThumbprintKey = "x5t" + X509CertThumbprintS256Key = "x5t#S256" +) + +// Key defines the minimal interface for each of the +// key types. Their use and implementation differ significantly +// between each key types, so you should use type assertions +// to perform more specific tasks with each key +type Key interface { + // Get returns the value of a single field. The second boolean return value + // will be false if the field is not stored in the source + // + // This method, which returns an `interface{}`, exists because + // these objects can contain extra _arbitrary_ fields that users can + // specify, and there is no way of knowing what type they could be + Get(string) (interface{}, bool) + + // Set sets the value of a single field. Note that certain fields, + // notably "kty", cannot be altered, but will not return an error + // + // This method, which takes an `interface{}`, exists because + // these objects can contain extra _arbitrary_ fields that users can + // specify, and there is no way of knowing what type they could be + Set(string, interface{}) error + + // Remove removes the field associated with the specified key. + // There is no way to remove the `kty` (key type). You will ALWAYS be left with one field in a jwk.Key. + Remove(string) error + + // Raw creates the corresponding raw key. For example, + // EC types would create *ecdsa.PublicKey or *ecdsa.PrivateKey, + // and OctetSeq types create a []byte key. + // + // If you do not know the exact type of a jwk.Key before attempting + // to obtain the raw key, you can simply pass a pointer to an + // empty interface as the first argument. 
+ // + // If you already know the exact type, it is recommended that you + // pass a pointer to the zero value of the actual key type (e.g. &rsa.PrivateKey) + // for efficiency. + Raw(interface{}) error + + // Thumbprint returns the JWK thumbprint using the indicated + // hashing algorithm, according to RFC 7638 + Thumbprint(crypto.Hash) ([]byte, error) + + // Iterate returns an iterator that returns all keys and values. + // See github.com/lestrrat-go/iter for a description of the iterator. + Iterate(ctx context.Context) HeaderIterator + + // Walk is a utility tool that allows a visitor to iterate all keys and values + Walk(context.Context, HeaderVisitor) error + + // AsMap is a utility tool that returns a new map that contains the same fields as the source + AsMap(context.Context) (map[string]interface{}, error) + + // PrivateParams returns the non-standard elements in the source structure + // WARNING: DO NOT USE PrivateParams() IF YOU HAVE CONCURRENT CODE ACCESSING THEM. + // Use `AsMap()` to get a copy of the entire header, or use `Iterate()` instead + PrivateParams() map[string]interface{} + + // Clone creates a new instance of the same type + Clone() (Key, error) + + // PublicKey creates the corresponding PublicKey type for this object. + // All fields are copied onto the new public key, except for those that are not allowed. + // + // If the key is already a public key, it returns a new copy minus the disallowed fields as above. + PublicKey() (Key, error) + + // KeyType returns the `kty` of a JWK + KeyType() jwa.KeyType + // KeyUsage returns `use` of a JWK + KeyUsage() string + // KeyOps returns `key_ops` of a JWK + KeyOps() KeyOperationList + // Algorithm returns `alg` of a JWK + + // Algorithm returns the value of the `alg` field + // + // This field may contain either `jwk.SignatureAlgorithm` or `jwk.KeyEncryptionAlgorithm`. + // This is why there exists a `jwa.KeyAlgorithm` type that encompases both types. 
+ Algorithm() jwa.KeyAlgorithm + // KeyID returns `kid` of a JWK + KeyID() string + // X509URL returns `x5u` of a JWK + X509URL() string + // X509CertChain returns `x5c` of a JWK + X509CertChain() *cert.Chain + // X509CertThumbprint returns `x5t` of a JWK + X509CertThumbprint() string + // X509CertThumbprintS256 returns `x5t#S256` of a JWK + X509CertThumbprintS256() string + + makePairs() []*HeaderPair +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/io.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/io.go new file mode 100644 index 0000000000..4e0d487eee --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/io.go @@ -0,0 +1,42 @@ +// Automatically generated by internal/cmd/genreadfile/main.go. DO NOT EDIT + +package jwk + +import ( + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (Set, error) { + var parseOptions []ParseOption + var readFileOptions []ReadFileOption + for _, option := range options { + if po, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, po) + } else { + readFileOptions = append(readFileOptions, option) + } + } + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + srcFS = option.Value().(fs.FS) + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f, parseOptions...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/jwk.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/jwk.go new file mode 100644 index 0000000000..3d4671c1fc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/jwk.go @@ -0,0 +1,726 @@ +//go:generate ../tools/cmd/genjwk.sh + +// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517 +package jwk + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io" + "math/big" + + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/ecutil" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +var registry = json.NewRegistry() + +func bigIntToBytes(n *big.Int) ([]byte, error) { + if n == nil { + return nil, fmt.Errorf(`invalid *big.Int value`) + } + return n.Bytes(), nil +} + +// FromRaw creates a jwk.Key from the given key (RSA/ECDSA/symmetric keys). 
+// +// The constructor auto-detects the type of key to be instantiated +// based on the input type: +// +// - "crypto/rsa".PrivateKey and "crypto/rsa".PublicKey creates an RSA based key +// - "crypto/ecdsa".PrivateKey and "crypto/ecdsa".PublicKey creates an EC based key +// - "crypto/ed25519".PrivateKey and "crypto/ed25519".PublicKey creates an OKP based key +// - []byte creates a symmetric key +func FromRaw(key interface{}) (Key, error) { + if key == nil { + return nil, fmt.Errorf(`jwk.New requires a non-nil key`) + } + + var ptr interface{} + switch v := key.(type) { + case rsa.PrivateKey: + ptr = &v + case rsa.PublicKey: + ptr = &v + case ecdsa.PrivateKey: + ptr = &v + case ecdsa.PublicKey: + ptr = &v + default: + ptr = v + } + + switch rawKey := ptr.(type) { + case *rsa.PrivateKey: + k := newRSAPrivateKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case *rsa.PublicKey: + k := newRSAPublicKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case *ecdsa.PrivateKey: + k := newECDSAPrivateKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case *ecdsa.PublicKey: + k := newECDSAPublicKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case ed25519.PrivateKey: + k := newOKPPrivateKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case ed25519.PublicKey: + k := newOKPPublicKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case x25519.PrivateKey: + k := newOKPPrivateKey() + 
if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case x25519.PublicKey: + k := newOKPPublicKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + case []byte: + k := newSymmetricKey() + if err := k.FromRaw(rawKey); err != nil { + return nil, fmt.Errorf(`failed to initialize %T from %T: %w`, k, rawKey, err) + } + return k, nil + default: + return nil, fmt.Errorf(`invalid key type '%T' for jwk.New`, key) + } +} + +// PublicSetOf returns a new jwk.Set consisting of +// public keys of the keys contained in the set. +// +// This is useful when you are generating a set of private keys, and +// you want to generate the corresponding public versions for the +// users to verify with. +// +// Be aware that all fields will be copied onto the new public key. It is the caller's +// responsibility to remove any fields, if necessary. +func PublicSetOf(v Set) (Set, error) { + newSet := NewSet() + + n := v.Len() + for i := 0; i < n; i++ { + k, ok := v.Key(i) + if !ok { + return nil, fmt.Errorf(`key not found`) + } + pubKey, err := PublicKeyOf(k) + if err != nil { + return nil, fmt.Errorf(`failed to get public key of %T: %w`, k, err) + } + if err := newSet.AddKey(pubKey); err != nil { + return nil, fmt.Errorf(`failed to add key to public key set: %w`, err) + } + } + + return newSet, nil +} + +// PublicKeyOf returns the corresponding public version of the jwk.Key. +// If `v` is a SymmetricKey, then the same value is returned. +// If `v` is already a public key, the key itself is returned. +// +// If `v` is a private key type that has a `PublicKey()` method, be aware +// that all fields will be copied onto the new public key. 
It is the caller's +// responsibility to remove any fields, if necessary +// +// If `v` is a raw key, the key is first converted to a `jwk.Key` +func PublicKeyOf(v interface{}) (Key, error) { + // This should catch all jwk.Key instances + if pk, ok := v.(PublicKeyer); ok { + return pk.PublicKey() + } + + jk, err := FromRaw(v) + if err != nil { + return nil, fmt.Errorf(`failed to convert key into JWK: %w`, err) + } + + return jk.PublicKey() +} + +// PublicRawKeyOf returns the corresponding public key of the given +// value `v` (e.g. given *rsa.PrivateKey, *rsa.PublicKey is returned) +// If `v` is already a public key, the key itself is returned. +// +// The returned value will always be a pointer to the public key, +// except when a []byte (e.g. symmetric key, ed25519 key) is passed to `v`. +// In this case, the same []byte value is returned. +func PublicRawKeyOf(v interface{}) (interface{}, error) { + if pk, ok := v.(PublicKeyer); ok { + pubk, err := pk.PublicKey() + if err != nil { + return nil, fmt.Errorf(`failed to obtain public key from %T: %w`, v, err) + } + + var raw interface{} + if err := pubk.Raw(&raw); err != nil { + return nil, fmt.Errorf(`failed to obtain raw key from %T: %w`, pubk, err) + } + return raw, nil + } + + // This may be a silly idea, but if the user gave us a non-pointer value... 
+ var ptr interface{} + switch v := v.(type) { + case rsa.PrivateKey: + ptr = &v + case rsa.PublicKey: + ptr = &v + case ecdsa.PrivateKey: + ptr = &v + case ecdsa.PublicKey: + ptr = &v + default: + ptr = v + } + + switch x := ptr.(type) { + case *rsa.PrivateKey: + return &x.PublicKey, nil + case *rsa.PublicKey: + return x, nil + case *ecdsa.PrivateKey: + return &x.PublicKey, nil + case *ecdsa.PublicKey: + return x, nil + case ed25519.PrivateKey: + return x.Public(), nil + case ed25519.PublicKey: + return x, nil + case x25519.PrivateKey: + return x.Public(), nil + case x25519.PublicKey: + return x, nil + case []byte: + return x, nil + default: + return nil, fmt.Errorf(`invalid key type passed to PublicKeyOf (%T)`, v) + } +} + +const ( + pmPrivateKey = `PRIVATE KEY` + pmPublicKey = `PUBLIC KEY` +) + +// EncodeX509 encodes the key into a byte sequence in ASN.1 DER format +// suitable for to be PEM encoded. The key can be a jwk.Key or a raw key +// instance, but it must be one of the types supported by `x509` package. +// +// This function will try to do the right thing depending on the key type +// (i.e. switch between `x509.MarshalPKCS1PRivateKey` and `x509.MarshalECPrivateKey`), +// but for public keys, it will always use `x509.MarshalPKIXPublicKey`. +// Please manually perform the encoding if you need more fine grained control +// +// The first return value is the name that can be used for `(pem.Block).Type`. +// The second return value is the encoded byte sequence. 
+func EncodeX509(v interface{}) (string, []byte, error) { + // we can't import jwk, so just use the interface + if key, ok := v.(interface{ Raw(interface{}) error }); ok { + var raw interface{} + if err := key.Raw(&raw); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key out of %T: %w`, key, err) + } + + v = raw + } + + // Try to convert it into a certificate + switch v := v.(type) { + case *rsa.PrivateKey: + return "RSA PRIVATE KEY", x509.MarshalPKCS1PrivateKey(v), nil + case *ecdsa.PrivateKey: + marshaled, err := x509.MarshalECPrivateKey(v) + if err != nil { + return "", nil, err + } + return "ECDSA PRIVATE KEY", marshaled, nil + case ed25519.PrivateKey: + marshaled, err := x509.MarshalPKCS8PrivateKey(v) + if err != nil { + return "", nil, err + } + return pmPrivateKey, marshaled, nil + case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: + marshaled, err := x509.MarshalPKIXPublicKey(v) + if err != nil { + return "", nil, err + } + return pmPublicKey, marshaled, nil + default: + return "", nil, fmt.Errorf(`unsupported type %T for ASN.1 DER encoding`, v) + } +} + +// EncodePEM encodes the key into a PEM encoded ASN.1 DER format. +// The key can be a jwk.Key or a raw key instance, but it must be one of +// the types supported by `x509` package. +// +// Internally, it uses the same routine as `jwk.EncodeX509()`, and therefore +// the same caveats apply +func EncodePEM(v interface{}) ([]byte, error) { + typ, marshaled, err := EncodeX509(v) + if err != nil { + return nil, fmt.Errorf(`failed to encode key in x509: %w`, err) + } + + block := &pem.Block{ + Type: typ, + Bytes: marshaled, + } + return pem.EncodeToMemory(block), nil +} + +// DecodePEM decodes a key in PEM encoded ASN.1 DER format. 
+// and returns a raw key +func DecodePEM(src []byte) (interface{}, []byte, error) { + block, rest := pem.Decode(src) + if block == nil { + return nil, nil, fmt.Errorf(`failed to decode PEM data`) + } + + switch block.Type { + // Handle the semi-obvious cases + case "RSA PRIVATE KEY": + key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf(`failed to parse PKCS1 private key: %w`, err) + } + return key, rest, nil + case "RSA PUBLIC KEY": + key, err := x509.ParsePKCS1PublicKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf(`failed to parse PKCS1 public key: %w`, err) + } + return key, rest, nil + case "EC PRIVATE KEY": + key, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf(`failed to parse EC private key: %w`, err) + } + return key, rest, nil + case pmPublicKey: + // XXX *could* return dsa.PublicKey + key, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf(`failed to parse PKIX public key: %w`, err) + } + return key, rest, nil + case pmPrivateKey: + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf(`failed to parse PKCS8 private key: %w`, err) + } + return key, rest, nil + case "CERTIFICATE": + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, nil, fmt.Errorf(`failed to parse certificate: %w`, err) + } + return cert.PublicKey, rest, nil + default: + return nil, nil, fmt.Errorf(`invalid PEM block type %s`, block.Type) + } +} + +// ParseRawKey is a combination of ParseKey and Raw. It parses a single JWK key, +// and assigns the "raw" key to the given parameter. The key must either be +// a pointer to an empty interface, or a pointer to the actual raw key type +// such as *rsa.PrivateKey, *ecdsa.PublicKey, *[]byte, etc. 
+func ParseRawKey(data []byte, rawkey interface{}) error { + key, err := ParseKey(data) + if err != nil { + return fmt.Errorf(`failed to parse key: %w`, err) + } + + if err := key.Raw(rawkey); err != nil { + return fmt.Errorf(`failed to assign to raw key variable: %w`, err) + } + + return nil +} + +type setDecodeCtx struct { + json.DecodeCtx + ignoreParseError bool +} + +func (ctx *setDecodeCtx) IgnoreParseError() bool { + return ctx.ignoreParseError +} + +// ParseKey parses a single key JWK. Unlike `jwk.Parse` this method will +// report failure if you attempt to pass a JWK set. Only use this function +// when you know that the data is a single JWK. +// +// Given a WithPEM(true) option, this function assumes that the given input +// is PEM encoded ASN.1 DER format key. +// +// Note that a successful parsing of any type of key does NOT necessarily +// guarantee a valid key. For example, no checks against expiration dates +// are performed for certificate expiration, no checks against missing +// parameters are performed, etc. 
+func ParseKey(data []byte, options ...ParseOption) (Key, error) { + var parsePEM bool + var localReg *json.Registry + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identPEM{}: + parsePEM = option.Value().(bool) + case identLocalRegistry{}: + // in reality you can only pass either withLocalRegistry or + // WithTypedField, but since withLocalRegistry is used only by us, + // we skip checking + localReg = option.Value().(*json.Registry) + case identTypedField{}: + pair := option.Value().(typedFieldPair) + if localReg == nil { + localReg = json.NewRegistry() + } + localReg.Register(pair.Name, pair.Value) + case identIgnoreParseError{}: + return nil, fmt.Errorf(`jwk.WithIgnoreParseError() cannot be used for ParseKey()`) + } + } + + if parsePEM { + raw, _, err := DecodePEM(data) + if err != nil { + return nil, fmt.Errorf(`failed to parse PEM encoded key: %w`, err) + } + return FromRaw(raw) + } + + var hint struct { + Kty string `json:"kty"` + D json.RawMessage `json:"d"` + } + + if err := json.Unmarshal(data, &hint); err != nil { + return nil, fmt.Errorf(`failed to unmarshal JSON into key hint: %w`, err) + } + + var key Key + switch jwa.KeyType(hint.Kty) { + case jwa.RSA: + if len(hint.D) > 0 { + key = newRSAPrivateKey() + } else { + key = newRSAPublicKey() + } + case jwa.EC: + if len(hint.D) > 0 { + key = newECDSAPrivateKey() + } else { + key = newECDSAPublicKey() + } + case jwa.OctetSeq: + key = newSymmetricKey() + case jwa.OKP: + if len(hint.D) > 0 { + key = newOKPPrivateKey() + } else { + key = newOKPPublicKey() + } + default: + return nil, fmt.Errorf(`invalid key type from JSON (%s)`, hint.Kty) + } + + if localReg != nil { + dcKey, ok := key.(json.DecodeCtxContainer) + if !ok { + return nil, fmt.Errorf(`typed field was requested, but the key (%T) does not support DecodeCtx`, key) + } + dc := json.NewDecodeCtx(localReg) + dcKey.SetDecodeCtx(dc) + defer func() { dcKey.SetDecodeCtx(nil) }() + } + + if err := 
json.Unmarshal(data, key); err != nil { + return nil, fmt.Errorf(`failed to unmarshal JSON into key (%T): %w`, key, err) + } + + return key, nil +} + +// Parse parses JWK from the incoming []byte. +// +// For JWK sets, this is a convenience function. You could just as well +// call `json.Unmarshal` against an empty set created by `jwk.NewSet()` +// to parse a JSON buffer into a `jwk.Set`. +// +// This function exists because many times the user does not know before hand +// if a JWK(s) resource at a remote location contains a single JWK key or +// a JWK set, and `jwk.Parse()` can handle either case, returning a JWK Set +// even if the data only contains a single JWK key +// +// If you are looking for more information on how JWKs are parsed, or if +// you know for sure that you have a single key, please see the documentation +// for `jwk.ParseKey()`. +func Parse(src []byte, options ...ParseOption) (Set, error) { + var parsePEM bool + var localReg *json.Registry + var ignoreParseError bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identPEM{}: + parsePEM = option.Value().(bool) + case identIgnoreParseError{}: + ignoreParseError = option.Value().(bool) + case identTypedField{}: + pair := option.Value().(typedFieldPair) + if localReg == nil { + localReg = json.NewRegistry() + } + localReg.Register(pair.Name, pair.Value) + } + } + + s := NewSet() + + if parsePEM { + src = bytes.TrimSpace(src) + for len(src) > 0 { + raw, rest, err := DecodePEM(src) + if err != nil { + return nil, fmt.Errorf(`failed to parse PEM encoded key: %w`, err) + } + key, err := FromRaw(raw) + if err != nil { + return nil, fmt.Errorf(`failed to create jwk.Key from %T: %w`, raw, err) + } + if err := s.AddKey(key); err != nil { + return nil, fmt.Errorf(`failed to add jwk.Key to set: %w`, err) + } + src = bytes.TrimSpace(rest) + } + return s, nil + } + + if localReg != nil || ignoreParseError { + dcKs, ok := s.(KeyWithDecodeCtx) + if !ok { + return 
nil, fmt.Errorf(`typed field was requested, but the key set (%T) does not support DecodeCtx`, s) + } + dc := &setDecodeCtx{ + DecodeCtx: json.NewDecodeCtx(localReg), + ignoreParseError: ignoreParseError, + } + dcKs.SetDecodeCtx(dc) + defer func() { dcKs.SetDecodeCtx(nil) }() + } + + if err := json.Unmarshal(src, s); err != nil { + return nil, fmt.Errorf(`failed to unmarshal JWK set: %w`, err) + } + + return s, nil +} + +// ParseReader parses a JWK set from the incoming byte buffer. +func ParseReader(src io.Reader, options ...ParseOption) (Set, error) { + // meh, there's no way to tell if a stream has "ended" a single + // JWKs except when we encounter an EOF, so just... ReadAll + buf, err := io.ReadAll(src) + if err != nil { + return nil, fmt.Errorf(`failed to read from io.Reader: %w`, err) + } + + return Parse(buf, options...) +} + +// ParseString parses a JWK set from the incoming string. +func ParseString(s string, options ...ParseOption) (Set, error) { + return Parse([]byte(s), options...) +} + +// AssignKeyID is a convenience function to automatically assign the "kid" +// section of the key, if it already doesn't have one. 
It uses Key.Thumbprint +// method with crypto.SHA256 as the default hashing algorithm +func AssignKeyID(key Key, options ...AssignKeyIDOption) error { + if _, ok := key.Get(KeyIDKey); ok { + return nil + } + + hash := crypto.SHA256 + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identThumbprintHash{}: + hash = option.Value().(crypto.Hash) + } + } + + h, err := key.Thumbprint(hash) + if err != nil { + return fmt.Errorf(`failed to generate thumbprint: %w`, err) + } + + if err := key.Set(KeyIDKey, base64.EncodeToString(h)); err != nil { + return fmt.Errorf(`failed to set "kid": %w`, err) + } + + return nil +} + +func cloneKey(src Key) (Key, error) { + var dst Key + switch src.(type) { + case RSAPrivateKey: + dst = newRSAPrivateKey() + case RSAPublicKey: + dst = newRSAPublicKey() + case ECDSAPrivateKey: + dst = newECDSAPrivateKey() + case ECDSAPublicKey: + dst = newECDSAPublicKey() + case OKPPrivateKey: + dst = newOKPPrivateKey() + case OKPPublicKey: + dst = newOKPPublicKey() + case SymmetricKey: + dst = newSymmetricKey() + default: + return nil, fmt.Errorf(`unknown key type %T`, src) + } + + for _, pair := range src.makePairs() { + //nolint:forcetypeassert + key := pair.Key.(string) + if err := dst.Set(key, pair.Value); err != nil { + return nil, fmt.Errorf(`failed to set %q: %w`, key, err) + } + } + return dst, nil +} + +// Pem serializes the given jwk.Key in PEM encoded ASN.1 DER format, +// using either PKCS8 for private keys and PKIX for public keys. +// If you need to encode using PKCS1 or SEC1, you must do it yourself. +// +// # Argument must be of type jwk.Key or jwk.Set +// +// Currently only EC (including Ed25519) and RSA keys (and jwk.Set +// comprised of these key types) are supported. 
+func Pem(v interface{}) ([]byte, error) { + var set Set + switch v := v.(type) { + case Key: + set = NewSet() + if err := set.AddKey(v); err != nil { + return nil, fmt.Errorf(`failed to add key to set: %w`, err) + } + case Set: + set = v + default: + return nil, fmt.Errorf(`argument to Pem must be either jwk.Key or jwk.Set: %T`, v) + } + + var ret []byte + for i := 0; i < set.Len(); i++ { + key, _ := set.Key(i) + typ, buf, err := asnEncode(key) + if err != nil { + return nil, fmt.Errorf(`failed to encode content for key #%d: %w`, i, err) + } + + var block pem.Block + block.Type = typ + block.Bytes = buf + ret = append(ret, pem.EncodeToMemory(&block)...) + } + return ret, nil +} + +func asnEncode(key Key) (string, []byte, error) { + switch key := key.(type) { + case RSAPrivateKey, ECDSAPrivateKey, OKPPrivateKey: + var rawkey interface{} + if err := key.Raw(&rawkey); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err) + } + buf, err := x509.MarshalPKCS8PrivateKey(rawkey) + if err != nil { + return "", nil, fmt.Errorf(`failed to marshal PKCS8: %w`, err) + } + return pmPrivateKey, buf, nil + case RSAPublicKey, ECDSAPublicKey, OKPPublicKey: + var rawkey interface{} + if err := key.Raw(&rawkey); err != nil { + return "", nil, fmt.Errorf(`failed to get raw key from jwk.Key: %w`, err) + } + buf, err := x509.MarshalPKIXPublicKey(rawkey) + if err != nil { + return "", nil, fmt.Errorf(`failed to marshal PKIX: %w`, err) + } + return pmPublicKey, buf, nil + default: + return "", nil, fmt.Errorf(`unsupported key type %T`, key) + } +} + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. 
+// +// In that case you would register a custom field as follows +// +// jwk.RegisterCustomField(`x-birthday`, timeT) +// +// Then `key.Get("x-birthday")` will still return an `interface{}`, +// but you can convert its type to `time.Time` +// +// bdayif, _ := key.Get(`x-birthday`) +// bday := bdayif.(time.Time) +func RegisterCustomField(name string, object interface{}) { + registry.Register(name, object) +} + +func AvailableCurves() []elliptic.Curve { + return ecutil.AvailableCurves() +} + +func CurveForAlgorithm(alg jwa.EllipticCurveAlgorithm) (elliptic.Curve, bool) { + return ecutil.CurveForAlgorithm(alg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/key_ops.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/key_ops.go new file mode 100644 index 0000000000..26fc2f28c8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/key_ops.go @@ -0,0 +1,58 @@ +package jwk + +import "fmt" + +func (ops *KeyOperationList) Get() KeyOperationList { + if ops == nil { + return nil + } + return *ops +} + +func (ops *KeyOperationList) Accept(v interface{}) error { + switch x := v.(type) { + case string: + return ops.Accept([]string{x}) + case []interface{}: + l := make([]string, len(x)) + for i, e := range x { + if es, ok := e.(string); ok { + l[i] = es + } else { + return fmt.Errorf(`invalid list element type: expected string, got %T`, v) + } + } + return ops.Accept(l) + case []string: + list := make(KeyOperationList, len(x)) + for i, e := range x { + switch e := KeyOperation(e); e { + case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits: + list[i] = e + default: + return fmt.Errorf(`invalid keyoperation %v`, e) + } + } + + *ops = list + return nil + case []KeyOperation: + list := make(KeyOperationList, len(x)) + for i, e := range x { + switch e { + case KeyOpSign, KeyOpVerify, KeyOpEncrypt, KeyOpDecrypt, KeyOpWrapKey, KeyOpUnwrapKey, KeyOpDeriveKey, KeyOpDeriveBits: + list[i] = e + default: + return 
fmt.Errorf(`invalid keyoperation %v`, e) + } + } + + *ops = list + return nil + case KeyOperationList: + *ops = x + return nil + default: + return fmt.Errorf(`invalid value %T`, v) + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp.go new file mode 100644 index 0000000000..2686ba516d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp.go @@ -0,0 +1,183 @@ +package jwk + +import ( + "bytes" + "crypto" + "crypto/ed25519" + "fmt" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +func (k *okpPublicKey) FromRaw(rawKeyIf interface{}) error { + k.mu.Lock() + defer k.mu.Unlock() + + var crv jwa.EllipticCurveAlgorithm + switch rawKey := rawKeyIf.(type) { + case ed25519.PublicKey: + k.x = rawKey + crv = jwa.Ed25519 + k.crv = &crv + case x25519.PublicKey: + k.x = rawKey + crv = jwa.X25519 + k.crv = &crv + default: + return fmt.Errorf(`unknown key type %T`, rawKeyIf) + } + + return nil +} + +func (k *okpPrivateKey) FromRaw(rawKeyIf interface{}) error { + k.mu.Lock() + defer k.mu.Unlock() + + var crv jwa.EllipticCurveAlgorithm + switch rawKey := rawKeyIf.(type) { + case ed25519.PrivateKey: + k.d = rawKey.Seed() + k.x = rawKey.Public().(ed25519.PublicKey) //nolint:forcetypeassert + crv = jwa.Ed25519 + k.crv = &crv + case x25519.PrivateKey: + k.d = rawKey.Seed() + k.x = rawKey.Public().(x25519.PublicKey) //nolint:forcetypeassert + crv = jwa.X25519 + k.crv = &crv + default: + return fmt.Errorf(`unknown key type %T`, rawKeyIf) + } + + return nil +} + +func buildOKPPublicKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte) (interface{}, error) { + switch alg { + case jwa.Ed25519: + return ed25519.PublicKey(xbuf), nil + case jwa.X25519: + return x25519.PublicKey(xbuf), nil + default: + return nil, fmt.Errorf(`invalid curve algorithm %s`, alg) + } +} + +// Raw returns the EC-DSA public key 
represented by this JWK +func (k *okpPublicKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + + pubk, err := buildOKPPublicKey(k.Crv(), k.x) + if err != nil { + return fmt.Errorf(`failed to build public key: %w`, err) + } + + return blackmagic.AssignIfCompatible(v, pubk) +} + +func buildOKPPrivateKey(alg jwa.EllipticCurveAlgorithm, xbuf []byte, dbuf []byte) (interface{}, error) { + switch alg { + case jwa.Ed25519: + ret := ed25519.NewKeyFromSeed(dbuf) + //nolint:forcetypeassert + if !bytes.Equal(xbuf, ret.Public().(ed25519.PublicKey)) { + return nil, fmt.Errorf(`invalid x value given d value`) + } + return ret, nil + case jwa.X25519: + ret, err := x25519.NewKeyFromSeed(dbuf) + if err != nil { + return nil, fmt.Errorf(`unable to construct x25519 private key from seed: %w`, err) + } + //nolint:forcetypeassert + if !bytes.Equal(xbuf, ret.Public().(x25519.PublicKey)) { + return nil, fmt.Errorf(`invalid x value given d value`) + } + return ret, nil + default: + return nil, fmt.Errorf(`invalid curve algorithm %s`, alg) + } +} + +func (k *okpPrivateKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + + privk, err := buildOKPPrivateKey(k.Crv(), k.x, k.d) + if err != nil { + return fmt.Errorf(`failed to build public key: %w`, err) + } + + return blackmagic.AssignIfCompatible(v, privk) +} + +func makeOKPPublicKey(v interface { + makePairs() []*HeaderPair +}) (Key, error) { + newKey := newOKPPublicKey() + + // Iterate and copy everything except for the bits that should not be in the public key + for _, pair := range v.makePairs() { + switch pair.Key { + case OKPDKey: + continue + default: + //nolint:forcetypeassert + key := pair.Key.(string) + if err := newKey.Set(key, pair.Value); err != nil { + return nil, fmt.Errorf(`failed to set field %q: %w`, key, err) + } + } + } + + return newKey, nil +} + +func (k *okpPrivateKey) PublicKey() (Key, error) { + return makeOKPPublicKey(k) +} + +func (k *okpPublicKey) PublicKey() (Key, error) { + 
return makeOKPPublicKey(k) +} + +func okpThumbprint(hash crypto.Hash, crv, x string) []byte { + h := hash.New() + fmt.Fprint(h, `{"crv":"`) + fmt.Fprint(h, crv) + fmt.Fprint(h, `","kty":"OKP","x":"`) + fmt.Fprint(h, x) + fmt.Fprint(h, `"}`) + return h.Sum(nil) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 / 8037 +func (k okpPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + return okpThumbprint( + hash, + k.Crv().String(), + base64.EncodeToString(k.x), + ), nil +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 / 8037 +func (k okpPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + return okpThumbprint( + hash, + k.Crv().String(), + base64.EncodeToString(k.x), + ), nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp_gen.go new file mode 100644 index 0000000000..ccad6775b5 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/okp_gen.go @@ -0,0 +1,1119 @@ +// This file is auto-generated by jwk/internal/cmd/genheader/main.go. 
DO NOT EDIT + +package jwk + +import ( + "bytes" + "context" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +const ( + OKPCrvKey = "crv" + OKPDKey = "d" + OKPXKey = "x" +) + +type OKPPublicKey interface { + Key + FromRaw(interface{}) error + Crv() jwa.EllipticCurveAlgorithm + X() []byte +} + +type okpPublicKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]interface{} + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ OKPPublicKey = &okpPublicKey{} +var _ Key = &okpPublicKey{} + +func newOKPPublicKey() *okpPublicKey { + return &okpPublicKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h okpPublicKey) KeyType() jwa.KeyType { + return jwa.OKP +} + +func (h *okpPublicKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + } + return jwa.InvalidKeyAlgorithm("") +} + +func (h *okpPublicKey) Crv() jwa.EllipticCurveAlgorithm { + if h.crv != nil { + return *(h.crv) + } + return jwa.InvalidEllipticCurve +} + +func (h *okpPublicKey) KeyID() string { + if h.keyID != nil { 
+ return *(h.keyID) + } + return "" +} + +func (h *okpPublicKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *okpPublicKey) KeyUsage() string { + if h.keyUsage != nil { + return *(h.keyUsage) + } + return "" +} + +func (h *okpPublicKey) X() []byte { + return h.x +} + +func (h *okpPublicKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *okpPublicKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *okpPublicKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *okpPublicKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *okpPublicKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.OKP}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.crv != nil { + pairs = append(pairs, &HeaderPair{Key: OKPCrvKey, Value: *(h.crv)}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage != nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.x != nil { + pairs = append(pairs, &HeaderPair{Key: OKPXKey, Value: h.x}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if 
h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *okpPublicKey) PrivateParams() map[string]interface{} { + return h.privateParams +} + +func (h *okpPublicKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case OKPCrvKey: + if h.crv == nil { + return nil, false + } + return *(h.crv), true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case OKPXKey: + if h.x == nil { + return nil, false + } + return h.x, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *okpPublicKey) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *okpPublicKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + 
case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case OKPCrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case OKPXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = 
map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *okpPublicKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case OKPCrvKey: + k.crv = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case OKPXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *okpPublicKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *okpPublicKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *okpPublicKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *okpPublicKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. 
+ if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.OKP.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case OKPCrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err) + } + h.crv = &decoded + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case OKPXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case 
X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + return nil +} + +func (h okpPublicKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 10) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *okpPublicKey) Iterate(ctx context.Context) HeaderIterator 
{ + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *okpPublicKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *okpPublicKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} + +type OKPPrivateKey interface { + Key + FromRaw(interface{}) error + Crv() jwa.EllipticCurveAlgorithm + D() []byte + X() []byte +} + +type okpPrivateKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + crv *jwa.EllipticCurveAlgorithm + d []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + x []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]interface{} + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ OKPPrivateKey = &okpPrivateKey{} +var _ Key = &okpPrivateKey{} + +func newOKPPrivateKey() *okpPrivateKey { + return &okpPrivateKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h okpPrivateKey) KeyType() jwa.KeyType { + return jwa.OKP +} + +func (h *okpPrivateKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + } + return jwa.InvalidKeyAlgorithm("") +} + +func (h *okpPrivateKey) Crv() jwa.EllipticCurveAlgorithm { + if h.crv != nil { + 
return *(h.crv) + } + return jwa.InvalidEllipticCurve +} + +func (h *okpPrivateKey) D() []byte { + return h.d +} + +func (h *okpPrivateKey) KeyID() string { + if h.keyID != nil { + return *(h.keyID) + } + return "" +} + +func (h *okpPrivateKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *okpPrivateKey) KeyUsage() string { + if h.keyUsage != nil { + return *(h.keyUsage) + } + return "" +} + +func (h *okpPrivateKey) X() []byte { + return h.x +} + +func (h *okpPrivateKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *okpPrivateKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *okpPrivateKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *okpPrivateKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *okpPrivateKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.OKP}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.crv != nil { + pairs = append(pairs, &HeaderPair{Key: OKPCrvKey, Value: *(h.crv)}) + } + if h.d != nil { + pairs = append(pairs, &HeaderPair{Key: OKPDKey, Value: h.d}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage != nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.x != nil { + pairs = append(pairs, &HeaderPair{Key: OKPXKey, Value: h.x}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if 
h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *okpPrivateKey) PrivateParams() map[string]interface{} { + return h.privateParams +} + +func (h *okpPrivateKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case OKPCrvKey: + if h.crv == nil { + return nil, false + } + return *(h.crv), true + case OKPDKey: + if h.d == nil { + return nil, false + } + return h.d, true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case OKPXKey: + if h.x == nil { + return nil, false + } + return h.x, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *okpPrivateKey) Set(name string, value interface{}) error { + 
h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *okpPrivateKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case OKPCrvKey: + if v, ok := value.(jwa.EllipticCurveAlgorithm); ok { + h.crv = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPCrvKey, value) + case OKPDKey: + if v, ok := value.([]byte); ok { + h.d = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPDKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case OKPXKey: + if v, ok := value.([]byte); ok { + h.x = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, OKPXKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return 
nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *okpPrivateKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case OKPCrvKey: + k.crv = nil + case OKPDKey: + k.d = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case OKPXKey: + k.x = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *okpPrivateKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *okpPrivateKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *okpPrivateKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *okpPrivateKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.crv = nil + h.d = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.x = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := 
tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.OKP.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case OKPCrvKey: + var decoded jwa.EllipticCurveAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPCrvKey, err) + } + h.crv = &decoded + case OKPDKey: + if err := json.AssignNextBytesToken(&h.d, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPDKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case OKPXKey: + if err := json.AssignNextBytesToken(&h.x, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, OKPXKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, 
X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.crv == nil { + return fmt.Errorf(`required field crv is missing`) + } + if h.d == nil { + return fmt.Errorf(`required field d is missing`) + } + if h.x == nil { + return fmt.Errorf(`required field x is missing`) + } + return nil +} + +func (h okpPrivateKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 11) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + 
buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *okpPrivateKey) Iterate(ctx context.Context) HeaderIterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *okpPrivateKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *okpPrivateKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.go new file mode 100644 index 0000000000..98fcc4097a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.go @@ -0,0 +1,38 @@ +package jwk + +import ( + "github.com/lestrrat-go/option" +) + +type identTypedField struct{} + +type typedFieldPair struct { + Name string + Value interface{} +} + +// WithTypedField allows a private field to be parsed into the object type of +// your choice. It works much like the RegisterCustomField, but the effect +// is only applicable to the jwt.Parse function call which receives this option. +// +// While this can be extremely useful, this option should be used with caution: +// There are many caveats that your entire team/user-base needs to be aware of, +// and therefore in general its use is discouraged. Only use it when you know +// what you are doing, and you document its use clearly for others. +// +// First and foremost, this is a "per-object" option. 
Meaning that given the same +// serialized format, it is possible to generate two objects whose internal +// representations may differ. That is, if you parse one _WITH_ the option, +// and the other _WITHOUT_, their internal representation may completely differ. +// This could potentially lead to problems. +// +// Second, specifying this option will slightly slow down the decoding process +// as it needs to consult multiple definitions sources (global and local), so +// be careful if you are decoding a large number of tokens, as the effects will stack up. +func WithTypedField(name string, object interface{}) ParseOption { + return &parseOption{ + option.New(identTypedField{}, + typedFieldPair{Name: name, Value: object}, + ), + } +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.yaml b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.yaml new file mode 100644 index 0000000000..3f7b6e2a16 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options.yaml @@ -0,0 +1,142 @@ +package_name: jwk +output: jwk/options_gen.go +interfaces: + - name: CacheOption + comment: | + CacheOption is a type of Option that can be passed to the + `jwk.Cache` object. 
+ - name: AssignKeyIDOption + - name: FetchOption + methods: + - fetchOption + - parseOption + - registerOption + comment: | + FetchOption is a type of Option that can be passed to `jwk.Fetch()` + FetchOption also implements the `CacheOption`, and thus can + safely be passed to `(*jwk.Cache).Configure()` + - name: ParseOption + methods: + - fetchOption + - registerOption + - readFileOption + comment: | + ParseOption is a type of Option that can be passed to `jwk.Parse()` + ParseOption also implmentsthe `ReadFileOption` and `CacheOption`, + and thus safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()` + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile` + - name: RegisterOption + comment: | + RegisterOption desribes options that can be passed to `(jwk.Cache).Register()` +options: + - ident: HTTPClient + interface: FetchOption + argument_type: HTTPClient + comment: | + WithHTTPClient allows users to specify the "net/http".Client object that + is used when fetching jwk.Set objects. + - ident: ThumbprintHash + interface: AssignKeyIDOption + argument_type: crypto.Hash + - ident: RefreshInterval + interface: RegisterOption + argument_type: time.Duration + comment: | + WithRefreshInterval specifies the static interval between refreshes + of jwk.Set objects controlled by jwk.Cache. + + Providing this option overrides the adaptive token refreshing based + on Cache-Control/Expires header (and jwk.WithMinRefreshInterval), + and refreshes will *always* happen in this interval. + - ident: MinRefreshInterval + interface: RegisterOption + argument_type: time.Duration + comment: | + WithMinRefreshInterval specifies the minimum refresh interval to be used + when using `jwk.Cache`. This value is ONLY used if you did not specify + a user-supplied static refresh interval via `WithRefreshInterval`. + + This value is used as a fallback value when tokens are refreshed. 
+ + When we fetch the key from a remote URL, we first look at the max-age + directive from Cache-Control response header. If this value is present, + we compare the max-age value and the value specified by this option + and take the larger one. + + Next we check for the Expires header, and similarly if the header is + present, we compare it against the value specified by this option, + and take the larger one. + + Finally, if neither of the above headers are present, we use the + value specified by this option as the next refresh timing + + If unspecified, the minimum refresh interval is 1 hour + - ident: LocalRegistry + option_name: withLocalRegistry + interface: ParseOption + argument_type: '*json.Registry' + comment: This option is only available for internal code. Users don't get to play with it + - ident: PEM + interface: ParseOption + argument_type: bool + comment: WithPEM specifies that the input to `Parse()` is a PEM encoded key. + - ident: FetchWhitelist + interface: FetchOption + argument_type: Whitelist + comment: | + WithFetchWhitelist specifies the Whitelist object to use when + fetching JWKs from a remote source. This option can be passed + to both `jwk.Fetch()`, `jwk.NewCache()`, and `(*jwk.Cache).Configure()` + - ident: IgnoreParseError + interface: ParseOption + argument_type: bool + comment: | + WithIgnoreParseError is only applicable when used with `jwk.Parse()` + (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function + will return an error no matter what the input is. + + DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST. + + The option specifies that errors found during parsing of individual + keys are ignored. For example, if you had keys A, B, C where B is + invalid (e.g. it does not contain the required fields), then the + resulting JWKS will contain keys A and C only. 
+ + This options exists as an escape hatch for those times when a + key in a JWKS that is irrelevant for your use case is causing + your JWKS parsing to fail, and you want to get to the rest of the + keys in the JWKS. + + Again, DO NOT USE unless you have exhausted all other routes. + When you use this option, you will not be able to tell if you are + using a faulty JWKS, except for when there are JSON syntax errors. + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. + - ident: PostFetcher + interface: RegisterOption + argument_type: PostFetcher + comment: | + WithPostFetcher specifies the PostFetcher object to be used on the + jwk.Set object obtained in `jwk.Cache`. This option can be used + to, for example, modify the jwk.Set to give it key IDs or algorithm + names after it has been fetched and parsed, but before it is cached. + - ident: RefreshWindow + interface: CacheOption + argument_type: time.Duration + comment: | + WithRefreshWindow specifies the interval between checks for refreshes. + + See the documentation in `httprc.WithRefreshWindow` for more details. + - ident: ErrSink + interface: CacheOption + argument_type: ErrSink + comment: | + WithErrSink specifies the `httprc.ErrSink` object that handles errors + that occurred during the cache's execution. + + See the documentation in `httprc.WithErrSink` for more details. diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options_gen.go new file mode 100644 index 0000000000..17e23a7055 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/options_gen.go @@ -0,0 +1,274 @@ +// This file is auto-generated by internal/cmd/genoptions/main.go. 
DO NOT EDIT + +package jwk + +import ( + "crypto" + "io/fs" + "time" + + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/option" +) + +type Option = option.Interface + +type AssignKeyIDOption interface { + Option + assignKeyIDOption() +} + +type assignKeyIDOption struct { + Option +} + +func (*assignKeyIDOption) assignKeyIDOption() {} + +// CacheOption is a type of Option that can be passed to the +// `jwk.Cache` object. +type CacheOption interface { + Option + cacheOption() +} + +type cacheOption struct { + Option +} + +func (*cacheOption) cacheOption() {} + +// FetchOption is a type of Option that can be passed to `jwk.Fetch()` +// FetchOption also implements the `CacheOption`, and thus can +// safely be passed to `(*jwk.Cache).Configure()` +type FetchOption interface { + Option + fetchOption() + parseOption() + registerOption() +} + +type fetchOption struct { + Option +} + +func (*fetchOption) fetchOption() {} + +func (*fetchOption) parseOption() {} + +func (*fetchOption) registerOption() {} + +// ParseOption is a type of Option that can be passed to `jwk.Parse()` +// ParseOption also implmentsthe `ReadFileOption` and `CacheOption`, +// and thus safely be passed to `jwk.ReadFile` and `(*jwk.Cache).Configure()` +type ParseOption interface { + Option + fetchOption() + registerOption() + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) fetchOption() {} + +func (*parseOption) registerOption() {} + +func (*parseOption) readFileOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jwk.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// RegisterOption desribes options that can be passed to `(jwk.Cache).Register()` +type RegisterOption interface { + Option + registerOption() +} + +type registerOption struct { + Option +} + +func (*registerOption) registerOption() {} + 
+type identErrSink struct{} +type identFS struct{} +type identFetchWhitelist struct{} +type identHTTPClient struct{} +type identIgnoreParseError struct{} +type identLocalRegistry struct{} +type identMinRefreshInterval struct{} +type identPEM struct{} +type identPostFetcher struct{} +type identRefreshInterval struct{} +type identRefreshWindow struct{} +type identThumbprintHash struct{} + +func (identErrSink) String() string { + return "WithErrSink" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identFetchWhitelist) String() string { + return "WithFetchWhitelist" +} + +func (identHTTPClient) String() string { + return "WithHTTPClient" +} + +func (identIgnoreParseError) String() string { + return "WithIgnoreParseError" +} + +func (identLocalRegistry) String() string { + return "withLocalRegistry" +} + +func (identMinRefreshInterval) String() string { + return "WithMinRefreshInterval" +} + +func (identPEM) String() string { + return "WithPEM" +} + +func (identPostFetcher) String() string { + return "WithPostFetcher" +} + +func (identRefreshInterval) String() string { + return "WithRefreshInterval" +} + +func (identRefreshWindow) String() string { + return "WithRefreshWindow" +} + +func (identThumbprintHash) String() string { + return "WithThumbprintHash" +} + +// WithErrSink specifies the `httprc.ErrSink` object that handles errors +// that occurred during the cache's execution. +// +// See the documentation in `httprc.WithErrSink` for more details. +func WithErrSink(v ErrSink) CacheOption { + return &cacheOption{option.New(identErrSink{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +// WithFetchWhitelist specifies the Whitelist object to use when +// fetching JWKs from a remote source. 
This option can be passed +// to both `jwk.Fetch()`, `jwk.NewCache()`, and `(*jwk.Cache).Configure()` +func WithFetchWhitelist(v Whitelist) FetchOption { + return &fetchOption{option.New(identFetchWhitelist{}, v)} +} + +// WithHTTPClient allows users to specify the "net/http".Client object that +// is used when fetching jwk.Set objects. +func WithHTTPClient(v HTTPClient) FetchOption { + return &fetchOption{option.New(identHTTPClient{}, v)} +} + +// WithIgnoreParseError is only applicable when used with `jwk.Parse()` +// (i.e. to parse JWK sets). If passed to `jwk.ParseKey()`, the function +// will return an error no matter what the input is. +// +// DO NOT USE WITHOUT EXHAUSTING ALL OTHER ROUTES FIRST. +// +// The option specifies that errors found during parsing of individual +// keys are ignored. For example, if you had keys A, B, C where B is +// invalid (e.g. it does not contain the required fields), then the +// resulting JWKS will contain keys A and C only. +// +// This options exists as an escape hatch for those times when a +// key in a JWKS that is irrelevant for your use case is causing +// your JWKS parsing to fail, and you want to get to the rest of the +// keys in the JWKS. +// +// Again, DO NOT USE unless you have exhausted all other routes. +// When you use this option, you will not be able to tell if you are +// using a faulty JWKS, except for when there are JSON syntax errors. +func WithIgnoreParseError(v bool) ParseOption { + return &parseOption{option.New(identIgnoreParseError{}, v)} +} + +// This option is only available for internal code. Users don't get to play with it +func withLocalRegistry(v *json.Registry) ParseOption { + return &parseOption{option.New(identLocalRegistry{}, v)} +} + +// WithMinRefreshInterval specifies the minimum refresh interval to be used +// when using `jwk.Cache`. This value is ONLY used if you did not specify +// a user-supplied static refresh interval via `WithRefreshInterval`. 
+// +// This value is used as a fallback value when tokens are refreshed. +// +// When we fetch the key from a remote URL, we first look at the max-age +// directive from Cache-Control response header. If this value is present, +// we compare the max-age value and the value specified by this option +// and take the larger one. +// +// Next we check for the Expires header, and similarly if the header is +// present, we compare it against the value specified by this option, +// and take the larger one. +// +// Finally, if neither of the above headers are present, we use the +// value specified by this option as the next refresh timing +// +// If unspecified, the minimum refresh interval is 1 hour +func WithMinRefreshInterval(v time.Duration) RegisterOption { + return ®isterOption{option.New(identMinRefreshInterval{}, v)} +} + +// WithPEM specifies that the input to `Parse()` is a PEM encoded key. +func WithPEM(v bool) ParseOption { + return &parseOption{option.New(identPEM{}, v)} +} + +// WithPostFetcher specifies the PostFetcher object to be used on the +// jwk.Set object obtained in `jwk.Cache`. This option can be used +// to, for example, modify the jwk.Set to give it key IDs or algorithm +// names after it has been fetched and parsed, but before it is cached. +func WithPostFetcher(v PostFetcher) RegisterOption { + return ®isterOption{option.New(identPostFetcher{}, v)} +} + +// WithRefreshInterval specifies the static interval between refreshes +// of jwk.Set objects controlled by jwk.Cache. +// +// Providing this option overrides the adaptive token refreshing based +// on Cache-Control/Expires header (and jwk.WithMinRefreshInterval), +// and refreshes will *always* happen in this interval. +func WithRefreshInterval(v time.Duration) RegisterOption { + return ®isterOption{option.New(identRefreshInterval{}, v)} +} + +// WithRefreshWindow specifies the interval between checks for refreshes. +// +// See the documentation in `httprc.WithRefreshWindow` for more details. 
+func WithRefreshWindow(v time.Duration) CacheOption { + return &cacheOption{option.New(identRefreshWindow{}, v)} +} + +func WithThumbprintHash(v crypto.Hash) AssignKeyIDOption { + return &assignKeyIDOption{option.New(identThumbprintHash{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa.go new file mode 100644 index 0000000000..5de6b6358e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa.go @@ -0,0 +1,243 @@ +package jwk + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "fmt" + "math/big" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/pool" +) + +func (k *rsaPrivateKey) FromRaw(rawKey *rsa.PrivateKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + d, err := bigIntToBytes(rawKey.D) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.d = d + + l := len(rawKey.Primes) + + if l < 0 /* I know, I'm being paranoid */ || l > 2 { + return fmt.Errorf(`invalid number of primes in rsa.PrivateKey: need 0 to 2, but got %d`, len(rawKey.Primes)) + } + + if l > 0 { + p, err := bigIntToBytes(rawKey.Primes[0]) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.p = p + } + + if l > 1 { + q, err := bigIntToBytes(rawKey.Primes[1]) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.q = q + } + + // dp, dq, qi are optional values + if v, err := bigIntToBytes(rawKey.Precomputed.Dp); err == nil { + k.dp = v + } + if v, err := bigIntToBytes(rawKey.Precomputed.Dq); err == nil { + k.dq = v + } + if v, err := bigIntToBytes(rawKey.Precomputed.Qinv); err == nil { + k.qi = v + } + + // public key part + n, e, err := rsaPublicKeyByteValuesFromRaw(&rawKey.PublicKey) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.n = n + k.e = e + + return nil +} + +func rsaPublicKeyByteValuesFromRaw(rawKey 
*rsa.PublicKey) ([]byte, []byte, error) { + n, err := bigIntToBytes(rawKey.N) + if err != nil { + return nil, nil, fmt.Errorf(`invalid rsa.PublicKey: %w`, err) + } + + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, uint64(rawKey.E)) + i := 0 + for ; i < len(data); i++ { + if data[i] != 0x0 { + break + } + } + return n, data[i:], nil +} + +func (k *rsaPublicKey) FromRaw(rawKey *rsa.PublicKey) error { + k.mu.Lock() + defer k.mu.Unlock() + + n, e, err := rsaPublicKeyByteValuesFromRaw(rawKey) + if err != nil { + return fmt.Errorf(`invalid rsa.PrivateKey: %w`, err) + } + k.n = n + k.e = e + + return nil +} + +func (k *rsaPrivateKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + + var d, q, p big.Int // note: do not use from sync.Pool + + d.SetBytes(k.d) + q.SetBytes(k.q) + p.SetBytes(k.p) + + // optional fields + var dp, dq, qi *big.Int + if len(k.dp) > 0 { + dp = &big.Int{} // note: do not use from sync.Pool + dp.SetBytes(k.dp) + } + + if len(k.dq) > 0 { + dq = &big.Int{} // note: do not use from sync.Pool + dq.SetBytes(k.dq) + } + + if len(k.qi) > 0 { + qi = &big.Int{} // note: do not use from sync.Pool + qi.SetBytes(k.qi) + } + + var key rsa.PrivateKey + + pubk := newRSAPublicKey() + pubk.n = k.n + pubk.e = k.e + if err := pubk.Raw(&key.PublicKey); err != nil { + return fmt.Errorf(`failed to materialize RSA public key: %w`, err) + } + + key.D = &d + key.Primes = []*big.Int{&p, &q} + + if dp != nil { + key.Precomputed.Dp = dp + } + if dq != nil { + key.Precomputed.Dq = dq + } + if qi != nil { + key.Precomputed.Qinv = qi + } + key.Precomputed.CRTValues = []rsa.CRTValue{} + + return blackmagic.AssignIfCompatible(v, &key) +} + +// Raw takes the values stored in the Key object, and creates the +// corresponding *rsa.PublicKey object. 
+func (k *rsaPublicKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + + var key rsa.PublicKey + + n := pool.GetBigInt() + e := pool.GetBigInt() + defer pool.ReleaseBigInt(e) + + n.SetBytes(k.n) + e.SetBytes(k.e) + + key.N = n + key.E = int(e.Int64()) + + return blackmagic.AssignIfCompatible(v, &key) +} + +func makeRSAPublicKey(v interface { + makePairs() []*HeaderPair +}) (Key, error) { + newKey := newRSAPublicKey() + + // Iterate and copy everything except for the bits that should not be in the public key + for _, pair := range v.makePairs() { + switch pair.Key { + case RSADKey, RSADPKey, RSADQKey, RSAPKey, RSAQKey, RSAQIKey: + continue + default: + //nolint:forcetypeassert + key := pair.Key.(string) + if err := newKey.Set(key, pair.Value); err != nil { + return nil, fmt.Errorf(`failed to set field %q: %w`, key, err) + } + } + } + + return newKey, nil +} + +func (k *rsaPrivateKey) PublicKey() (Key, error) { + return makeRSAPublicKey(k) +} + +func (k *rsaPublicKey) PublicKey() (Key, error) { + return makeRSAPublicKey(k) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k rsaPrivateKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key rsa.PrivateKey + if err := k.Raw(&key); err != nil { + return nil, fmt.Errorf(`failed to materialize RSA private key: %w`, err) + } + return rsaThumbprint(hash, &key.PublicKey) +} + +func (k rsaPublicKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + + var key rsa.PublicKey + if err := k.Raw(&key); err != nil { + return nil, fmt.Errorf(`failed to materialize RSA public key: %w`, err) + } + return rsaThumbprint(hash, &key) +} + +func rsaThumbprint(hash crypto.Hash, key *rsa.PublicKey) ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + buf.WriteString(`{"e":"`) + buf.WriteString(base64.EncodeUint64ToString(uint64(key.E))) + 
buf.WriteString(`","kty":"RSA","n":"`) + buf.WriteString(base64.EncodeToString(key.N.Bytes())) + buf.WriteString(`"}`) + + h := hash.New() + if _, err := buf.WriteTo(h); err != nil { + return nil, fmt.Errorf(`failed to write rsaThumbprint: %w`, err) + } + return h.Sum(nil), nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa_gen.go new file mode 100644 index 0000000000..ce4e400ff8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/rsa_gen.go @@ -0,0 +1,1250 @@ +// This file is auto-generated by jwk/internal/cmd/genheader/main.go. DO NOT EDIT + +package jwk + +import ( + "bytes" + "context" + "crypto/rsa" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +const ( + RSADKey = "d" + RSADPKey = "dp" + RSADQKey = "dq" + RSAEKey = "e" + RSANKey = "n" + RSAPKey = "p" + RSAQIKey = "qi" + RSAQKey = "q" +) + +type RSAPublicKey interface { + Key + FromRaw(*rsa.PublicKey) error + E() []byte + N() []byte +} + +type rsaPublicKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + e []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + n []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]interface{} + mu 
*sync.RWMutex + dc json.DecodeCtx +} + +var _ RSAPublicKey = &rsaPublicKey{} +var _ Key = &rsaPublicKey{} + +func newRSAPublicKey() *rsaPublicKey { + return &rsaPublicKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h rsaPublicKey) KeyType() jwa.KeyType { + return jwa.RSA +} + +func (h *rsaPublicKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + } + return jwa.InvalidKeyAlgorithm("") +} + +func (h *rsaPublicKey) E() []byte { + return h.e +} + +func (h *rsaPublicKey) KeyID() string { + if h.keyID != nil { + return *(h.keyID) + } + return "" +} + +func (h *rsaPublicKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *rsaPublicKey) KeyUsage() string { + if h.keyUsage != nil { + return *(h.keyUsage) + } + return "" +} + +func (h *rsaPublicKey) N() []byte { + return h.n +} + +func (h *rsaPublicKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *rsaPublicKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *rsaPublicKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *rsaPublicKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *rsaPublicKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.RSA}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.e != nil { + pairs = append(pairs, &HeaderPair{Key: RSAEKey, Value: h.e}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage 
!= nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.n != nil { + pairs = append(pairs, &HeaderPair{Key: RSANKey, Value: h.n}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *rsaPublicKey) PrivateParams() map[string]interface{} { + return h.privateParams +} + +func (h *rsaPublicKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case RSAEKey: + if h.e == nil { + return nil, false + } + return h.e, true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case RSANKey: + if h.n == nil { + return nil, false + } + return h.n, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if 
h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *rsaPublicKey) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *rsaPublicKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case RSAEKey: + if v, ok := value.([]byte); ok { + h.e = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case RSANKey: + if v, ok := value.([]byte); ok { + h.n = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + 
h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *rsaPublicKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case RSAEKey: + k.e = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case RSANKey: + k.n = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *rsaPublicKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *rsaPublicKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *rsaPublicKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *rsaPublicKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.e = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.n = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case 
json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.RSA.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case RSAEKey: + if err := json.AssignNextBytesToken(&h.e, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case RSANKey: + if err := json.AssignNextBytesToken(&h.n, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: 
%w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.e == nil { + return fmt.Errorf(`required field e is missing`) + } + if h.n == nil { + return fmt.Errorf(`required field n is missing`) + } + return nil +} + +func (h rsaPublicKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 10) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *rsaPublicKey) 
Iterate(ctx context.Context) HeaderIterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *rsaPublicKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *rsaPublicKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} + +type RSAPrivateKey interface { + Key + FromRaw(*rsa.PrivateKey) error + D() []byte + DP() []byte + DQ() []byte + E() []byte + N() []byte + P() []byte + Q() []byte + QI() []byte +} + +type rsaPrivateKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + d []byte + dp []byte + dq []byte + e []byte + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + n []byte + p []byte + q []byte + qi []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]interface{} + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ RSAPrivateKey = &rsaPrivateKey{} +var _ Key = &rsaPrivateKey{} + +func newRSAPrivateKey() *rsaPrivateKey { + return &rsaPrivateKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h rsaPrivateKey) KeyType() jwa.KeyType { + return jwa.RSA +} + +func (h *rsaPrivateKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + 
} + return jwa.InvalidKeyAlgorithm("") +} + +func (h *rsaPrivateKey) D() []byte { + return h.d +} + +func (h *rsaPrivateKey) DP() []byte { + return h.dp +} + +func (h *rsaPrivateKey) DQ() []byte { + return h.dq +} + +func (h *rsaPrivateKey) E() []byte { + return h.e +} + +func (h *rsaPrivateKey) KeyID() string { + if h.keyID != nil { + return *(h.keyID) + } + return "" +} + +func (h *rsaPrivateKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *rsaPrivateKey) KeyUsage() string { + if h.keyUsage != nil { + return *(h.keyUsage) + } + return "" +} + +func (h *rsaPrivateKey) N() []byte { + return h.n +} + +func (h *rsaPrivateKey) P() []byte { + return h.p +} + +func (h *rsaPrivateKey) Q() []byte { + return h.q +} + +func (h *rsaPrivateKey) QI() []byte { + return h.qi +} + +func (h *rsaPrivateKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *rsaPrivateKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *rsaPrivateKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *rsaPrivateKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *rsaPrivateKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.RSA}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.d != nil { + pairs = append(pairs, &HeaderPair{Key: RSADKey, Value: h.d}) + } + if h.dp != nil { + pairs = append(pairs, &HeaderPair{Key: RSADPKey, Value: h.dp}) + } + if h.dq != nil { + pairs = append(pairs, &HeaderPair{Key: RSADQKey, Value: h.dq}) + } + if h.e != nil { + pairs = append(pairs, &HeaderPair{Key: RSAEKey, Value: h.e}) + } + if h.keyID != nil { + 
pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage != nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.n != nil { + pairs = append(pairs, &HeaderPair{Key: RSANKey, Value: h.n}) + } + if h.p != nil { + pairs = append(pairs, &HeaderPair{Key: RSAPKey, Value: h.p}) + } + if h.q != nil { + pairs = append(pairs, &HeaderPair{Key: RSAQKey, Value: h.q}) + } + if h.qi != nil { + pairs = append(pairs, &HeaderPair{Key: RSAQIKey, Value: h.qi}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *rsaPrivateKey) PrivateParams() map[string]interface{} { + return h.privateParams +} + +func (h *rsaPrivateKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case RSADKey: + if h.d == nil { + return nil, false + } + return h.d, true + case RSADPKey: + if h.dp == nil { + return nil, false + } + return h.dp, true + case RSADQKey: + if h.dq == nil { + return nil, false + } + return h.dq, true + case RSAEKey: + if h.e == nil { + return nil, false + } + return h.e, true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return 
*(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case RSANKey: + if h.n == nil { + return nil, false + } + return h.n, true + case RSAPKey: + if h.p == nil { + return nil, false + } + return h.p, true + case RSAQKey: + if h.q == nil { + return nil, false + } + return h.q, true + case RSAQIKey: + if h.qi == nil { + return nil, false + } + return h.qi, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *rsaPrivateKey) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *rsaPrivateKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case RSADKey: + if v, ok := value.([]byte); ok { + h.d = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSADKey, value) + case RSADPKey: + if v, ok := value.([]byte); ok { + h.dp = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSADPKey, value) + case 
RSADQKey: + if v, ok := value.([]byte); ok { + h.dq = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSADQKey, value) + case RSAEKey: + if v, ok := value.([]byte); ok { + h.e = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAEKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case RSANKey: + if v, ok := value.([]byte); ok { + h.n = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSANKey, value) + case RSAPKey: + if v, ok := value.([]byte); ok { + h.p = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAPKey, value) + case RSAQKey: + if v, ok := value.([]byte); ok { + h.q = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAQKey, value) + case RSAQIKey: + if v, ok := value.([]byte); ok { + h.qi = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, RSAQIKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); 
ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *rsaPrivateKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case RSADKey: + k.d = nil + case RSADPKey: + k.dp = nil + case RSADQKey: + k.dq = nil + case RSAEKey: + k.e = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case RSANKey: + k.n = nil + case RSAPKey: + k.p = nil + case RSAQKey: + k.q = nil + case RSAQIKey: + k.qi = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *rsaPrivateKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *rsaPrivateKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *rsaPrivateKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *rsaPrivateKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.d = nil + h.dp = nil + h.dq = nil + h.e = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.n = nil + h.p = nil + h.q = nil + h.qi = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error 
reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.RSA.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case RSADKey: + if err := json.AssignNextBytesToken(&h.d, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSADKey, err) + } + case RSADPKey: + if err := json.AssignNextBytesToken(&h.dp, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSADPKey, err) + } + case RSADQKey: + if err := json.AssignNextBytesToken(&h.dq, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSADQKey, err) + } + case RSAEKey: + if err := json.AssignNextBytesToken(&h.e, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAEKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) 
+ } + case RSANKey: + if err := json.AssignNextBytesToken(&h.n, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSANKey, err) + } + case RSAPKey: + if err := json.AssignNextBytesToken(&h.p, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAPKey, err) + } + case RSAQKey: + if err := json.AssignNextBytesToken(&h.q, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQKey, err) + } + case RSAQIKey: + if err := json.AssignNextBytesToken(&h.qi, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, RSAQIKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.d == nil { + return fmt.Errorf(`required field d is missing`) + } + if h.e == nil { + return 
fmt.Errorf(`required field e is missing`) + } + if h.n == nil { + return fmt.Errorf(`required field n is missing`) + } + return nil +} + +func (h rsaPrivateKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 16) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *rsaPrivateKey) Iterate(ctx context.Context) HeaderIterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *rsaPrivateKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *rsaPrivateKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/set.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/set.go new file mode 100644 index 0000000000..ab535104db --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/set.go @@ -0,0 +1,338 @@ +package jwk + +import ( + "bytes" + "context" 
+ "fmt" + "sort" + + "github.com/lestrrat-go/iter/arrayiter" + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" +) + +const keysKey = `keys` // appease linter + +// NewSet creates and empty `jwk.Set` object +func NewSet() Set { + return &set{ + privateParams: make(map[string]interface{}), + } +} + +func (s *set) Set(n string, v interface{}) error { + s.mu.RLock() + defer s.mu.RUnlock() + + if n == keysKey { + vl, ok := v.([]Key) + if !ok { + return fmt.Errorf(`value for field "keys" must be []jwk.Key`) + } + s.keys = vl + return nil + } + + s.privateParams[n] = v + return nil +} + +func (s *set) Get(n string) (interface{}, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + v, ok := s.privateParams[n] + return v, ok +} + +func (s *set) Key(idx int) (Key, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + if idx >= 0 && idx < len(s.keys) { + return s.keys[idx], true + } + return nil, false +} + +func (s *set) Len() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.keys) +} + +// indexNL is Index(), but without the locking +func (s *set) indexNL(key Key) int { + for i, k := range s.keys { + if k == key { + return i + } + } + return -1 +} + +func (s *set) Index(key Key) int { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.indexNL(key) +} + +func (s *set) AddKey(key Key) error { + s.mu.Lock() + defer s.mu.Unlock() + + if i := s.indexNL(key); i > -1 { + return fmt.Errorf(`(jwk.Set).AddKey: key already exists`) + } + s.keys = append(s.keys, key) + return nil +} + +func (s *set) Remove(name string) error { + s.mu.Lock() + defer s.mu.Unlock() + + delete(s.privateParams, name) + return nil +} + +func (s *set) RemoveKey(key Key) error { + s.mu.Lock() + defer s.mu.Unlock() + + for i, k := range s.keys { + if k == key { + switch i { + case 0: + s.keys = s.keys[1:] + case len(s.keys) - 1: + s.keys = s.keys[:i] + default: + s.keys = append(s.keys[:i], s.keys[i+1:]...) 
+ } + return nil + } + } + return fmt.Errorf(`(jwk.Set).RemoveKey: specified key does not exist in set`) +} + +func (s *set) Clear() error { + s.mu.Lock() + defer s.mu.Unlock() + + s.keys = nil + s.privateParams = make(map[string]interface{}) + return nil +} + +func (s *set) Keys(ctx context.Context) KeyIterator { + ch := make(chan *KeyPair, s.Len()) + go iterate(ctx, s.keys, ch) + return arrayiter.New(ch) +} + +func iterate(ctx context.Context, keys []Key, ch chan *KeyPair) { + defer close(ch) + + for i, key := range keys { + pair := &KeyPair{Index: i, Value: key} + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } +} + +func (s *set) MarshalJSON() ([]byte, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + enc := json.NewEncoder(buf) + + fields := []string{keysKey} + for k := range s.privateParams { + fields = append(fields, k) + } + sort.Strings(fields) + + buf.WriteByte('{') + for i, field := range fields { + if i > 0 { + buf.WriteByte(',') + } + fmt.Fprintf(buf, `%q:`, field) + if field != keysKey { + if err := enc.Encode(s.privateParams[field]); err != nil { + return nil, fmt.Errorf(`failed to marshal field %q: %w`, field, err) + } + } else { + buf.WriteByte('[') + for j, k := range s.keys { + if j > 0 { + buf.WriteByte(',') + } + if err := enc.Encode(k); err != nil { + return nil, fmt.Errorf(`failed to marshal key #%d: %w`, i, err) + } + } + buf.WriteByte(']') + } + } + buf.WriteByte('}') + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (s *set) UnmarshalJSON(data []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + + s.privateParams = make(map[string]interface{}) + s.keys = nil + + var options []ParseOption + var ignoreParseError bool + if dc := s.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + options = append(options, withLocalRegistry(localReg)) + } + ignoreParseError = dc.IgnoreParseError() + } + + var 
sawKeysField bool + dec := json.NewDecoder(bytes.NewReader(data)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: + switch tok { + case "keys": + sawKeysField = true + var list []json.RawMessage + if err := dec.Decode(&list); err != nil { + return fmt.Errorf(`failed to decode "keys": %w`, err) + } + + for i, keysrc := range list { + key, err := ParseKey(keysrc, options...) + if err != nil { + if !ignoreParseError { + return fmt.Errorf(`failed to decode key #%d in "keys": %w`, i, err) + } + continue + } + s.keys = append(s.keys, key) + } + default: + var v interface{} + if err := dec.Decode(&v); err != nil { + return fmt.Errorf(`failed to decode value for key %q: %w`, tok, err) + } + s.privateParams[tok] = v + } + } + } + + // This is really silly, but we can only detect the + // lack of the "keys" field after going through the + // entire object once + // Not checking for len(s.keys) == 0, because it could be + // an empty key set + if !sawKeysField { + key, err := ParseKey(data, options...) 
+ if err != nil { + return fmt.Errorf(`failed to parse sole key in key set`) + } + s.keys = append(s.keys, key) + } + return nil +} + +func (s *set) LookupKeyID(kid string) (Key, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + + n := s.Len() + for i := 0; i < n; i++ { + key, ok := s.Key(i) + if !ok { + return nil, false + } + if key.KeyID() == kid { + return key, true + } + } + return nil, false +} + +func (s *set) DecodeCtx() DecodeCtx { + s.mu.RLock() + defer s.mu.RUnlock() + return s.dc +} + +func (s *set) SetDecodeCtx(dc DecodeCtx) { + s.mu.Lock() + defer s.mu.Unlock() + s.dc = dc +} + +func (s *set) Clone() (Set, error) { + s2 := &set{} + + s.mu.RLock() + defer s.mu.RUnlock() + + s2.keys = make([]Key, len(s.keys)) + copy(s2.keys, s.keys) + return s2, nil +} + +func (s *set) makePairs() []*HeaderPair { + pairs := make([]*HeaderPair, 0, len(s.privateParams)) + for k, v := range s.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + sort.Slice(pairs, func(i, j int) bool { + //nolint:forcetypeassert + return pairs[i].Key.(string) < pairs[j].Key.(string) + }) + return pairs +} + +func (s *set) Iterate(ctx context.Context) HeaderIterator { + pairs := s.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric.go new file mode 100644 index 0000000000..d2498e3341 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric.go @@ -0,0 +1,60 @@ +package jwk + +import ( + "crypto" + "fmt" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/internal/base64" +) + +func (k *symmetricKey) FromRaw(rawKey []byte) error { + k.mu.Lock() + defer k.mu.Unlock() + + if 
len(rawKey) == 0 { + return fmt.Errorf(`non-empty []byte key required`) + } + + k.octets = rawKey + + return nil +} + +// Raw returns the octets for this symmetric key. +// Since this is a symmetric key, this just calls Octets +func (k *symmetricKey) Raw(v interface{}) error { + k.mu.RLock() + defer k.mu.RUnlock() + return blackmagic.AssignIfCompatible(v, k.octets) +} + +// Thumbprint returns the JWK thumbprint using the indicated +// hashing algorithm, according to RFC 7638 +func (k *symmetricKey) Thumbprint(hash crypto.Hash) ([]byte, error) { + k.mu.RLock() + defer k.mu.RUnlock() + var octets []byte + if err := k.Raw(&octets); err != nil { + return nil, fmt.Errorf(`failed to materialize symmetric key: %w`, err) + } + + h := hash.New() + fmt.Fprint(h, `{"k":"`) + fmt.Fprint(h, base64.EncodeToString(octets)) + fmt.Fprint(h, `","kty":"oct"}`) + return h.Sum(nil), nil +} + +func (k *symmetricKey) PublicKey() (Key, error) { + newKey := newSymmetricKey() + + for _, pair := range k.makePairs() { + //nolint:forcetypeassert + key := pair.Key.(string) + if err := newKey.Set(key, pair.Value); err != nil { + return nil, fmt.Errorf(`failed to set field %q: %w`, key, err) + } + } + return newKey, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric_gen.go new file mode 100644 index 0000000000..6a965193c1 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/symmetric_gen.go @@ -0,0 +1,520 @@ +// This file is auto-generated by jwk/internal/cmd/genheader/main.go. 
DO NOT EDIT + +package jwk + +import ( + "bytes" + "context" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +const ( + SymmetricOctetsKey = "k" +) + +type SymmetricKey interface { + Key + FromRaw([]byte) error + Octets() []byte +} + +type symmetricKey struct { + algorithm *jwa.KeyAlgorithm // https://tools.ietf.org/html/rfc7517#section-4.4 + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + keyOps *KeyOperationList // https://tools.ietf.org/html/rfc7517#section-4.3 + keyUsage *string // https://tools.ietf.org/html/rfc7517#section-4.2 + octets []byte + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]interface{} + mu *sync.RWMutex + dc json.DecodeCtx +} + +var _ SymmetricKey = &symmetricKey{} +var _ Key = &symmetricKey{} + +func newSymmetricKey() *symmetricKey { + return &symmetricKey{ + mu: &sync.RWMutex{}, + privateParams: make(map[string]interface{}), + } +} + +func (h symmetricKey) KeyType() jwa.KeyType { + return jwa.OctetSeq +} + +func (h *symmetricKey) Algorithm() jwa.KeyAlgorithm { + if h.algorithm != nil { + return *(h.algorithm) + } + return jwa.InvalidKeyAlgorithm("") +} + +func (h *symmetricKey) KeyID() string { + if h.keyID != nil { + return *(h.keyID) + } + return "" +} + +func (h *symmetricKey) KeyOps() KeyOperationList { + if h.keyOps != nil { + return *(h.keyOps) + } + return nil +} + +func (h *symmetricKey) KeyUsage() string { + if h.keyUsage != 
nil { + return *(h.keyUsage) + } + return "" +} + +func (h *symmetricKey) Octets() []byte { + return h.octets +} + +func (h *symmetricKey) X509CertChain() *cert.Chain { + return h.x509CertChain +} + +func (h *symmetricKey) X509CertThumbprint() string { + if h.x509CertThumbprint != nil { + return *(h.x509CertThumbprint) + } + return "" +} + +func (h *symmetricKey) X509CertThumbprintS256() string { + if h.x509CertThumbprintS256 != nil { + return *(h.x509CertThumbprintS256) + } + return "" +} + +func (h *symmetricKey) X509URL() string { + if h.x509URL != nil { + return *(h.x509URL) + } + return "" +} + +func (h *symmetricKey) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + + var pairs []*HeaderPair + pairs = append(pairs, &HeaderPair{Key: "kty", Value: jwa.OctetSeq}) + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.keyOps != nil { + pairs = append(pairs, &HeaderPair{Key: KeyOpsKey, Value: *(h.keyOps)}) + } + if h.keyUsage != nil { + pairs = append(pairs, &HeaderPair{Key: KeyUsageKey, Value: *(h.keyUsage)}) + } + if h.octets != nil { + pairs = append(pairs, &HeaderPair{Key: SymmetricOctetsKey, Value: h.octets}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + return pairs +} + +func (h *symmetricKey) PrivateParams() map[string]interface{} { 
+ return h.privateParams +} + +func (h *symmetricKey) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + case KeyTypeKey: + return h.KeyType(), true + case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case KeyOpsKey: + if h.keyOps == nil { + return nil, false + } + return *(h.keyOps), true + case KeyUsageKey: + if h.keyUsage == nil { + return nil, false + } + return *(h.keyUsage), true + case SymmetricOctetsKey: + if h.octets == nil { + return nil, false + } + return h.octets, true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *symmetricKey) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *symmetricKey) setNoLock(name string, value interface{}) error { + switch name { + case "kty": + return nil + case AlgorithmKey: + switch v := value.(type) { + case string, jwa.SignatureAlgorithm, jwa.ContentEncryptionAlgorithm: + var tmp = jwa.KeyAlgorithmFrom(v) + h.algorithm = &tmp + case fmt.Stringer: + s := v.String() + var tmp = jwa.KeyAlgorithmFrom(s) + h.algorithm = &tmp + default: + return fmt.Errorf(`invalid type for %s key: %T`, AlgorithmKey, value) + } + return nil + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) 
+ case KeyOpsKey: + var acceptor KeyOperationList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, KeyOpsKey, err) + } + h.keyOps = &acceptor + return nil + case KeyUsageKey: + switch v := value.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + tmp := v.String() + h.keyUsage = &tmp + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case string: + h.keyUsage = &v + default: + return fmt.Errorf(`invalid key usage type %s`, v) + } + case SymmetricOctetsKey: + if v, ok := value.([]byte); ok { + h.octets = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, SymmetricOctetsKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (k *symmetricKey) Remove(key string) error { + k.mu.Lock() + defer k.mu.Unlock() + switch key { + case AlgorithmKey: + k.algorithm = nil + case KeyIDKey: + k.keyID = nil + case KeyOpsKey: + k.keyOps = nil + case KeyUsageKey: + k.keyUsage = nil + case SymmetricOctetsKey: + k.octets = nil + case X509CertChainKey: + k.x509CertChain = nil + case X509CertThumbprintKey: + k.x509CertThumbprint = nil + case 
X509CertThumbprintS256Key: + k.x509CertThumbprintS256 = nil + case X509URLKey: + k.x509URL = nil + default: + delete(k.privateParams, key) + } + return nil +} + +func (k *symmetricKey) Clone() (Key, error) { + return cloneKey(k) +} + +func (k *symmetricKey) DecodeCtx() json.DecodeCtx { + k.mu.RLock() + defer k.mu.RUnlock() + return k.dc +} + +func (k *symmetricKey) SetDecodeCtx(dc json.DecodeCtx) { + k.mu.Lock() + defer k.mu.Unlock() + k.dc = dc +} + +func (h *symmetricKey) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.algorithm = nil + h.keyID = nil + h.keyOps = nil + h.keyUsage = nil + h.octets = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. 
+ if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case KeyTypeKey: + val, err := json.ReadNextStringToken(dec) + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + if val != jwa.OctetSeq.String() { + return fmt.Errorf(`invalid kty value for RSAPublicKey (%s)`, val) + } + case AlgorithmKey: + var s string + if err := dec.Decode(&s); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + alg := jwa.KeyAlgorithmFrom(s) + h.algorithm = &alg + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case KeyOpsKey: + var decoded KeyOperationList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyOpsKey, err) + } + h.keyOps = &decoded + case KeyUsageKey: + if err := json.AssignNextStringToken(&h.keyUsage, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyUsageKey, err) + } + case SymmetricOctetsKey: + if err := json.AssignNextBytesToken(&h.octets, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, SymmetricOctetsKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, 
X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + if dc := h.dc; dc != nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + h.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + if h.octets == nil { + return fmt.Errorf(`required field k is missing`) + } + return nil +} + +func (h symmetricKey) MarshalJSON() ([]byte, error) { + data := make(map[string]interface{}) + fields := make([]string, 0, 9) + for _, pair := range h.makePairs() { + fields = append(fields, pair.Key.(string)) + data[pair.Key.(string)] = pair.Value + } + + sort.Strings(fields) + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, f := range fields { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + v := data[f] + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (h *symmetricKey) Iterate(ctx context.Context) HeaderIterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch 
<- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *symmetricKey) Walk(ctx context.Context, visitor HeaderVisitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *symmetricKey) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/usage.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/usage.go new file mode 100644 index 0000000000..c21892395d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/usage.go @@ -0,0 +1,30 @@ +package jwk + +import "fmt" + +func (k KeyUsageType) String() string { + return string(k) +} + +func (k *KeyUsageType) Accept(v interface{}) error { + switch v := v.(type) { + case KeyUsageType: + switch v { + case ForSignature, ForEncryption: + *k = v + return nil + default: + return fmt.Errorf("invalid key usage type %s", v) + } + case string: + switch v { + case ForSignature.String(), ForEncryption.String(): + *k = KeyUsageType(v) + return nil + default: + return fmt.Errorf("invalid key usage type %s", v) + } + } + + return fmt.Errorf("invalid value for key usage type %s", v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwk/whitelist.go b/vendor/github.com/lestrrat-go/jwx/v2/jwk/whitelist.go new file mode 100644 index 0000000000..6b0180d307 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwk/whitelist.go @@ -0,0 +1,69 @@ +package jwk + +import "regexp" + +// InsecureWhitelist allows any URLs to be fetched. This is the default +// behavior of `jwk.Fetch()`, but this exists to allow other libraries +// (such as jws, via jws.VerifyAuto) and users to be able to explicitly +// state that they intend to not check the URLs that are being fetched +type InsecureWhitelist struct{} + +func (InsecureWhitelist) IsAllowed(string) bool { + return true +} + +// RegexpWhitelist is a jwk.Whitelist object comprised of a list of *regexp.Regexp +// objects. All entries in the list are tried until one matches. 
If none of the
+// *regexp.Regexp objects match, then the URL is deemed unallowed.
+type RegexpWhitelist struct {
+	patterns []*regexp.Regexp
+}
+
+func NewRegexpWhitelist() *RegexpWhitelist {
+	return &RegexpWhitelist{}
+}
+
+func (w *RegexpWhitelist) Add(pat *regexp.Regexp) *RegexpWhitelist {
+	w.patterns = append(w.patterns, pat)
+	return w
+}
+
+// IsAllowed returns true if any of the patterns in the whitelist
+// returns true.
+func (w *RegexpWhitelist) IsAllowed(u string) bool {
+	for _, pat := range w.patterns {
+		if pat.MatchString(u) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapWhitelist is a jwk.Whitelist object comprised of a map of strings.
+// If the URL exists in the map, then the URL is allowed to be fetched.
+type MapWhitelist struct {
+	store map[string]struct{}
+}
+
+func NewMapWhitelist() *MapWhitelist {
+	return &MapWhitelist{store: make(map[string]struct{})}
+}
+
+func (w *MapWhitelist) Add(pat string) *MapWhitelist {
+	w.store[pat] = struct{}{}
+	return w
+}
+
+func (w *MapWhitelist) IsAllowed(u string) bool {
+	_, b := w.store[u]
+	return b
+}
+
+// WhitelistFunc is a jwk.Whitelist object based on a function.
+// You can perform any sort of check against the given URL to determine
+// if it can be fetched or not.
+type WhitelistFunc func(string) bool + +func (w WhitelistFunc) IsAllowed(u string) bool { + return w(u) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jws/README.md new file mode 100644 index 0000000000..470842ef38 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/README.md @@ -0,0 +1,111 @@ +# JWS [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jws.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jws) + +Package jws implements JWS as described in [RFC7515](https://tools.ietf.org/html/rfc7515) and [RFC7797](https://tools.ietf.org/html/rfc7797) + +* Parse and generate compact or JSON serializations +* Sign and verify arbitrary payload +* Use any of the keys supported in [github.com/lestrrat-go/jwx/v2/jwk](../jwk) +* Add arbitrary fields in the JWS object +* Ability to add/replace existing signature methods +* Respect "b64" settings for RFC7797 + +How-to style documentation can be found in the [docs directory](../docs). + +Examples are located in the examples directory ([jws_example_test.go](../examples/jws_example_test.go)) + +Supported signature algorithms: + +| Algorithm | Supported? 
| Constant in [jwa](../jwa) | +|:----------------------------------------|:-----------|:-------------------------| +| HMAC using SHA-256 | YES | jwa.HS256 | +| HMAC using SHA-384 | YES | jwa.HS384 | +| HMAC using SHA-512 | YES | jwa.HS512 | +| RSASSA-PKCS-v1.5 using SHA-256 | YES | jwa.RS256 | +| RSASSA-PKCS-v1.5 using SHA-384 | YES | jwa.RS384 | +| RSASSA-PKCS-v1.5 using SHA-512 | YES | jwa.RS512 | +| ECDSA using P-256 and SHA-256 | YES | jwa.ES256 | +| ECDSA using P-384 and SHA-384 | YES | jwa.ES384 | +| ECDSA using P-521 and SHA-512 | YES | jwa.ES512 | +| ECDSA using secp256k1 and SHA-256 (2) | YES | jwa.ES256K | +| RSASSA-PSS using SHA256 and MGF1-SHA256 | YES | jwa.PS256 | +| RSASSA-PSS using SHA384 and MGF1-SHA384 | YES | jwa.PS384 | +| RSASSA-PSS using SHA512 and MGF1-SHA512 | YES | jwa.PS512 | +| EdDSA (1) | YES | jwa.EdDSA | + +* Note 1: Experimental +* Note 2: Experimental, and must be toggled using `-tags jwx_es256k` build tag + +# SYNOPSIS + +## Sign and verify arbitrary data + +```go +import( + "crypto/rand" + "crypto/rsa" + "log" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jws" +) + +func main() { + privkey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Printf("failed to generate private key: %s", err) + return + } + + buf, err := jws.Sign([]byte("Lorem ipsum"), jws.WithKey(jwa.RS256, privkey)) + if err != nil { + log.Printf("failed to created JWS message: %s", err) + return + } + + // When you receive a JWS message, you can verify the signature + // and grab the payload sent in the message in one go: + verified, err := jws.Verify(buf, jws.WithKey(jwa.RS256, &privkey.PublicKey)) + if err != nil { + log.Printf("failed to verify message: %s", err) + return + } + + log.Printf("signed message verified! -> %s", verified) +} +``` + +## Programatically manipulate `jws.Message` + +```go +func ExampleMessage() { + // initialization for the following variables have been omitted. 
+ // please see jws_example_test.go for details + var decodedPayload, decodedSig1, decodedSig2 []byte + var public1, protected1, public2, protected2 jws.Header + + // Construct a message. DO NOT use values that are base64 encoded + m := jws.NewMessage(). + SetPayload(decodedPayload). + AppendSignature( + jws.NewSignature(). + SetSignature(decodedSig1). + SetProtectedHeaders(public1). + SetPublicHeaders(protected1), + ). + AppendSignature( + jws.NewSignature(). + SetSignature(decodedSig2). + SetProtectedHeaders(public2). + SetPublicHeaders(protected2), + ) + + buf, err := json.MarshalIndent(m, "", " ") + if err != nil { + fmt.Printf("%s\n", err) + return + } + + _ = buf +} +``` + diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/ecdsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/ecdsa.go new file mode 100644 index 0000000000..a2d644e438 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/ecdsa.go @@ -0,0 +1,197 @@ +package jws + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "encoding/asn1" + "fmt" + "math/big" + + "github.com/lestrrat-go/jwx/v2/internal/keyconv" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +var ecdsaSigners map[jwa.SignatureAlgorithm]*ecdsaSigner +var ecdsaVerifiers map[jwa.SignatureAlgorithm]*ecdsaVerifier + +func init() { + algs := map[jwa.SignatureAlgorithm]crypto.Hash{ + jwa.ES256: crypto.SHA256, + jwa.ES384: crypto.SHA384, + jwa.ES512: crypto.SHA512, + jwa.ES256K: crypto.SHA256, + } + ecdsaSigners = make(map[jwa.SignatureAlgorithm]*ecdsaSigner) + ecdsaVerifiers = make(map[jwa.SignatureAlgorithm]*ecdsaVerifier) + + for alg, hash := range algs { + ecdsaSigners[alg] = &ecdsaSigner{ + alg: alg, + hash: hash, + } + ecdsaVerifiers[alg] = &ecdsaVerifier{ + alg: alg, + hash: hash, + } + } +} + +func newECDSASigner(alg jwa.SignatureAlgorithm) Signer { + return ecdsaSigners[alg] +} + +// ecdsaSigners are immutable. 
+type ecdsaSigner struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash +} + +func (es ecdsaSigner) Algorithm() jwa.SignatureAlgorithm { + return es.alg +} + +func (es *ecdsaSigner) Sign(payload []byte, key interface{}) ([]byte, error) { + if key == nil { + return nil, fmt.Errorf(`missing private key while signing payload`) + } + + h := es.hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using ecdsa: %w`, err) + } + + signer, ok := key.(crypto.Signer) + if ok { + switch key.(type) { + case ecdsa.PrivateKey, *ecdsa.PrivateKey: + // if it's a ecdsa.PrivateKey, it's more efficient to + // go through the non-crypto.Signer route. Set ok to false + ok = false + } + } + + var r, s *big.Int + var curveBits int + if ok { + signed, err := signer.Sign(rand.Reader, h.Sum(nil), es.hash) + if err != nil { + return nil, err + } + + var p struct { + R *big.Int + S *big.Int + } + if _, err := asn1.Unmarshal(signed, &p); err != nil { + return nil, fmt.Errorf(`failed to unmarshal ASN1 encoded signature: %w`, err) + } + + // Okay, this is silly, but hear me out. When we use the + // crypto.Signer interface, the PrivateKey is hidden. + // But we need some information about the key (it's bit size). + // + // So while silly, we're going to have to make another call + // here and fetch the Public key. + // This probably means that this should be cached some where. 
+ cpub := signer.Public() + pubkey, ok := cpub.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf(`expected *ecdsa.PublicKey, got %T`, pubkey) + } + curveBits = pubkey.Curve.Params().BitSize + + r = p.R + s = p.S + } else { + var privkey ecdsa.PrivateKey + if err := keyconv.ECDSAPrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`failed to retrieve ecdsa.PrivateKey out of %T: %w`, key, err) + } + curveBits = privkey.Curve.Params().BitSize + rtmp, stmp, err := ecdsa.Sign(rand.Reader, &privkey, h.Sum(nil)) + if err != nil { + return nil, fmt.Errorf(`failed to sign payload using ecdsa: %w`, err) + } + r = rtmp + s = stmp + } + + keyBytes := curveBits / 8 + // Curve bits do not need to be a multiple of 8. + if curveBits%8 > 0 { + keyBytes++ + } + + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + return out, nil +} + +// ecdsaVerifiers are immutable. 
+type ecdsaVerifier struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash +} + +func newECDSAVerifier(alg jwa.SignatureAlgorithm) Verifier { + return ecdsaVerifiers[alg] +} + +func (v ecdsaVerifier) Algorithm() jwa.SignatureAlgorithm { + return v.alg +} + +func (v *ecdsaVerifier) Verify(payload []byte, signature []byte, key interface{}) error { + if key == nil { + return fmt.Errorf(`missing public key while verifying payload`) + } + + var pubkey ecdsa.PublicKey + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case ecdsa.PublicKey: + pubkey = cpub + case *ecdsa.PublicKey: + pubkey = *cpub + default: + return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + if err := keyconv.ECDSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`failed to retrieve ecdsa.PublicKey out of %T: %w`, key, err) + } + } + + if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) { + return fmt.Errorf(`public key used does not contain a point (X,Y) on the curve`) + } + + r := pool.GetBigInt() + s := pool.GetBigInt() + defer pool.ReleaseBigInt(r) + defer pool.ReleaseBigInt(s) + + n := len(signature) / 2 + r.SetBytes(signature[:n]) + s.SetBytes(signature[n:]) + + h := v.hash.New() + if _, err := h.Write(payload); err != nil { + return fmt.Errorf(`failed to write payload using ecdsa: %w`, err) + } + + if !ecdsa.Verify(&pubkey, h.Sum(nil), r, s) { + return fmt.Errorf(`failed to verify signature using ecdsa`) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/eddsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/eddsa.go new file mode 100644 index 0000000000..78c1a2d68d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/eddsa.go @@ -0,0 +1,73 @@ +package jws + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "fmt" + + "github.com/lestrrat-go/jwx/v2/internal/keyconv" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +type eddsaSigner struct{} + +func newEdDSASigner() 
Signer { + return &eddsaSigner{} +} + +func (s eddsaSigner) Algorithm() jwa.SignatureAlgorithm { + return jwa.EdDSA +} + +func (s eddsaSigner) Sign(payload []byte, key interface{}) ([]byte, error) { + if key == nil { + return nil, fmt.Errorf(`missing private key while signing payload`) + } + + // The ed25519.PrivateKey object implements crypto.Signer, so we should + // simply accept a crypto.Signer here. + signer, ok := key.(crypto.Signer) + if !ok { + // This fallback exists for cases when jwk.Key was passed, or + // users gave us a pointer instead of non-pointer, etc. + var privkey ed25519.PrivateKey + if err := keyconv.Ed25519PrivateKey(&privkey, key); err != nil { + return nil, fmt.Errorf(`failed to retrieve ed25519.PrivateKey out of %T: %w`, key, err) + } + signer = privkey + } + return signer.Sign(rand.Reader, payload, crypto.Hash(0)) +} + +type eddsaVerifier struct{} + +func newEdDSAVerifier() Verifier { + return &eddsaVerifier{} +} + +func (v eddsaVerifier) Verify(payload, signature []byte, key interface{}) (err error) { + if key == nil { + return fmt.Errorf(`missing public key while verifying payload`) + } + + var pubkey ed25519.PublicKey + signer, ok := key.(crypto.Signer) + if ok { + v := signer.Public() + pubkey, ok = v.(ed25519.PublicKey) + if !ok { + return fmt.Errorf(`expected crypto.Signer.Public() to return ed25519.PublicKey, but got %T`, v) + } + } else { + if err := keyconv.Ed25519PublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`failed to retrieve ed25519.PublicKey out of %T: %w`, key, err) + } + } + + if !ed25519.Verify(pubkey, payload, signature) { + return fmt.Errorf(`failed to match EdDSA signature`) + } + + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/es256k.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/es256k.go new file mode 100644 index 0000000000..c5043805a6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/es256k.go @@ -0,0 +1,12 @@ +//go:build jwx_es256k +// +build jwx_es256k + +package jws + 
+import ( + "github.com/lestrrat-go/jwx/v2/jwa" +) + +func init() { + addAlgorithmForKeyType(jwa.EC, jwa.ES256K) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/headers.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers.go new file mode 100644 index 0000000000..dce72895e8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers.go @@ -0,0 +1,71 @@ +package jws + +import ( + "context" + "fmt" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/iter" +) + +// Iterate returns a channel that successively returns all the +// header name and values. +func (h *stdHeaders) Iterate(ctx context.Context) Iterator { + pairs := h.makePairs() + ch := make(chan *HeaderPair, len(pairs)) + go func(ctx context.Context, ch chan *HeaderPair, pairs []*HeaderPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (h *stdHeaders) Walk(ctx context.Context, visitor Visitor) error { + return iter.WalkMap(ctx, h, visitor) +} + +func (h *stdHeaders) AsMap(ctx context.Context) (map[string]interface{}, error) { + return iter.AsMap(ctx, h) +} + +func (h *stdHeaders) Copy(ctx context.Context, dst Headers) error { + for _, pair := range h.makePairs() { + //nolint:forcetypeassert + key := pair.Key.(string) + if err := dst.Set(key, pair.Value); err != nil { + return fmt.Errorf(`failed to set header %q: %w`, key, err) + } + } + return nil +} + +// mergeHeaders merges two headers, and works even if the first Header +// object is nil. 
This is not exported because ATM it felt like this +// function is not frequently used, and MergeHeaders seemed a clunky name +func mergeHeaders(ctx context.Context, h1, h2 Headers) (Headers, error) { + h3 := NewHeaders() + + if h1 != nil { + if err := h1.Copy(ctx, h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from first Header: %w`, err) + } + } + + if h2 != nil { + if err := h2.Copy(ctx, h3); err != nil { + return nil, fmt.Errorf(`failed to copy headers from second Header: %w`, err) + } + } + + return h3, nil +} + +func (h *stdHeaders) Merge(ctx context.Context, h2 Headers) (Headers, error) { + return mergeHeaders(ctx, h, h2) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/headers_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers_gen.go new file mode 100644 index 0000000000..fd892e2c42 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/headers_gen.go @@ -0,0 +1,565 @@ +// This file is auto-generated by jws/internal/cmd/genheaders/main.go. DO NOT EDIT + +package jws + +import ( + "bytes" + "context" + "fmt" + "sort" + "sync" + + "github.com/lestrrat-go/jwx/v2/cert" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +const ( + AlgorithmKey = "alg" + ContentTypeKey = "cty" + CriticalKey = "crit" + JWKKey = "jwk" + JWKSetURLKey = "jku" + KeyIDKey = "kid" + TypeKey = "typ" + X509CertChainKey = "x5c" + X509CertThumbprintKey = "x5t" + X509CertThumbprintS256Key = "x5t#S256" + X509URLKey = "x5u" +) + +// Headers describe a standard Header set. 
+type Headers interface { + json.Marshaler + json.Unmarshaler + Algorithm() jwa.SignatureAlgorithm + ContentType() string + Critical() []string + JWK() jwk.Key + JWKSetURL() string + KeyID() string + Type() string + X509CertChain() *cert.Chain + X509CertThumbprint() string + X509CertThumbprintS256() string + X509URL() string + Iterate(ctx context.Context) Iterator + Walk(context.Context, Visitor) error + AsMap(context.Context) (map[string]interface{}, error) + Copy(context.Context, Headers) error + Merge(context.Context, Headers) (Headers, error) + Get(string) (interface{}, bool) + Set(string, interface{}) error + Remove(string) error + + // PrivateParams returns the non-standard elements in the source structure + // WARNING: DO NOT USE PrivateParams() IF YOU HAVE CONCURRENT CODE ACCESSING THEM. + // Use AsMap() to get a copy of the entire header instead + PrivateParams() map[string]interface{} +} + +type stdHeaders struct { + algorithm *jwa.SignatureAlgorithm // https://tools.ietf.org/html/rfc7515#section-4.1.1 + contentType *string // https://tools.ietf.org/html/rfc7515#section-4.1.10 + critical []string // https://tools.ietf.org/html/rfc7515#section-4.1.11 + jwk jwk.Key // https://tools.ietf.org/html/rfc7515#section-4.1.3 + jwkSetURL *string // https://tools.ietf.org/html/rfc7515#section-4.1.2 + keyID *string // https://tools.ietf.org/html/rfc7515#section-4.1.4 + typ *string // https://tools.ietf.org/html/rfc7515#section-4.1.9 + x509CertChain *cert.Chain // https://tools.ietf.org/html/rfc7515#section-4.1.6 + x509CertThumbprint *string // https://tools.ietf.org/html/rfc7515#section-4.1.7 + x509CertThumbprintS256 *string // https://tools.ietf.org/html/rfc7515#section-4.1.8 + x509URL *string // https://tools.ietf.org/html/rfc7515#section-4.1.5 + privateParams map[string]interface{} + mu *sync.RWMutex + dc DecodeCtx + raw []byte // stores the raw version of the header so it can be used later +} + +func NewHeaders() Headers { + return &stdHeaders{ + mu: 
&sync.RWMutex{}, + } +} + +func (h *stdHeaders) Algorithm() jwa.SignatureAlgorithm { + h.mu.RLock() + defer h.mu.RUnlock() + if h.algorithm == nil { + return "" + } + return *(h.algorithm) +} + +func (h *stdHeaders) ContentType() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.contentType == nil { + return "" + } + return *(h.contentType) +} + +func (h *stdHeaders) Critical() []string { + h.mu.RLock() + defer h.mu.RUnlock() + return h.critical +} + +func (h *stdHeaders) JWK() jwk.Key { + h.mu.RLock() + defer h.mu.RUnlock() + return h.jwk +} + +func (h *stdHeaders) JWKSetURL() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.jwkSetURL == nil { + return "" + } + return *(h.jwkSetURL) +} + +func (h *stdHeaders) KeyID() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.keyID == nil { + return "" + } + return *(h.keyID) +} + +func (h *stdHeaders) Type() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.typ == nil { + return "" + } + return *(h.typ) +} + +func (h *stdHeaders) X509CertChain() *cert.Chain { + h.mu.RLock() + defer h.mu.RUnlock() + return h.x509CertChain +} + +func (h *stdHeaders) X509CertThumbprint() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprint == nil { + return "" + } + return *(h.x509CertThumbprint) +} + +func (h *stdHeaders) X509CertThumbprintS256() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509CertThumbprintS256 == nil { + return "" + } + return *(h.x509CertThumbprintS256) +} + +func (h *stdHeaders) X509URL() string { + h.mu.RLock() + defer h.mu.RUnlock() + if h.x509URL == nil { + return "" + } + return *(h.x509URL) +} + +func (h *stdHeaders) clear() { + h.algorithm = nil + h.contentType = nil + h.critical = nil + h.jwk = nil + h.jwkSetURL = nil + h.keyID = nil + h.typ = nil + h.x509CertChain = nil + h.x509CertThumbprint = nil + h.x509CertThumbprintS256 = nil + h.x509URL = nil + h.privateParams = nil + h.raw = nil +} + +func (h *stdHeaders) DecodeCtx() DecodeCtx { + h.mu.RLock() + defer 
h.mu.RUnlock() + return h.dc +} + +func (h *stdHeaders) SetDecodeCtx(dc DecodeCtx) { + h.mu.Lock() + defer h.mu.Unlock() + h.dc = dc +} + +func (h *stdHeaders) rawBuffer() []byte { + return h.raw +} + +func (h *stdHeaders) makePairs() []*HeaderPair { + h.mu.RLock() + defer h.mu.RUnlock() + var pairs []*HeaderPair + if h.algorithm != nil { + pairs = append(pairs, &HeaderPair{Key: AlgorithmKey, Value: *(h.algorithm)}) + } + if h.contentType != nil { + pairs = append(pairs, &HeaderPair{Key: ContentTypeKey, Value: *(h.contentType)}) + } + if h.critical != nil { + pairs = append(pairs, &HeaderPair{Key: CriticalKey, Value: h.critical}) + } + if h.jwk != nil { + pairs = append(pairs, &HeaderPair{Key: JWKKey, Value: h.jwk}) + } + if h.jwkSetURL != nil { + pairs = append(pairs, &HeaderPair{Key: JWKSetURLKey, Value: *(h.jwkSetURL)}) + } + if h.keyID != nil { + pairs = append(pairs, &HeaderPair{Key: KeyIDKey, Value: *(h.keyID)}) + } + if h.typ != nil { + pairs = append(pairs, &HeaderPair{Key: TypeKey, Value: *(h.typ)}) + } + if h.x509CertChain != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertChainKey, Value: h.x509CertChain}) + } + if h.x509CertThumbprint != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintKey, Value: *(h.x509CertThumbprint)}) + } + if h.x509CertThumbprintS256 != nil { + pairs = append(pairs, &HeaderPair{Key: X509CertThumbprintS256Key, Value: *(h.x509CertThumbprintS256)}) + } + if h.x509URL != nil { + pairs = append(pairs, &HeaderPair{Key: X509URLKey, Value: *(h.x509URL)}) + } + for k, v := range h.privateParams { + pairs = append(pairs, &HeaderPair{Key: k, Value: v}) + } + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].Key.(string) < pairs[j].Key.(string) + }) + return pairs +} + +func (h *stdHeaders) PrivateParams() map[string]interface{} { + h.mu.RLock() + defer h.mu.RUnlock() + return h.privateParams +} + +func (h *stdHeaders) Get(name string) (interface{}, bool) { + h.mu.RLock() + defer h.mu.RUnlock() + switch name { + 
case AlgorithmKey: + if h.algorithm == nil { + return nil, false + } + return *(h.algorithm), true + case ContentTypeKey: + if h.contentType == nil { + return nil, false + } + return *(h.contentType), true + case CriticalKey: + if h.critical == nil { + return nil, false + } + return h.critical, true + case JWKKey: + if h.jwk == nil { + return nil, false + } + return h.jwk, true + case JWKSetURLKey: + if h.jwkSetURL == nil { + return nil, false + } + return *(h.jwkSetURL), true + case KeyIDKey: + if h.keyID == nil { + return nil, false + } + return *(h.keyID), true + case TypeKey: + if h.typ == nil { + return nil, false + } + return *(h.typ), true + case X509CertChainKey: + if h.x509CertChain == nil { + return nil, false + } + return h.x509CertChain, true + case X509CertThumbprintKey: + if h.x509CertThumbprint == nil { + return nil, false + } + return *(h.x509CertThumbprint), true + case X509CertThumbprintS256Key: + if h.x509CertThumbprintS256 == nil { + return nil, false + } + return *(h.x509CertThumbprintS256), true + case X509URLKey: + if h.x509URL == nil { + return nil, false + } + return *(h.x509URL), true + default: + v, ok := h.privateParams[name] + return v, ok + } +} + +func (h *stdHeaders) Set(name string, value interface{}) error { + h.mu.Lock() + defer h.mu.Unlock() + return h.setNoLock(name, value) +} + +func (h *stdHeaders) setNoLock(name string, value interface{}) error { + switch name { + case AlgorithmKey: + var acceptor jwa.SignatureAlgorithm + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, AlgorithmKey, err) + } + h.algorithm = &acceptor + return nil + case ContentTypeKey: + if v, ok := value.(string); ok { + h.contentType = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value) + case CriticalKey: + if v, ok := value.([]string); ok { + h.critical = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, CriticalKey, value) + case JWKKey: + 
if v, ok := value.(jwk.Key); ok { + h.jwk = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKKey, value) + case JWKSetURLKey: + if v, ok := value.(string); ok { + h.jwkSetURL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value) + case KeyIDKey: + if v, ok := value.(string); ok { + h.keyID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, KeyIDKey, value) + case TypeKey: + if v, ok := value.(string); ok { + h.typ = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, TypeKey, value) + case X509CertChainKey: + if v, ok := value.(*cert.Chain); ok { + h.x509CertChain = v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertChainKey, value) + case X509CertThumbprintKey: + if v, ok := value.(string); ok { + h.x509CertThumbprint = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintKey, value) + case X509CertThumbprintS256Key: + if v, ok := value.(string); ok { + h.x509CertThumbprintS256 = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509CertThumbprintS256Key, value) + case X509URLKey: + if v, ok := value.(string); ok { + h.x509URL = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, X509URLKey, value) + default: + if h.privateParams == nil { + h.privateParams = map[string]interface{}{} + } + h.privateParams[name] = value + } + return nil +} + +func (h *stdHeaders) Remove(key string) error { + h.mu.Lock() + defer h.mu.Unlock() + switch key { + case AlgorithmKey: + h.algorithm = nil + case ContentTypeKey: + h.contentType = nil + case CriticalKey: + h.critical = nil + case JWKKey: + h.jwk = nil + case JWKSetURLKey: + h.jwkSetURL = nil + case KeyIDKey: + h.keyID = nil + case TypeKey: + h.typ = nil + case X509CertChainKey: + h.x509CertChain = nil + case X509CertThumbprintKey: + h.x509CertThumbprint = nil + case X509CertThumbprintS256Key: + 
h.x509CertThumbprintS256 = nil + case X509URLKey: + h.x509URL = nil + default: + delete(h.privateParams, key) + } + return nil +} + +func (h *stdHeaders) UnmarshalJSON(buf []byte) error { + h.mu.Lock() + defer h.mu.Unlock() + h.clear() + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case AlgorithmKey: + var decoded jwa.SignatureAlgorithm + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AlgorithmKey, err) + } + h.algorithm = &decoded + case ContentTypeKey: + if err := json.AssignNextStringToken(&h.contentType, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ContentTypeKey, err) + } + case CriticalKey: + var decoded []string + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, CriticalKey, err) + } + h.critical = decoded + case JWKKey: + var buf json.RawMessage + if err := dec.Decode(&buf); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JWKKey, err) + } + key, err := jwk.ParseKey(buf) + if err != nil { + return fmt.Errorf(`failed to parse JWK for key %s: %w`, JWKKey, err) + } + h.jwk = key + case JWKSetURLKey: + if err := json.AssignNextStringToken(&h.jwkSetURL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JWKSetURLKey, err) + } + case KeyIDKey: + if err := json.AssignNextStringToken(&h.keyID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, KeyIDKey, err) + } + case TypeKey: + if 
err := json.AssignNextStringToken(&h.typ, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, TypeKey, err) + } + case X509CertChainKey: + var decoded cert.Chain + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertChainKey, err) + } + h.x509CertChain = &decoded + case X509CertThumbprintKey: + if err := json.AssignNextStringToken(&h.x509CertThumbprint, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintKey, err) + } + case X509CertThumbprintS256Key: + if err := json.AssignNextStringToken(&h.x509CertThumbprintS256, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509CertThumbprintS256Key, err) + } + case X509URLKey: + if err := json.AssignNextStringToken(&h.x509URL, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, X509URLKey, err) + } + default: + decoded, err := registry.Decode(dec, tok) + if err != nil { + return err + } + h.setNoLock(tok, decoded) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + h.raw = buf + return nil +} + +func (h stdHeaders) MarshalJSON() ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, p := range h.makePairs() { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteRune('"') + buf.WriteString(p.Key.(string)) + buf.WriteString(`":`) + v := p.Value + switch v := v.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to encode value for field %s: %w`, p.Key, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/hmac.go 
b/vendor/github.com/lestrrat-go/jwx/v2/jws/hmac.go new file mode 100644 index 0000000000..247ebc76dd --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/hmac.go @@ -0,0 +1,77 @@ +package jws + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "github.com/lestrrat-go/jwx/v2/internal/keyconv" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +var hmacSignFuncs = map[jwa.SignatureAlgorithm]hmacSignFunc{} + +func init() { + algs := map[jwa.SignatureAlgorithm]func() hash.Hash{ + jwa.HS256: sha256.New, + jwa.HS384: sha512.New384, + jwa.HS512: sha512.New, + } + + for alg, h := range algs { + hmacSignFuncs[alg] = makeHMACSignFunc(h) + } +} + +func newHMACSigner(alg jwa.SignatureAlgorithm) Signer { + return &HMACSigner{ + alg: alg, + sign: hmacSignFuncs[alg], // we know this will succeed + } +} + +func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc { + return func(payload []byte, key []byte) ([]byte, error) { + h := hmac.New(hfunc, key) + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload using hmac: %w`, err) + } + return h.Sum(nil), nil + } +} + +func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm { + return s.alg +} + +func (s HMACSigner) Sign(payload []byte, key interface{}) ([]byte, error) { + var hmackey []byte + if err := keyconv.ByteSliceKey(&hmackey, key); err != nil { + return nil, fmt.Errorf(`invalid key type %T. 
[]byte is required: %w`, key, err) + } + + if len(hmackey) == 0 { + return nil, fmt.Errorf(`missing key while signing payload`) + } + + return s.sign(payload, hmackey) +} + +func newHMACVerifier(alg jwa.SignatureAlgorithm) Verifier { + s := newHMACSigner(alg) + return &HMACVerifier{signer: s} +} + +func (v HMACVerifier) Verify(payload, signature []byte, key interface{}) (err error) { + expected, err := v.signer.Sign(payload, key) + if err != nil { + return fmt.Errorf(`failed to generated signature: %w`, err) + } + + if !hmac.Equal(signature, expected) { + return fmt.Errorf(`failed to match hmac signature`) + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/interface.go new file mode 100644 index 0000000000..9df909a7da --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/interface.go @@ -0,0 +1,106 @@ +package jws + +import ( + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +type DecodeCtx interface { + CollectRaw() bool +} + +// Message represents a full JWS encoded message. Flattened serialization +// is not supported as a struct, but rather it's represented as a +// Message struct with only one `signature` element. +// +// Do not expect to use the Message object to verify or construct a +// signed payload with. You should only use this when you want to actually +// programmatically view the contents of the full JWS payload. +// +// As of this version, there is one big incompatibility when using Message +// objects to convert between compact and JSON representations. +// The protected header is sometimes encoded differently from the original +// message and the JSON serialization that we use in Go. 
+// +// For example, the protected header `eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9` +// decodes to +// +// {"typ":"JWT", +// "alg":"HS256"} +// +// However, when we parse this into a message, we create a jws.Header object, +// which, when we marshal into a JSON object again, becomes +// +// {"typ":"JWT","alg":"HS256"} +// +// Notice that serialization lacks a line break and a space between `"JWT",` +// and `"alg"`. This causes a problem when verifying the signatures AFTER +// a compact JWS message has been unmarshaled into a jws.Message. +// +// jws.Verify() doesn't go through this step, and therefore this does not +// manifest itself. However, you may see this discrepancy when you manually +// go through these conversions, and/or use the `jwx` tool like so: +// +// jwx jws parse message.jws | jwx jws verify --key somekey.jwk --stdin +// +// In this scenario, the first `jwx jws parse` outputs a parsed jws.Message +// which is marshaled into JSON. At this point the message's protected +// headers and the signatures don't match. +// +// To sign and verify, use the appropriate `Sign()` and `Verify()` functions. +type Message struct { + dc DecodeCtx + payload []byte + signatures []*Signature + b64 bool // true if payload should be base64 encoded +} + +type Signature struct { + dc DecodeCtx + headers Headers // Unprotected Headers + protected Headers // Protected Headers + signature []byte // Signature + detached bool +} + +type Visitor = iter.MapVisitor +type VisitorFunc = iter.MapVisitorFunc +type HeaderPair = mapiter.Pair +type Iterator = mapiter.Iterator + +// Signer generates the signature for a given payload. +type Signer interface { + // Sign creates a signature for the given payload. + // The second argument is the key used for signing the payload, and is usually + // the private key type associated with the signature method. For example, + // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the + // `*"crypto/rsa".PrivateKey` type. 
+ // Check the documentation for each signer for details + Sign([]byte, interface{}) ([]byte, error) + + Algorithm() jwa.SignatureAlgorithm +} + +type hmacSignFunc func([]byte, []byte) ([]byte, error) + +// HMACSigner uses crypto/hmac to sign the payloads. +type HMACSigner struct { + alg jwa.SignatureAlgorithm + sign hmacSignFunc +} + +type Verifier interface { + // Verify checks whether the payload and signature are valid for + // the given key. + // `key` is the key used for verifying the payload, and is usually + // the public key associated with the signature method. For example, + // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the + // `*"crypto/rsa".PublicKey` type. + // Check the documentation for each verifier for details + Verify(payload []byte, signature []byte, key interface{}) error +} + +type HMACVerifier struct { + signer Signer +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/io.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/io.go new file mode 100644 index 0000000000..7bfd07f99f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/io.go @@ -0,0 +1,42 @@ +// Automatically generated by internal/cmd/genreadfile/main.go. 
DO NOT EDIT + +package jws + +import ( + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (*Message, error) { + var parseOptions []ParseOption + var readFileOptions []ReadFileOption + for _, option := range options { + if po, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, po) + } else { + readFileOptions = append(readFileOptions, option) + } + } + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + srcFS = option.Value().(fs.FS) + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/jws.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/jws.go new file mode 100644 index 0000000000..6778b38675 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/jws.go @@ -0,0 +1,715 @@ +//go:generate ../tools/cmd/genjws.sh + +// Package jws implements the digital signature on JSON based data +// structures as described in https://tools.ietf.org/html/rfc7515 +// +// If you do not care about the details, the only things that you +// would need to use are the following functions: +// +// jws.Sign(payload, jws.WithKey(algorithm, key)) +// jws.Verify(serialized, jws.WithKey(algorithm, key)) +// +// To sign, simply use `jws.Sign`. `payload` is a []byte buffer that +// contains whatever data you want to sign. `alg` is one of the +// jwa.SignatureAlgorithm constants from package jwa. For RSA and +// ECDSA family of algorithms, you will need to prepare a private key. +// For HMAC family, you just need a []byte value. The `jws.Sign` +// function will return the encoded JWS message on success. +// +// To verify, use `jws.Verify`. It will parse the `encodedjws` buffer +// and verify the result using `algorithm` and `key`. 
Upon successful +// verification, the original payload is returned, so you can work on it. +package jws + +import ( + "bufio" + "bytes" + "context" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "github.com/lestrrat-go/blackmagic" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/x25519" +) + +var registry = json.NewRegistry() + +type payloadSigner struct { + signer Signer + key interface{} + protected Headers + public Headers +} + +func (s *payloadSigner) Sign(payload []byte) ([]byte, error) { + return s.signer.Sign(payload, s.key) +} + +func (s *payloadSigner) Algorithm() jwa.SignatureAlgorithm { + return s.signer.Algorithm() +} + +func (s *payloadSigner) ProtectedHeader() Headers { + return s.protected +} + +func (s *payloadSigner) PublicHeader() Headers { + return s.public +} + +var signers = make(map[jwa.SignatureAlgorithm]Signer) +var muSigner = &sync.Mutex{} + +func makeSigner(alg jwa.SignatureAlgorithm, key interface{}, public, protected Headers) (*payloadSigner, error) { + muSigner.Lock() + signer, ok := signers[alg] + if !ok { + v, err := NewSigner(alg) + if err != nil { + muSigner.Unlock() + return nil, fmt.Errorf(`failed to create payload signer: %w`, err) + } + signers[alg] = v + signer = v + } + muSigner.Unlock() + + return &payloadSigner{ + signer: signer, + key: key, + public: public, + protected: protected, + }, nil +} + +const ( + fmtInvalid = iota + fmtCompact + fmtJSON + fmtJSONPretty + fmtMax +) + +// silence linters +var _ = fmtInvalid +var _ = fmtMax + +// Sign generates a JWS message for the given payload and returns +// it in serialized form, which can be in either compact or +// JSON format. Default is compact. 
+// +// You must pass at least one key to `jws.Sign()` by using `jws.WithKey()` +// option. +// +// jws.Sign(payload, jws.WithKey(alg, key)) +// jws.Sign(payload, jws.WithJSON(), jws.WithKey(alg1, key1), jws.WithKey(alg2, key2)) +// +// Note that in the second example the `jws.WithJSON()` option is +// specified as well. This is because the compact serialization +// format does not support multiple signatures, and users must +// specifically ask for the JSON serialization format. +// +// Read the documentation for `jws.WithKey()` to learn more about the +// possible values that can be used for `alg` and `key`. +// +// If you want to use a detached payload, use `jws.WithDetachedPayload()` as +// one of the options. When you use this option, you must always set the +// first parameter (`payload`) to `nil`, or the function will return an error +// +// You may also wantt to look at how to pass protected headers to the +// signing process, as you will likely be required to set the `b64` field +// when using detached payload. +// +// Look for options that return `jws.SignOption` or `jws.SignVerifyOption` +// for a complete list of options that can be passed to this function. 
+func Sign(payload []byte, options ...SignOption) ([]byte, error) { + format := fmtCompact + var signers []*payloadSigner + var detached bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identSerialization{}: + format = option.Value().(int) + case identKey{}: + data := option.Value().(*withKey) + + alg, ok := data.alg.(jwa.SignatureAlgorithm) + if !ok { + return nil, fmt.Errorf(`jws.Sign: expected algorithm to be of type jwa.SignatureAlgorithm but got (%[1]q, %[1]T)`, data.alg) + } + signer, err := makeSigner(alg, data.key, data.public, data.protected) + if err != nil { + return nil, fmt.Errorf(`jws.Sign: failed to create signer: %w`, err) + } + signers = append(signers, signer) + case identDetachedPayload{}: + detached = true + if payload != nil { + return nil, fmt.Errorf(`jws.Sign: payload must be nil when jws.WithDetachedPayload() is specified`) + } + payload = option.Value().([]byte) + } + } + + lsigner := len(signers) + if lsigner == 0 { + return nil, fmt.Errorf(`jws.Sign: no signers available. Specify an alogirthm and akey using jws.WithKey()`) + } + + // Design note: while we could have easily set format = fmtJSON when + // lsigner > 1, I believe the decision to change serialization formats + // must be explicitly stated by the caller. Otherwise I'm pretty sure + // there would be people filing issues saying "I get JSON when I expcted + // compact serialization". + // + // Therefore, instead of making implicit format conversions, we force the + // user to spell it out as `jws.Sign(..., jws.WithJSON(), jws.WithKey(...), jws.WithKey(...))` + if format == fmtCompact && lsigner != 1 { + return nil, fmt.Errorf(`jws.Sign: cannot have multiple signers (keys) specified for compact serialization. 
Use only one jws.WithKey()`) + } + + // Create a Message object with all the bits and bobs, and we'll + // serialize it in the end + var result Message + + result.payload = payload + + result.signatures = make([]*Signature, 0, len(signers)) + for i, signer := range signers { + protected := signer.ProtectedHeader() + if protected == nil { + protected = NewHeaders() + } + + if err := protected.Set(AlgorithmKey, signer.Algorithm()); err != nil { + return nil, fmt.Errorf(`failed to set "alg" header: %w`, err) + } + + if key, ok := signer.key.(jwk.Key); ok { + if kid := key.KeyID(); kid != "" { + if err := protected.Set(KeyIDKey, kid); err != nil { + return nil, fmt.Errorf(`failed to set "kid" header: %w`, err) + } + } + } + sig := &Signature{ + headers: signer.PublicHeader(), + protected: protected, + // cheat. FIXXXXXXMEEEEEE + detached: detached, + } + _, _, err := sig.Sign(payload, signer.signer, signer.key) + if err != nil { + return nil, fmt.Errorf(`failed to generate signature for signer #%d (alg=%s): %w`, i, signer.Algorithm(), err) + } + + result.signatures = append(result.signatures, sig) + } + + switch format { + case fmtJSON: + return json.Marshal(result) + case fmtJSONPretty: + return json.MarshalIndent(result, "", " ") + case fmtCompact: + // Take the only signature object, and convert it into a Compact + // serialization format + var compactOpts []CompactOption + if detached { + compactOpts = append(compactOpts, WithDetached(detached)) + } + return Compact(&result, compactOpts...) + default: + return nil, fmt.Errorf(`jws.Sign: invalid serialization format`) + } +} + +var allowNoneWhitelist = jwk.WhitelistFunc(func(string) bool { + return false +}) + +// Verify checks if the given JWS message is verifiable using `alg` and `key`. +// `key` may be a "raw" key (e.g. rsa.PublicKey) or a jwk.Key +// +// If the verification is successful, `err` is nil, and the content of the +// payload that was signed is returned. 
If you need more fine-grained +// control of the verification process, manually generate a +// `Verifier` in `verify` subpackage, and call `Verify` method on it. +// If you need to access signatures and JOSE headers in a JWS message, +// use `Parse` function to get `Message` object. +func Verify(buf []byte, options ...VerifyOption) ([]byte, error) { + var dst *Message + var detachedPayload []byte + var keyProviders []KeyProvider + var keyUsed interface{} + + ctx := context.Background() + + //nolint:forcetypeassert + for _, option := range options { + switch option.Ident() { + case identMessage{}: + dst = option.Value().(*Message) + case identDetachedPayload{}: + detachedPayload = option.Value().([]byte) + case identKey{}: + pair := option.Value().(*withKey) + alg, ok := pair.alg.(jwa.SignatureAlgorithm) + if !ok { + return nil, fmt.Errorf(`WithKey() option must be specified using jwa.SignatureAlgorithm (got %T)`, pair.alg) + } + keyProviders = append(keyProviders, &staticKeyProvider{ + alg: alg, + key: pair.key, + }) + case identKeyProvider{}: + keyProviders = append(keyProviders, option.Value().(KeyProvider)) + case identKeyUsed{}: + keyUsed = option.Value() + case identContext{}: + ctx = option.Value().(context.Context) + default: + return nil, fmt.Errorf(`invalid jws.VerifyOption %q passed`, `With`+strings.TrimPrefix(fmt.Sprintf(`%T`, option.Ident()), `jws.ident`)) + } + } + + if len(keyProviders) < 1 { + return nil, fmt.Errorf(`jws.Verify: no key providers have been provided (see jws.WithKey(), jws.WithKeySet(), jws.WithVerifyAuto(), and jws.WithKeyProvider()`) + } + + msg, err := Parse(buf) + if err != nil { + return nil, fmt.Errorf(`failed to parse jws: %w`, err) + } + defer msg.clearRaw() + + if detachedPayload != nil { + if len(msg.payload) != 0 { + return nil, fmt.Errorf(`can't specify detached payload for JWS with payload`) + } + + msg.payload = detachedPayload + } + + // Pre-compute the base64 encoded version of payload + var payload string + if msg.b64 
{ + payload = base64.EncodeToString(msg.payload) + } else { + payload = string(msg.payload) + } + + verifyBuf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(verifyBuf) + + for i, sig := range msg.signatures { + verifyBuf.Reset() + + var encodedProtectedHeader string + if rbp, ok := sig.protected.(interface{ rawBuffer() []byte }); ok { + if raw := rbp.rawBuffer(); raw != nil { + encodedProtectedHeader = base64.EncodeToString(raw) + } + } + + if encodedProtectedHeader == "" { + protected, err := json.Marshal(sig.protected) + if err != nil { + return nil, fmt.Errorf(`failed to marshal "protected" for signature #%d: %w`, i+1, err) + } + + encodedProtectedHeader = base64.EncodeToString(protected) + } + + verifyBuf.WriteString(encodedProtectedHeader) + verifyBuf.WriteByte('.') + verifyBuf.WriteString(payload) + + for i, kp := range keyProviders { + var sink algKeySink + if err := kp.FetchKeys(ctx, &sink, sig, msg); err != nil { + return nil, fmt.Errorf(`key provider %d failed: %w`, i, err) + } + + for _, pair := range sink.list { + // alg is converted here because pair.alg is of type jwa.KeyAlgorithm. + // this may seem ugly, but we're trying to avoid declaring separate + // structs for `alg jwa.KeyAlgorithm` and `alg jwa.SignatureAlgorithm` + //nolint:forcetypeassert + alg := pair.alg.(jwa.SignatureAlgorithm) + key := pair.key + verifier, err := NewVerifier(alg) + if err != nil { + return nil, fmt.Errorf(`failed to create verifier for algorithm %q: %w`, alg, err) + } + + if err := verifier.Verify(verifyBuf.Bytes(), sig.signature, key); err != nil { + continue + } + + if keyUsed != nil { + if err := blackmagic.AssignIfCompatible(keyUsed, key); err != nil { + return nil, fmt.Errorf(`failed to assign used key (%T) to %T: %w`, key, keyUsed, err) + } + } + + if dst != nil { + *(dst) = *msg + } + + return msg.payload, nil + } + } + } + return nil, fmt.Errorf(`could not verify message using any of the signatures or keys`) +} + +// get the value of b64 header field. 
+// If the field does not exist, returns true (default) +// Otherwise return the value specified by the header field. +func getB64Value(hdr Headers) bool { + b64raw, ok := hdr.Get("b64") + if !ok { + return true // default + } + + b64, ok := b64raw.(bool) // default + if !ok { + return false + } + return b64 +} + +// This is an "optimized" io.ReadAll(). It will attempt to read +// all of the contents from the reader IF the reader is of a certain +// concrete type. +func readAll(rdr io.Reader) ([]byte, bool) { + switch rdr.(type) { + case *bytes.Reader, *bytes.Buffer, *strings.Reader: + data, err := io.ReadAll(rdr) + if err != nil { + return nil, false + } + return data, true + default: + return nil, false + } +} + +// Parse parses contents from the given source and creates a jws.Message +// struct. The input can be in either compact or full JSON serialization. +// +// Parse() currently does not take any options, but the API accepts it +// in anticipation of future addition. +func Parse(src []byte, _ ...ParseOption) (*Message, error) { + for i := 0; i < len(src); i++ { + r := rune(src[i]) + if r >= utf8.RuneSelf { + r, _ = utf8.DecodeRune(src) + } + if !unicode.IsSpace(r) { + if r == '{' { + return parseJSON(src) + } + return parseCompact(src) + } + } + return nil, fmt.Errorf(`invalid byte sequence`) +} + +// Parse parses contents from the given source and creates a jws.Message +// struct. The input can be in either compact or full JSON serialization. +func ParseString(src string) (*Message, error) { + return Parse([]byte(src)) +} + +// Parse parses contents from the given source and creates a jws.Message +// struct. The input can be in either compact or full JSON serialization. 
+func ParseReader(src io.Reader) (*Message, error) { + if data, ok := readAll(src); ok { + return Parse(data) + } + + rdr := bufio.NewReader(src) + var first rune + for { + r, _, err := rdr.ReadRune() + if err != nil { + return nil, fmt.Errorf(`failed to read rune: %w`, err) + } + if !unicode.IsSpace(r) { + first = r + if err := rdr.UnreadRune(); err != nil { + return nil, fmt.Errorf(`failed to unread rune: %w`, err) + } + + break + } + } + + var parser func(io.Reader) (*Message, error) + if first == '{' { + parser = parseJSONReader + } else { + parser = parseCompactReader + } + + m, err := parser(rdr) + if err != nil { + return nil, fmt.Errorf(`failed to parse jws message: %w`, err) + } + + return m, nil +} + +func parseJSONReader(src io.Reader) (result *Message, err error) { + var m Message + if err := json.NewDecoder(src).Decode(&m); err != nil { + return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err) + } + return &m, nil +} + +func parseJSON(data []byte) (result *Message, err error) { + var m Message + if err := json.Unmarshal(data, &m); err != nil { + return nil, fmt.Errorf(`failed to unmarshal jws message: %w`, err) + } + return &m, nil +} + +// SplitCompact splits a JWT and returns its three parts +// separately: protected headers, payload and signature. +func SplitCompact(src []byte) ([]byte, []byte, []byte, error) { + parts := bytes.Split(src, []byte(".")) + if len(parts) < 3 { + return nil, nil, nil, fmt.Errorf(`invalid number of segments`) + } + return parts[0], parts[1], parts[2], nil +} + +// SplitCompactString splits a JWT and returns its three parts +// separately: protected headers, payload and signature. 
+func SplitCompactString(src string) ([]byte, []byte, []byte, error) { + parts := strings.Split(src, ".") + if len(parts) < 3 { + return nil, nil, nil, fmt.Errorf(`invalid number of segments`) + } + return []byte(parts[0]), []byte(parts[1]), []byte(parts[2]), nil +} + +// SplitCompactReader splits a JWT and returns its three parts +// separately: protected headers, payload and signature. +func SplitCompactReader(rdr io.Reader) ([]byte, []byte, []byte, error) { + if data, ok := readAll(rdr); ok { + return SplitCompact(data) + } + + var protected []byte + var payload []byte + var signature []byte + var periods int + var state int + + buf := make([]byte, 4096) + var sofar []byte + + for { + // read next bytes + n, err := rdr.Read(buf) + // return on unexpected read error + if err != nil && err != io.EOF { + return nil, nil, nil, fmt.Errorf(`unexpected end of input: %w`, err) + } + + // append to current buffer + sofar = append(sofar, buf[:n]...) + // loop to capture multiple '.' in current buffer + for loop := true; loop; { + var i = bytes.IndexByte(sofar, '.') + if i == -1 && err != io.EOF { + // no '.' found -> exit and read next bytes (outer loop) + loop = false + continue + } else if i == -1 && err == io.EOF { + // no '.' found -> process rest and exit + i = len(sofar) + loop = false + } else { + // '.' found + periods++ + } + + // Reaching this point means we have found a '.' or EOF and process the rest of the buffer + switch state { + case 0: + protected = sofar[:i] + state++ + case 1: + payload = sofar[:i] + state++ + case 2: + signature = sofar[:i] + } + // Shorten current buffer + if len(sofar) > i { + sofar = sofar[i+1:] + } + } + // Exit on EOF + if err == io.EOF { + break + } + } + if periods != 2 { + return nil, nil, nil, fmt.Errorf(`invalid number of segments`) + } + + return protected, payload, signature, nil +} + +// parseCompactReader parses a JWS value serialized via compact serialization. 
+func parseCompactReader(rdr io.Reader) (m *Message, err error) { + protected, payload, signature, err := SplitCompactReader(rdr) + if err != nil { + return nil, fmt.Errorf(`invalid compact serialization format: %w`, err) + } + return parse(protected, payload, signature) +} + +func parseCompact(data []byte) (m *Message, err error) { + protected, payload, signature, err := SplitCompact(data) + if err != nil { + return nil, fmt.Errorf(`invalid compact serialization format: %w`, err) + } + return parse(protected, payload, signature) +} + +func parse(protected, payload, signature []byte) (*Message, error) { + decodedHeader, err := base64.Decode(protected) + if err != nil { + return nil, fmt.Errorf(`failed to decode protected headers: %w`, err) + } + + hdr := NewHeaders() + if err := json.Unmarshal(decodedHeader, hdr); err != nil { + return nil, fmt.Errorf(`failed to parse JOSE headers: %w`, err) + } + + var decodedPayload []byte + b64 := getB64Value(hdr) + if !b64 { + decodedPayload = payload + } else { + v, err := base64.Decode(payload) + if err != nil { + return nil, fmt.Errorf(`failed to decode payload: %w`, err) + } + decodedPayload = v + } + + decodedSignature, err := base64.Decode(signature) + if err != nil { + return nil, fmt.Errorf(`failed to decode signature: %w`, err) + } + + var msg Message + msg.payload = decodedPayload + msg.signatures = append(msg.signatures, &Signature{ + protected: hdr, + signature: decodedSignature, + }) + msg.b64 = b64 + return &msg, nil +} + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. +// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. 
+// +// In that case you would register a custom field as follows +// +// jwe.RegisterCustomField(`x-birthday`, timeT) +// +// Then `hdr.Get("x-birthday")` will still return an `interface{}`, +// but you can convert its type to `time.Time` +// +// bdayif, _ := hdr.Get(`x-birthday`) +// bday := bdayif.(time.Time) +func RegisterCustomField(name string, object interface{}) { + registry.Register(name, object) +} + +// Helpers for signature verification +var rawKeyToKeyType = make(map[reflect.Type]jwa.KeyType) +var keyTypeToAlgorithms = make(map[jwa.KeyType][]jwa.SignatureAlgorithm) + +func init() { + rawKeyToKeyType[reflect.TypeOf([]byte(nil))] = jwa.OctetSeq + rawKeyToKeyType[reflect.TypeOf(ed25519.PublicKey(nil))] = jwa.OKP + rawKeyToKeyType[reflect.TypeOf(rsa.PublicKey{})] = jwa.RSA + rawKeyToKeyType[reflect.TypeOf((*rsa.PublicKey)(nil))] = jwa.RSA + rawKeyToKeyType[reflect.TypeOf(ecdsa.PublicKey{})] = jwa.EC + rawKeyToKeyType[reflect.TypeOf((*ecdsa.PublicKey)(nil))] = jwa.EC + + addAlgorithmForKeyType(jwa.OKP, jwa.EdDSA) + for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256, jwa.HS384, jwa.HS512} { + addAlgorithmForKeyType(jwa.OctetSeq, alg) + } + for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} { + addAlgorithmForKeyType(jwa.RSA, alg) + } + for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256, jwa.ES384, jwa.ES512} { + addAlgorithmForKeyType(jwa.EC, alg) + } +} + +func addAlgorithmForKeyType(kty jwa.KeyType, alg jwa.SignatureAlgorithm) { + keyTypeToAlgorithms[kty] = append(keyTypeToAlgorithms[kty], alg) +} + +// AlgorithmsForKey returns the possible signature algorithms that can +// be used for a given key. It only takes in consideration keys/algorithms +// for verification purposes, as this is the only usage where one may need +// dynamically figure out which method to use. 
+func AlgorithmsForKey(key interface{}) ([]jwa.SignatureAlgorithm, error) { + var kty jwa.KeyType + switch key := key.(type) { + case jwk.Key: + kty = key.KeyType() + case rsa.PublicKey, *rsa.PublicKey, rsa.PrivateKey, *rsa.PrivateKey: + kty = jwa.RSA + case ecdsa.PublicKey, *ecdsa.PublicKey, ecdsa.PrivateKey, *ecdsa.PrivateKey: + kty = jwa.EC + case ed25519.PublicKey, ed25519.PrivateKey, x25519.PublicKey, x25519.PrivateKey: + kty = jwa.OKP + case []byte: + kty = jwa.OctetSeq + default: + return nil, fmt.Errorf(`invalid key %T`, key) + } + + algs, ok := keyTypeToAlgorithms[kty] + if !ok { + return nil, fmt.Errorf(`invalid key type %q`, kty) + } + return algs, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/key_provider.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/key_provider.go new file mode 100644 index 0000000000..7d7518af1e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/key_provider.go @@ -0,0 +1,276 @@ +package jws + +import ( + "context" + "fmt" + "net/url" + "sync" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +// KeyProvider is responsible for providing key(s) to sign or verify a payload. +// Multiple `jws.KeyProvider`s can be passed to `jws.Verify()` or `jws.Sign()` +// +// `jws.Sign()` can only accept static key providers via `jws.WithKey()`, +// while `jws.Verify()` can accept `jws.WithKey()`, `jws.WithKeySet()`, +// `jws.WithVerifyAuto()`, and `jws.WithKeyProvider()`. +// +// Understanding how this works is crucial to learn how this package works. +// +// `jws.Sign()` is straightforward: signatures are created for each +// provided key. +// +// `jws.Verify()` is a bit more involved, because there are cases you +// will want to compute/deduce/guess the keys that you would like to +// use for verification. 
+//
+// The first thing that `jws.Verify()` does is to collect the
+// KeyProviders from the option list that the user provided (presented in pseudocode):
+//
+//	keyProviders := filterKeyProviders(options)
+//
+// Then, remember that a JWS message may contain multiple signatures in the
+// message. For each signature, we call on the KeyProviders to give us
+// the key(s) to use on this signature:
+//
+//	for sig in msg.Signatures {
+//	  for kp in keyProviders {
+//	    kp.FetchKeys(ctx, sink, sig, msg)
+//	    ...
+//	  }
+//	}
+//
+// The `sink` argument passed to the KeyProvider is a temporary storage
+// for the keys (either a jwk.Key or a "raw" key). The `KeyProvider`
+// is responsible for sending keys into the `sink`.
+//
+// When called, the `KeyProvider` created by `jws.WithKey()` sends the same key,
+// `jws.WithKeySet()` sends keys that match a particular `kid` and `alg`,
+// `jws.WithVerifyAuto()` fetches a JWK from the `jku` URL,
+// and finally `jws.WithKeyProvider()` allows you to execute arbitrary
+// logic to provide keys. If you are providing a custom `KeyProvider`,
+// you should execute the necessary checks or retrieval of keys, and
+// then send the key(s) to the sink:
+//
+//	sink.Key(alg, key)
+//
+// These keys are then retrieved and tried for each signature, until
+// a match is found:
+//
+//	keys := sink.Keys()
+//	for key in keys {
+//	  if givenSignature == makeSignature(key, payload, ...) {
+//	    return OK
+//	  }
+//	}
+type KeyProvider interface {
+	FetchKeys(context.Context, KeySink, *Signature, *Message) error
+}
+
+// KeySink is a data storage where `jws.KeyProvider` objects should
+// send their keys to.
+type KeySink interface { + Key(jwa.SignatureAlgorithm, interface{}) +} + +type algKeyPair struct { + alg jwa.KeyAlgorithm + key interface{} +} + +type algKeySink struct { + mu sync.Mutex + list []algKeyPair +} + +func (s *algKeySink) Key(alg jwa.SignatureAlgorithm, key interface{}) { + s.mu.Lock() + s.list = append(s.list, algKeyPair{alg, key}) + s.mu.Unlock() +} + +type staticKeyProvider struct { + alg jwa.SignatureAlgorithm + key interface{} +} + +func (kp *staticKeyProvider) FetchKeys(_ context.Context, sink KeySink, _ *Signature, _ *Message) error { + sink.Key(kp.alg, kp.key) + return nil +} + +type keySetProvider struct { + set jwk.Set + requireKid bool // true if `kid` must be specified + useDefault bool // true if the first key should be used iff there's exactly one key in set + inferAlgorithm bool // true if the algorithm should be inferred from key type + multipleKeysPerKeyID bool // true if we should attempt to match multiple keys per key ID. if false we assume that only one key exists for a given key ID +} + +func (kp *keySetProvider) selectKey(sink KeySink, key jwk.Key, sig *Signature, _ *Message) error { + if usage := key.KeyUsage(); usage != "" && usage != jwk.ForSignature.String() { + return nil + } + + if v := key.Algorithm(); v.String() != "" { + var alg jwa.SignatureAlgorithm + if err := alg.Accept(v); err != nil { + return fmt.Errorf(`invalid signature algorithm %s: %w`, key.Algorithm(), err) + } + + sink.Key(alg, key) + return nil + } + + if kp.inferAlgorithm { + algs, err := AlgorithmsForKey(key) + if err != nil { + return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err) + } + + // bail out if the JWT has a `alg` field, and it doesn't match + if tokAlg := sig.ProtectedHeaders().Algorithm(); tokAlg != "" { + for _, alg := range algs { + if tokAlg == alg { + sink.Key(alg, key) + return nil + } + } + return fmt.Errorf(`algorithm in the message does not match any of the inferred algorithms`) + } + + 
// Yes, you get to try them all!!!!!!! + for _, alg := range algs { + sink.Key(alg, key) + } + return nil + } + return nil +} + +func (kp *keySetProvider) FetchKeys(_ context.Context, sink KeySink, sig *Signature, msg *Message) error { + if kp.requireKid { + wantedKid := sig.ProtectedHeaders().KeyID() + if wantedKid == "" { + // If the kid is NOT specified... kp.useDefault needs to be true, and the + // JWKs must have exactly one key in it + if !kp.useDefault { + return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token`) + } else if kp.useDefault && kp.set.Len() > 1 { + return fmt.Errorf(`failed to find matching key: no key ID ("kid") specified in token but multiple keys available in key set`) + } + + // if we got here, then useDefault == true AND there is exactly + // one key in the set. + key, _ := kp.set.Key(0) + return kp.selectKey(sink, key, sig, msg) + } + + // Otherwise we better be able to look up the key. + // <= v2.0.3 backwards compatible case: only match a single key + // whose key ID matches `wantedKid` + if !kp.multipleKeysPerKeyID { + key, ok := kp.set.LookupKeyID(wantedKid) + if !ok { + return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid) + } + return kp.selectKey(sink, key, sig, msg) + } + + // if multipleKeysPerKeyID is true, we attempt all keys whose key ID matches + // the wantedKey + var ok bool + for i := 0; i < kp.set.Len(); i++ { + key, _ := kp.set.Key(i) + if key.KeyID() != wantedKid { + continue + } + + if err := kp.selectKey(sink, key, sig, msg); err != nil { + continue + } + ok = true + // continue processing so that we try all keys with the same key ID + } + if !ok { + return fmt.Errorf(`failed to find key with key ID %q in key set`, wantedKid) + } + return nil + } + + // Otherwise just try all keys + for i := 0; i < kp.set.Len(); i++ { + key, _ := kp.set.Key(i) + if err := kp.selectKey(sink, key, sig, msg); err != nil { + continue + } + } + return nil +} + +type jkuProvider struct { + 
fetcher jwk.Fetcher
+	options []jwk.FetchOption
+}
+
+func (kp jkuProvider) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, _ *Message) error {
+	kid := sig.ProtectedHeaders().KeyID()
+	if kid == "" {
+		return fmt.Errorf(`use of "jku" requires that the payload contain a "kid" field in the protected header`)
+	}
+
+	// errors here can't be reliably passed to the consumers.
+	// it's unfortunate, but if you need this control, you are
+	// going to have to write your own fetcher
+	u := sig.ProtectedHeaders().JWKSetURL()
+	if u == "" {
+		return fmt.Errorf(`use of "jku" field specified, but the field is empty`)
+	}
+	uo, err := url.Parse(u)
+	if err != nil {
+		return fmt.Errorf(`failed to parse "jku": %w`, err)
+	}
+	if uo.Scheme != "https" {
+		return fmt.Errorf(`url in "jku" must be HTTPS`)
+	}
+
+	set, err := kp.fetcher.Fetch(ctx, u, kp.options...)
+	if err != nil {
+		return fmt.Errorf(`failed to fetch %q: %w`, u, err)
+	}
+
+	key, ok := set.LookupKeyID(kid)
+	if !ok {
+		// It is not an error if the key with the kid doesn't exist
+		return nil
+	}
+
+	algs, err := AlgorithmsForKey(key)
+	if err != nil {
+		return fmt.Errorf(`failed to get a list of signature methods for key type %s: %w`, key.KeyType(), err)
+	}
+
+	hdrAlg := sig.ProtectedHeaders().Algorithm()
+	for _, alg := range algs {
+		// if we have a "alg" field in the JWS, we can only proceed if
+		// the inferred algorithm matches
+		if hdrAlg != "" && hdrAlg != alg {
+			continue
+		}
+
+		sink.Key(alg, key)
+		break
+	}
+	return nil
+}
+
+// KeyProviderFunc is a type of KeyProvider that is implemented by
+// a single function. You can use this to create ad-hoc `KeyProvider`
+// instances.
+type KeyProviderFunc func(context.Context, KeySink, *Signature, *Message) error + +func (kp KeyProviderFunc) FetchKeys(ctx context.Context, sink KeySink, sig *Signature, msg *Message) error { + return kp(ctx, sink, sig, msg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/message.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/message.go new file mode 100644 index 0000000000..e028422360 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/message.go @@ -0,0 +1,497 @@ +package jws + +import ( + "bytes" + "context" + "fmt" + + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwk" +) + +func NewSignature() *Signature { + return &Signature{} +} + +func (s *Signature) DecodeCtx() DecodeCtx { + return s.dc +} + +func (s *Signature) SetDecodeCtx(dc DecodeCtx) { + s.dc = dc +} + +func (s Signature) PublicHeaders() Headers { + return s.headers +} + +func (s *Signature) SetPublicHeaders(v Headers) *Signature { + s.headers = v + return s +} + +func (s Signature) ProtectedHeaders() Headers { + return s.protected +} + +func (s *Signature) SetProtectedHeaders(v Headers) *Signature { + s.protected = v + return s +} + +func (s Signature) Signature() []byte { + return s.signature +} + +func (s *Signature) SetSignature(v []byte) *Signature { + s.signature = v + return s +} + +type signatureUnmarshalProbe struct { + Header Headers `json:"header,omitempty"` + Protected *string `json:"protected,omitempty"` + Signature *string `json:"signature,omitempty"` +} + +func (s *Signature) UnmarshalJSON(data []byte) error { + var sup signatureUnmarshalProbe + sup.Header = NewHeaders() + if err := json.Unmarshal(data, &sup); err != nil { + return fmt.Errorf(`failed to unmarshal signature into temporary struct: %w`, err) + } + + s.headers = sup.Header + if buf := sup.Protected; buf != nil { + src := []byte(*buf) + if !bytes.HasPrefix(src, []byte{'{'}) { + 
decoded, err := base64.Decode(src) + if err != nil { + return fmt.Errorf(`failed to base64 decode protected headers: %w`, err) + } + src = decoded + } + + prt := NewHeaders() + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(s.DecodeCtx()) + if err := json.Unmarshal(src, prt); err != nil { + return fmt.Errorf(`failed to unmarshal protected headers: %w`, err) + } + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(nil) + s.protected = prt + } + + decoded, err := base64.DecodeString(*sup.Signature) + if err != nil { + return fmt.Errorf(`failed to base decode signature: %w`, err) + } + s.signature = decoded + return nil +} + +// Sign populates the signature field, with a signature generated by +// given the signer object and payload. +// +// The first return value is the raw signature in binary format. +// The second return value s the full three-segment signature +// (e.g. "eyXXXX.XXXXX.XXXX") +func (s *Signature) Sign(payload []byte, signer Signer, key interface{}) ([]byte, []byte, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hdrs, err := mergeHeaders(ctx, s.headers, s.protected) + if err != nil { + return nil, nil, fmt.Errorf(`failed to merge headers: %w`, err) + } + + if err := hdrs.Set(AlgorithmKey, signer.Algorithm()); err != nil { + return nil, nil, fmt.Errorf(`failed to set "alg": %w`, err) + } + + // If the key is a jwk.Key instance, obtain the raw key + if jwkKey, ok := key.(jwk.Key); ok { + // If we have a key ID specified by this jwk.Key, use that in the header + if kid := jwkKey.KeyID(); kid != "" { + if err := hdrs.Set(jwk.KeyIDKey, kid); err != nil { + return nil, nil, fmt.Errorf(`set key ID from jwk.Key: %w`, err) + } + } + } + hdrbuf, err := json.Marshal(hdrs) + if err != nil { + return nil, nil, fmt.Errorf(`failed to marshal headers: %w`, err) + } + + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + buf.WriteString(base64.EncodeToString(hdrbuf)) + buf.WriteByte('.') + + 
var plen int + b64 := getB64Value(hdrs) + if b64 { + encoded := base64.EncodeToString(payload) + plen = len(encoded) + buf.WriteString(encoded) + } else { + if !s.detached { + if bytes.Contains(payload, []byte{'.'}) { + return nil, nil, fmt.Errorf(`payload must not contain a "."`) + } + } + plen = len(payload) + buf.Write(payload) + } + + signature, err := signer.Sign(buf.Bytes(), key) + if err != nil { + return nil, nil, fmt.Errorf(`failed to sign payload: %w`, err) + } + s.signature = signature + + // Detached payload, this should be removed from the end result + if s.detached { + buf.Truncate(buf.Len() - plen) + } + + buf.WriteByte('.') + buf.WriteString(base64.EncodeToString(signature)) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + + return signature, ret, nil +} + +func NewMessage() *Message { + return &Message{} +} + +// Clears the internal raw buffer that was accumulated during +// the verify phase +func (m *Message) clearRaw() { + for _, sig := range m.signatures { + if protected := sig.protected; protected != nil { + if cr, ok := protected.(*stdHeaders); ok { + cr.raw = nil + } + } + } +} + +func (m *Message) SetDecodeCtx(dc DecodeCtx) { + m.dc = dc +} + +func (m *Message) DecodeCtx() DecodeCtx { + return m.dc +} + +// Payload returns the decoded payload +func (m Message) Payload() []byte { + return m.payload +} + +func (m *Message) SetPayload(v []byte) *Message { + m.payload = v + return m +} + +func (m Message) Signatures() []*Signature { + return m.signatures +} + +func (m *Message) AppendSignature(v *Signature) *Message { + m.signatures = append(m.signatures, v) + return m +} + +func (m *Message) ClearSignatures() *Message { + m.signatures = nil + return m +} + +// LookupSignature looks up a particular signature entry using +// the `kid` value +func (m Message) LookupSignature(kid string) []*Signature { + var sigs []*Signature + for _, sig := range m.signatures { + if hdr := sig.PublicHeaders(); hdr != nil { + hdrKeyID := hdr.KeyID() + if 
hdrKeyID == kid { + sigs = append(sigs, sig) + continue + } + } + + if hdr := sig.ProtectedHeaders(); hdr != nil { + hdrKeyID := hdr.KeyID() + if hdrKeyID == kid { + sigs = append(sigs, sig) + continue + } + } + } + return sigs +} + +// This struct is used to first probe for the structure of the +// incoming JSON object. We then decide how to parse it +// from the fields that are populated. +type messageUnmarshalProbe struct { + Payload *string `json:"payload"` + Signatures []json.RawMessage `json:"signatures,omitempty"` + Header Headers `json:"header,omitempty"` + Protected *string `json:"protected,omitempty"` + Signature *string `json:"signature,omitempty"` +} + +func (m *Message) UnmarshalJSON(buf []byte) error { + m.payload = nil + m.signatures = nil + m.b64 = true + + var mup messageUnmarshalProbe + mup.Header = NewHeaders() + if err := json.Unmarshal(buf, &mup); err != nil { + return fmt.Errorf(`failed to unmarshal into temporary structure: %w`, err) + } + + b64 := true + if mup.Signature == nil { // flattened signature is NOT present + if len(mup.Signatures) == 0 { + return fmt.Errorf(`required field "signatures" not present`) + } + + m.signatures = make([]*Signature, 0, len(mup.Signatures)) + for i, rawsig := range mup.Signatures { + var sig Signature + sig.SetDecodeCtx(m.DecodeCtx()) + if err := json.Unmarshal(rawsig, &sig); err != nil { + return fmt.Errorf(`failed to unmarshal signature #%d: %w`, i+1, err) + } + sig.SetDecodeCtx(nil) + + if i == 0 { + if !getB64Value(sig.protected) { + b64 = false + } + } else { + if b64 != getB64Value(sig.protected) { + return fmt.Errorf(`b64 value must be the same for all signatures`) + } + } + + m.signatures = append(m.signatures, &sig) + } + } else { // .signature is present, it's a flattened structure + if len(mup.Signatures) != 0 { + return fmt.Errorf(`invalid format ("signatures" and "signature" keys cannot both be present)`) + } + + var sig Signature + sig.headers = mup.Header + if src := mup.Protected; src != nil 
{ + decoded, err := base64.DecodeString(*src) + if err != nil { + return fmt.Errorf(`failed to base64 decode flattened protected headers: %w`, err) + } + prt := NewHeaders() + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(m.DecodeCtx()) + if err := json.Unmarshal(decoded, prt); err != nil { + return fmt.Errorf(`failed to unmarshal flattened protected headers: %w`, err) + } + //nolint:forcetypeassert + prt.(*stdHeaders).SetDecodeCtx(nil) + sig.protected = prt + } + + decoded, err := base64.DecodeString(*mup.Signature) + if err != nil { + return fmt.Errorf(`failed to base64 decode flattened signature: %w`, err) + } + sig.signature = decoded + + m.signatures = []*Signature{&sig} + b64 = getB64Value(sig.protected) + } + + if mup.Payload != nil { + if !b64 { // NOT base64 encoded + m.payload = []byte(*mup.Payload) + } else { + decoded, err := base64.DecodeString(*mup.Payload) + if err != nil { + return fmt.Errorf(`failed to base64 decode payload: %w`, err) + } + m.payload = decoded + } + } + m.b64 = b64 + return nil +} + +func (m Message) MarshalJSON() ([]byte, error) { + if len(m.signatures) == 1 { + return m.marshalFlattened() + } + return m.marshalFull() +} + +func (m Message) marshalFlattened() ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + sig := m.signatures[0] + + buf.WriteRune('{') + var wrote bool + + if hdr := sig.headers; hdr != nil { + hdrjs, err := hdr.MarshalJSON() + if err != nil { + return nil, fmt.Errorf(`failed to marshal "header" (flattened format): %w`, err) + } + buf.WriteString(`"header":`) + buf.Write(hdrjs) + wrote = true + } + + if wrote { + buf.WriteRune(',') + } + buf.WriteString(`"payload":"`) + buf.WriteString(base64.EncodeToString(m.payload)) + buf.WriteRune('"') + + if protected := sig.protected; protected != nil { + protectedbuf, err := protected.MarshalJSON() + if err != nil { + return nil, fmt.Errorf(`failed to marshal "protected" (flattened format): %w`, err) + } + 
buf.WriteString(`,"protected":"`) + buf.WriteString(base64.EncodeToString(protectedbuf)) + buf.WriteRune('"') + } + + buf.WriteString(`,"signature":"`) + buf.WriteString(base64.EncodeToString(sig.signature)) + buf.WriteRune('"') + buf.WriteRune('}') + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (m Message) marshalFull() ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + buf.WriteString(`{"payload":"`) + buf.WriteString(base64.EncodeToString(m.payload)) + buf.WriteString(`","signatures":[`) + for i, sig := range m.signatures { + if i > 0 { + buf.WriteRune(',') + } + + buf.WriteRune('{') + var wrote bool + if hdr := sig.headers; hdr != nil { + hdrbuf, err := hdr.MarshalJSON() + if err != nil { + return nil, fmt.Errorf(`failed to marshal "header" for signature #%d: %w`, i+1, err) + } + buf.WriteString(`"header":`) + buf.Write(hdrbuf) + wrote = true + } + + if protected := sig.protected; protected != nil { + protectedbuf, err := protected.MarshalJSON() + if err != nil { + return nil, fmt.Errorf(`failed to marshal "protected" for signature #%d: %w`, i+1, err) + } + if wrote { + buf.WriteRune(',') + } + buf.WriteString(`"protected":"`) + buf.WriteString(base64.EncodeToString(protectedbuf)) + buf.WriteRune('"') + wrote = true + } + + if wrote { + buf.WriteRune(',') + } + buf.WriteString(`"signature":"`) + buf.WriteString(base64.EncodeToString(sig.signature)) + buf.WriteString(`"}`) + } + buf.WriteString(`]}`) + + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +// Compact generates a JWS message in compact serialization format from +// `*jws.Message` object. The object contain exactly one signature, or +// an error is returned. +// +// If using a detached payload, the payload must already be stored in +// the `*jws.Message` object, and the `jws.WithDetached()` option +// must be passed to the function. 
+func Compact(msg *Message, options ...CompactOption) ([]byte, error) { + if l := len(msg.signatures); l != 1 { + return nil, fmt.Errorf(`jws.Compact: cannot serialize message with %d signatures (must be one)`, l) + } + + var detached bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identDetached{}: + detached = option.Value().(bool) + } + } + + s := msg.signatures[0] + // XXX check if this is correct + hdrs := s.ProtectedHeaders() + + hdrbuf, err := json.Marshal(hdrs) + if err != nil { + return nil, fmt.Errorf(`jws.Compress: failed to marshal headers: %w`, err) + } + + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + + buf.WriteString(base64.EncodeToString(hdrbuf)) + buf.WriteByte('.') + + if !detached { + if getB64Value(hdrs) { + encoded := base64.EncodeToString(msg.payload) + buf.WriteString(encoded) + } else { + if bytes.Contains(msg.payload, []byte{'.'}) { + return nil, fmt.Errorf(`jws.Compress: payload must not contain a "."`) + } + buf.Write(msg.payload) + } + } + + buf.WriteByte('.') + buf.WriteString(base64.EncodeToString(s.signature)) + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/options.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.go new file mode 100644 index 0000000000..7b38e92dd3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.go @@ -0,0 +1,159 @@ +package jws + +import ( + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/option" +) + +type identHeaders struct{} + +// WithHeaders allows you to specify extra header values to include in the +// final JWS message +func WithHeaders(h Headers) SignOption { + return &signOption{option.New(identHeaders{}, h)} +} + +// WithJSON specifies that the result of `jws.Sign()` is serialized in +// JSON format. 
+//
+// If you pass multiple keys to `jws.Sign()`, it will fail unless
+// you also pass this option.
+func WithJSON(options ...WithJSONSuboption) SignOption {
+	var pretty bool
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identPretty{}:
+			pretty = option.Value().(bool)
+		}
+	}
+
+	format := fmtJSON
+	if pretty {
+		format = fmtJSONPretty
+	}
+	return &signOption{option.New(identSerialization{}, format)}
+}
+
+type withKey struct {
+	alg       jwa.KeyAlgorithm
+	key       interface{}
+	protected Headers
+	public    Headers
+}
+
+// These exist as escape hatches to modify the header values after the fact
+func (w *withKey) Protected(v Headers) Headers {
+	if w.protected == nil && v != nil {
+		w.protected = v
+	}
+	return w.protected
+}
+
+// WithKey is used to pass a static algorithm/key pair to either `jws.Sign()` or `jws.Verify()`.
+//
+// The `alg` parameter is the identifier for the signature algorithm that should be used.
+// It is of type `jwa.KeyAlgorithm` but in reality you can only pass `jwa.SignatureAlgorithm`
+// types. It is this way so that the value in `(jwk.Key).Algorithm()` can be directly
+// passed to the option. If you specify other algorithm types such as `jwa.ContentEncryptionAlgorithm`,
+// then you will get an error when `jws.Sign()` or `jws.Verify()` is executed.
+//
+// The algorithm specified in the `alg` parameter must be able to support
+// the type of key you provided, otherwise an error is returned.
+//
+// Any of the following is accepted for the `key` parameter:
+// * A "raw" key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc)
+// * A crypto.Signer
+// * A jwk.Key
+//
+// A `crypto.Signer` is used when the private part of a key is
+// kept in an inaccessible location, such as hardware.
+// `crypto.Signer` is currently supported for RSA, ECDSA, and EdDSA
+// family of algorithms. You may consider using `github.com/jwx-go/crypto-signer`
+// if you would like to use keys stored in GCP/AWS KMS services.
+//
+// If the key is a jwk.Key and the key contains a key ID (`kid` field),
+// then it is added to the protected header generated by the signature.
+//
+// `jws.WithKey()` can further accept suboptions to change signing behavior
+// when used with `jws.Sign()`. `jws.WithProtected()` and `jws.WithPublic()`
+// can be passed to specify JWS headers that should be used when signing.
+//
+// If the protected headers contain "b64" field, then the boolean value for the field
+// is respected when serializing. That is, if you specify a header with
+// `{"b64": false}`, then the payload is not base64 encoded.
+//
+// These suboptions are ignored when the `jws.WithKey()` option is used with `jws.Verify()`.
+func WithKey(alg jwa.KeyAlgorithm, key interface{}, options ...WithKeySuboption) SignVerifyOption {
+	// Implementation note: this option is shared between Sign() and
+	// Verify(). As such we don't create a KeyProvider here because
+	// if used in Sign() we would be doing something else.
+	var protected, public Headers
+	for _, option := range options {
+		//nolint:forcetypeassert
+		switch option.Ident() {
+		case identProtectedHeaders{}:
+			protected = option.Value().(Headers)
+		case identPublicHeaders{}:
+			public = option.Value().(Headers)
+		}
+	}
+
+	return &signVerifyOption{
+		option.New(identKey{}, &withKey{
+			alg:       alg,
+			key:       key,
+			protected: protected,
+			public:    public,
+		}),
+	}
+}
+
+// WithKeySet specifies a JWKS (jwk.Set) to use for verification.
+//
+// By default both `alg` and `kid` fields in the JWS _and_ the
+// key must match for a key in the JWKS to be considered to be used.
+//
+// The behavior can be tweaked by using the `jws.WithKeySetSuboption`
+// suboption types.
+func WithKeySet(set jwk.Set, options ...WithKeySetSuboption) VerifyOption { + requireKid := true + var useDefault, inferAlgorithm, multipleKeysPerKeyID bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identRequireKid{}: + requireKid = option.Value().(bool) + case identUseDefault{}: + useDefault = option.Value().(bool) + case identMultipleKeysPerKeyID{}: + multipleKeysPerKeyID = option.Value().(bool) + case identInferAlgorithmFromKey{}: + inferAlgorithm = option.Value().(bool) + } + } + + return WithKeyProvider(&keySetProvider{ + set: set, + requireKid: requireKid, + useDefault: useDefault, + multipleKeysPerKeyID: multipleKeysPerKeyID, + inferAlgorithm: inferAlgorithm, + }) +} + +func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) VerifyOption { + if f == nil { + f = jwk.FetchFunc(jwk.Fetch) + } + + // the option MUST start with a "disallow no whitelist" to force + // users provide a whitelist + options = append(append([]jwk.FetchOption(nil), jwk.WithFetchWhitelist(allowNoneWhitelist)), options...) 
+ + return WithKeyProvider(jkuProvider{ + fetcher: f, + options: options, + }) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/options.yaml b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.yaml new file mode 100644 index 0000000000..5e1b5b2adc --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/options.yaml @@ -0,0 +1,167 @@ +package_name: jws +output: jws/options_gen.go +interfaces: + - name: CompactOption + comment: | + CompactOption describes options that can be passed to `jws.Compact` + - name: VerifyOption + comment: | + VerifyOption describes options that can be passed to `jws.Verify` + - name: SignOption + comment: | + SignOption describes options that can be passed to `jws.Sign` + - name: SignVerifyOption + methods: + - signOption + - verifyOption + comment: | + SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign` + - name: WithJSONSuboption + concrete_type: withJSONSuboption + comment: | + JSONSuboption describes suboptions that can be passed to `jws.WithJSON()` option + - name: WithKeySuboption + comment: | + WithKeySuboption describes option types that can be passed to the `jws.WithKey()` + option. + - name: WithKeySetSuboption + comment: | + WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option + - name: ParseOption + methods: + - readFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jwe.Parse` + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` +options: + - ident: Key + skip_option: true + - ident: Serialization + skip_option: true + - ident: Serialization + option_name: WithCompact + interface: SignOption + constant_value: fmtCompact + comment: | + WithCompact specifies that the result of `jws.Sign()` is serialized in + compact format. 
+ + By default `jws.Sign()` will opt to use compact format, so you usually + do not need to specify this option other than to be explicit about it + - ident: Detached + interface: CompactOption + argument_type: bool + comment: | + WithDetached specifies that the `jws.Message` should be serialized in + JWS compact serialization with detached payload. The resulting octet + sequence will not contain the payload section. + - ident: DetachedPayload + interface: SignVerifyOption + argument_type: '[]byte' + comment: | + WithDetachedPayload can be used to both sign or verify a JWS message with a + detached payload. + + When this option is used for `jws.Sign()`, the first parameter (normally the payload) + must be set to `nil`. + + If you have to verify using this option, you should know exactly how and why this works. + - ident: Message + interface: VerifyOption + argument_type: '*Message' + comment: | + WithMessage can be passed to Verify() to obtain the jws.Message upon + a successful verification. + - ident: KeyUsed + interface: VerifyOption + argument_type: 'interface{}' + comment: | + WithKeyUsed allows you to specify the `jws.Verify()` function to + return the key used for verification. This may be useful when + you specify multiple key sources or if you pass a `jwk.Set` + and you want to know which key was successful at verifying the + signature. + + `v` must be a pointer to an empty `interface{}`. Do not use + `jwk.Key` here unless you are 100% sure that all keys that you + have provided are instances of `jwk.Key` (remember that the + jwx API allows users to specify a raw key such as *rsa.PublicKey) + - ident: InferAlgorithmFromKey + interface: WithKeySetSuboption + argument_type: bool + comment: | + WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name + should be inferred by looking at the provided key, in case the JWS + message or the key does not have a proper `alg` header. 
+
+      Compared to providing explicit `alg` from the key this is slower, and
+      verification may fail to verify if somehow our heuristics are wrong
+      or outdated.
+
+      Also, automatic detection of signature verification methods are always
+      more vulnerable for potential attack vectors.
+
+      It is highly recommended that you fix your key to contain a proper `alg`
+      header field instead of resorting to using this option, but sometimes
+      it just needs to happen.
+  - ident: UseDefault
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithUseDefault specifies that if and only if a jwk.Key contains
+      exactly one jwk.Key, that key should be used.
+      (I think this should be removed)
+  - ident: RequireKid
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithRequireKid specifies whether the keys in the jwk.Set should
+      only be matched if the target JWS message's Key ID and the Key ID
+      in the given key matches.
+  - ident: MultipleKeysPerKeyID
+    interface: WithKeySetSuboption
+    argument_type: bool
+    comment: |
+      WithMultipleKeysPerKeyID specifies if we should expect multiple keys
+      to match against a key ID. By default it is assumed that key IDs are
+      unique, i.e. for a given key ID, the key set only contains a single
+      key that has the matching ID. When this option is set to true,
+      multiple keys that match the same key ID in the set can be tried.
+  - ident: Pretty
+    interface: WithJSONSuboption
+    argument_type: bool
+    comment: |
+      WithPretty specifies whether the JSON output should be formatted and
+      indented
+  - ident: KeyProvider
+    interface: VerifyOption
+    argument_type: KeyProvider
+  - ident: Context
+    interface: VerifyOption
+    argument_type: context.Context
+  - ident: ProtectedHeaders
+    interface: WithKeySuboption
+    argument_type: Headers
+    comment: |
+      WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()`
+      to specify a protected header to be attached to the JWS signature.
+ + It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()` + - ident: PublicHeaders + interface: WithKeySuboption + argument_type: Headers + comment: | + WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()` + to specify a public header to be attached to the JWS signature. + + It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()` + + `jws.Sign()` will result in an error if `jws.WithPublic()` is used + and the serialization format is compact serialization. + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/options_gen.go new file mode 100644 index 0000000000..fea18bf79f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/options_gen.go @@ -0,0 +1,331 @@ +// This file is auto-generated by internal/cmd/genoptions/main.go. 
DO NOT EDIT
+
+package jws
+
+import (
+	"context"
+	"io/fs"
+
+	"github.com/lestrrat-go/option"
+)
+
+type Option = option.Interface
+
+// CompactOption describes options that can be passed to `jws.Compact`
+type CompactOption interface {
+	Option
+	compactOption()
+}
+
+type compactOption struct {
+	Option
+}
+
+func (*compactOption) compactOption() {}
+
+// ParseOption is a type of `Option` that can be passed to `jws.Parse`
+type ParseOption interface {
+	Option
+	readFileOption()
+}
+
+type parseOption struct {
+	Option
+}
+
+func (*parseOption) readFileOption() {}
+
+// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile`
+type ReadFileOption interface {
+	Option
+	readFileOption()
+}
+
+type readFileOption struct {
+	Option
+}
+
+func (*readFileOption) readFileOption() {}
+
+// SignOption describes options that can be passed to `jws.Sign`
+type SignOption interface {
+	Option
+	signOption()
+}
+
+type signOption struct {
+	Option
+}
+
+func (*signOption) signOption() {}
+
+// SignVerifyOption describes options that can be passed to either `jws.Verify` or `jws.Sign`
+type SignVerifyOption interface {
+	Option
+	signOption()
+	verifyOption()
+}
+
+type signVerifyOption struct {
+	Option
+}
+
+func (*signVerifyOption) signOption() {}
+
+func (*signVerifyOption) verifyOption() {}
+
+// VerifyOption describes options that can be passed to `jws.Verify`
+type VerifyOption interface {
+	Option
+	verifyOption()
+}
+
+type verifyOption struct {
+	Option
+}
+
+func (*verifyOption) verifyOption() {}
+
+// JSONSuboption describes suboptions that can be passed to `jws.WithJSON()` option
+type WithJSONSuboption interface {
+	Option
+	withJSONSuboption()
+}
+
+type withJSONSuboption struct {
+	Option
+}
+
+func (*withJSONSuboption) withJSONSuboption() {}
+
+// WithKeySetSuboption is a suboption passed to the `jws.WithKeySet()` option
+type WithKeySetSuboption interface {
+	Option
+	withKeySetSuboption()
+}
+
+type withKeySetSuboption struct {
+	Option
+} + +func (*withKeySetSuboption) withKeySetSuboption() {} + +// WithKeySuboption describes option types that can be passed to the `jws.WithKey()` +// option. +type WithKeySuboption interface { + Option + withKeySuboption() +} + +type withKeySuboption struct { + Option +} + +func (*withKeySuboption) withKeySuboption() {} + +type identContext struct{} +type identDetached struct{} +type identDetachedPayload struct{} +type identFS struct{} +type identInferAlgorithmFromKey struct{} +type identKey struct{} +type identKeyProvider struct{} +type identKeyUsed struct{} +type identMessage struct{} +type identMultipleKeysPerKeyID struct{} +type identPretty struct{} +type identProtectedHeaders struct{} +type identPublicHeaders struct{} +type identRequireKid struct{} +type identSerialization struct{} +type identUseDefault struct{} + +func (identContext) String() string { + return "WithContext" +} + +func (identDetached) String() string { + return "WithDetached" +} + +func (identDetachedPayload) String() string { + return "WithDetachedPayload" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identInferAlgorithmFromKey) String() string { + return "WithInferAlgorithmFromKey" +} + +func (identKey) String() string { + return "WithKey" +} + +func (identKeyProvider) String() string { + return "WithKeyProvider" +} + +func (identKeyUsed) String() string { + return "WithKeyUsed" +} + +func (identMessage) String() string { + return "WithMessage" +} + +func (identMultipleKeysPerKeyID) String() string { + return "WithMultipleKeysPerKeyID" +} + +func (identPretty) String() string { + return "WithPretty" +} + +func (identProtectedHeaders) String() string { + return "WithProtectedHeaders" +} + +func (identPublicHeaders) String() string { + return "WithPublicHeaders" +} + +func (identRequireKid) String() string { + return "WithRequireKid" +} + +func (identSerialization) String() string { + return "WithSerialization" +} + +func (identUseDefault) String() string { + return 
"WithUseDefault" +} + +func WithContext(v context.Context) VerifyOption { + return &verifyOption{option.New(identContext{}, v)} +} + +// WithDetached specifies that the `jws.Message` should be serialized in +// JWS compact serialization with detached payload. The resulting octet +// sequence will not contain the payload section. +func WithDetached(v bool) CompactOption { + return &compactOption{option.New(identDetached{}, v)} +} + +// WithDetachedPayload can be used to both sign or verify a JWS message with a +// detached payload. +// +// When this option is used for `jws.Sign()`, the first parameter (normally the payload) +// must be set to `nil`. +// +// If you have to verify using this option, you should know exactly how and why this works. +func WithDetachedPayload(v []byte) SignVerifyOption { + return &signVerifyOption{option.New(identDetachedPayload{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +// WithInferAlgorithmFromKey specifies whether the JWS signing algorithm name +// should be inferred by looking at the provided key, in case the JWS +// message or the key does not have a proper `alg` header. +// +// Compared to providing explicit `alg` from the key this is slower, and +// verification may fail to verify if some how our heuristics are wrong +// or outdated. +// +// Also, automatic detection of signature verification methods are always +// more vulnerable for potential attack vectors. +// +// It is highly recommended that you fix your key to contain a proper `alg` +// header field instead of resorting to using this option, but sometimes +// it just needs to happen. 
+func WithInferAlgorithmFromKey(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identInferAlgorithmFromKey{}, v)} +} + +func WithKeyProvider(v KeyProvider) VerifyOption { + return &verifyOption{option.New(identKeyProvider{}, v)} +} + +// WithKeyUsed allows you to specify the `jws.Verify()` function to +// return the key used for verification. This may be useful when +// you specify multiple key sources or if you pass a `jwk.Set` +// and you want to know which key was successful at verifying the +// signature. +// +// `v` must be a pointer to an empty `interface{}`. Do not use +// `jwk.Key` here unless you are 100% sure that all keys that you +// have provided are instances of `jwk.Key` (remember that the +// jwx API allows users to specify a raw key such as *rsa.PublicKey) +func WithKeyUsed(v interface{}) VerifyOption { + return &verifyOption{option.New(identKeyUsed{}, v)} +} + +// WithMessage can be passed to Verify() to obtain the jws.Message upon +// a successful verification. +func WithMessage(v *Message) VerifyOption { + return &verifyOption{option.New(identMessage{}, v)} +} + +// WithMultipleKeysPerKeyID specifies if we should expect multiple keys +// to match against a key ID. By default it is assumed that key IDs are +// unique, i.e. for a given key ID, the key set only contains a single +// key that has the matching ID. When this option is set to true, +// multiple keys that match the same key ID in the set can be tried. +func WithMultipleKeysPerKeyID(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identMultipleKeysPerKeyID{}, v)} +} + +// WithPretty specifies whether the JSON output should be formatted and +// indented +func WithPretty(v bool) WithJSONSuboption { + return &withJSONSuboption{option.New(identPretty{}, v)} +} + +// WithProtected is used with `jws.WithKey()` option when used with `jws.Sign()` +// to specify a protected header to be attached to the JWS signature. 
+//
+// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+func WithProtectedHeaders(v Headers) WithKeySuboption {
+	return &withKeySuboption{option.New(identProtectedHeaders{}, v)}
+}
+
+// WithPublic is used with `jws.WithKey()` option when used with `jws.Sign()`
+// to specify a public header to be attached to the JWS signature.
+//
+// It has no effect if used when `jws.WithKey()` is passed to `jws.Verify()`
+//
+// `jws.Sign()` will result in an error if `jws.WithPublic()` is used
+// and the serialization format is compact serialization.
+func WithPublicHeaders(v Headers) WithKeySuboption {
+	return &withKeySuboption{option.New(identPublicHeaders{}, v)}
+}
+
+// WithRequireKid specifies whether the keys in the jwk.Set should
+// only be matched if the target JWS message's Key ID and the Key ID
+// in the given key matches.
+func WithRequireKid(v bool) WithKeySetSuboption {
+	return &withKeySetSuboption{option.New(identRequireKid{}, v)}
+}
+
+// WithCompact specifies that the result of `jws.Sign()` is serialized in
+// compact format.
+//
+// By default `jws.Sign()` will opt to use compact format, so you usually
+// do not need to specify this option other than to be explicit about it
+func WithCompact() SignOption {
+	return &signOption{option.New(identSerialization{}, fmtCompact)}
+}
+
+// WithUseDefault specifies that if and only if a jwk.Set contains
+// exactly one jwk.Key, that key should be used.
+// (I think this should be removed) +func WithUseDefault(v bool) WithKeySetSuboption { + return &withKeySetSuboption{option.New(identUseDefault{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/rsa.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/rsa.go new file mode 100644 index 0000000000..e239330a23 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/rsa.go @@ -0,0 +1,142 @@ +package jws + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "fmt" + + "github.com/lestrrat-go/jwx/v2/internal/keyconv" + "github.com/lestrrat-go/jwx/v2/jwa" +) + +var rsaSigners map[jwa.SignatureAlgorithm]*rsaSigner +var rsaVerifiers map[jwa.SignatureAlgorithm]*rsaVerifier + +func init() { + algs := map[jwa.SignatureAlgorithm]struct { + Hash crypto.Hash + PSS bool + }{ + jwa.RS256: { + Hash: crypto.SHA256, + }, + jwa.RS384: { + Hash: crypto.SHA384, + }, + jwa.RS512: { + Hash: crypto.SHA512, + }, + jwa.PS256: { + Hash: crypto.SHA256, + PSS: true, + }, + jwa.PS384: { + Hash: crypto.SHA384, + PSS: true, + }, + jwa.PS512: { + Hash: crypto.SHA512, + PSS: true, + }, + } + + rsaSigners = make(map[jwa.SignatureAlgorithm]*rsaSigner) + rsaVerifiers = make(map[jwa.SignatureAlgorithm]*rsaVerifier) + for alg, item := range algs { + rsaSigners[alg] = &rsaSigner{ + alg: alg, + hash: item.Hash, + pss: item.PSS, + } + rsaVerifiers[alg] = &rsaVerifier{ + alg: alg, + hash: item.Hash, + pss: item.PSS, + } + } +} + +type rsaSigner struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash + pss bool +} + +func newRSASigner(alg jwa.SignatureAlgorithm) Signer { + return rsaSigners[alg] +} + +func (rs *rsaSigner) Algorithm() jwa.SignatureAlgorithm { + return rs.alg +} + +func (rs *rsaSigner) Sign(payload []byte, key interface{}) ([]byte, error) { + if key == nil { + return nil, fmt.Errorf(`missing private key while signing payload`) + } + + signer, ok := key.(crypto.Signer) + if !ok { + var privkey rsa.PrivateKey + if err := keyconv.RSAPrivateKey(&privkey, key); err != nil { + return 
nil, fmt.Errorf(`failed to retrieve rsa.PrivateKey out of %T: %w`, key, err) + } + signer = &privkey + } + + h := rs.hash.New() + if _, err := h.Write(payload); err != nil { + return nil, fmt.Errorf(`failed to write payload to hash: %w`, err) + } + if rs.pss { + return signer.Sign(rand.Reader, h.Sum(nil), &rsa.PSSOptions{ + Hash: rs.hash, + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + return signer.Sign(rand.Reader, h.Sum(nil), rs.hash) +} + +type rsaVerifier struct { + alg jwa.SignatureAlgorithm + hash crypto.Hash + pss bool +} + +func newRSAVerifier(alg jwa.SignatureAlgorithm) Verifier { + return rsaVerifiers[alg] +} + +func (rv *rsaVerifier) Verify(payload, signature []byte, key interface{}) error { + if key == nil { + return fmt.Errorf(`missing public key while verifying payload`) + } + + var pubkey rsa.PublicKey + if cs, ok := key.(crypto.Signer); ok { + cpub := cs.Public() + switch cpub := cpub.(type) { + case rsa.PublicKey: + pubkey = cpub + case *rsa.PublicKey: + pubkey = *cpub + default: + return fmt.Errorf(`failed to retrieve rsa.PublicKey out of crypto.Signer %T`, key) + } + } else { + if err := keyconv.RSAPublicKey(&pubkey, key); err != nil { + return fmt.Errorf(`failed to retrieve rsa.PublicKey out of %T: %w`, key, err) + } + } + + h := rv.hash.New() + if _, err := h.Write(payload); err != nil { + return fmt.Errorf(`failed to write payload to hash: %w`, err) + } + + if rv.pss { + return rsa.VerifyPSS(&pubkey, rv.hash, h.Sum(nil), signature, nil) + } + return rsa.VerifyPKCS1v15(&pubkey, rv.hash, h.Sum(nil), signature) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/signer.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/signer.go new file mode 100644 index 0000000000..46e73eb174 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/signer.go @@ -0,0 +1,69 @@ +package jws + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v2/jwa" +) + +type SignerFactory interface { + Create() (Signer, error) +} +type SignerFactoryFn func() (Signer, 
error) + +func (fn SignerFactoryFn) Create() (Signer, error) { + return fn() +} + +var signerDB map[jwa.SignatureAlgorithm]SignerFactory + +// RegisterSigner is used to register a factory object that creates +// Signer objects based on the given algorithm. +// +// For example, if you would like to provide a custom signer for +// jwa.EdDSA, use this function to register a `SignerFactory` +// (probably in your `init()`) +func RegisterSigner(alg jwa.SignatureAlgorithm, f SignerFactory) { + signerDB[alg] = f +} + +func init() { + signerDB = make(map[jwa.SignatureAlgorithm]SignerFactory) + + for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} { + RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { + return SignerFactoryFn(func() (Signer, error) { + return newRSASigner(alg), nil + }) + }(alg)) + } + + for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256, jwa.ES384, jwa.ES512, jwa.ES256K} { + RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { + return SignerFactoryFn(func() (Signer, error) { + return newECDSASigner(alg), nil + }) + }(alg)) + } + + for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256, jwa.HS384, jwa.HS512} { + RegisterSigner(alg, func(alg jwa.SignatureAlgorithm) SignerFactory { + return SignerFactoryFn(func() (Signer, error) { + return newHMACSigner(alg), nil + }) + }(alg)) + } + + RegisterSigner(jwa.EdDSA, SignerFactoryFn(func() (Signer, error) { + return newEdDSASigner(), nil + })) +} + +// NewSigner creates a signer that signs payloads using the given signature algorithm. 
+func NewSigner(alg jwa.SignatureAlgorithm) (Signer, error) { + f, ok := signerDB[alg] + if ok { + return f.Create() + } + return nil, fmt.Errorf(`unsupported signature algorithm "%s"`, alg) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jws/verifier.go b/vendor/github.com/lestrrat-go/jwx/v2/jws/verifier.go new file mode 100644 index 0000000000..8093f87958 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jws/verifier.go @@ -0,0 +1,69 @@ +package jws + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v2/jwa" +) + +type VerifierFactory interface { + Create() (Verifier, error) +} +type VerifierFactoryFn func() (Verifier, error) + +func (fn VerifierFactoryFn) Create() (Verifier, error) { + return fn() +} + +var verifierDB map[jwa.SignatureAlgorithm]VerifierFactory + +// RegisterVerifier is used to register a factory object that creates +// Verifier objects based on the given algorithm. +// +// For example, if you would like to provide a custom verifier for +// jwa.EdDSA, use this function to register a `VerifierFactory` +// (probably in your `init()`) +func RegisterVerifier(alg jwa.SignatureAlgorithm, f VerifierFactory) { + verifierDB[alg] = f +} + +func init() { + verifierDB = make(map[jwa.SignatureAlgorithm]VerifierFactory) + + for _, alg := range []jwa.SignatureAlgorithm{jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512} { + RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory { + return VerifierFactoryFn(func() (Verifier, error) { + return newRSAVerifier(alg), nil + }) + }(alg)) + } + + for _, alg := range []jwa.SignatureAlgorithm{jwa.ES256, jwa.ES384, jwa.ES512, jwa.ES256K} { + RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory { + return VerifierFactoryFn(func() (Verifier, error) { + return newECDSAVerifier(alg), nil + }) + }(alg)) + } + + for _, alg := range []jwa.SignatureAlgorithm{jwa.HS256, jwa.HS384, jwa.HS512} { + RegisterVerifier(alg, func(alg jwa.SignatureAlgorithm) VerifierFactory { + 
			return VerifierFactoryFn(func() (Verifier, error) {
+				return newHMACVerifier(alg), nil
+			})
+		}(alg))
+	}
+
+	RegisterVerifier(jwa.EdDSA, VerifierFactoryFn(func() (Verifier, error) {
+		return newEdDSAVerifier(), nil
+	}))
+}
+
+// NewVerifier creates a verifier that verifies payloads using the given signature algorithm.
+func NewVerifier(alg jwa.SignatureAlgorithm) (Verifier, error) {
+	f, ok := verifierDB[alg]
+	if ok {
+		return f.Create()
+	}
+	return nil, fmt.Errorf(`unsupported signature algorithm "%s"`, alg)
+}
diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/README.md b/vendor/github.com/lestrrat-go/jwx/v2/jwt/README.md
new file mode 100644
index 0000000000..103cafcacf
--- /dev/null
+++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/README.md
@@ -0,0 +1,224 @@
+# JWT [![Go Reference](https://pkg.go.dev/badge/github.com/lestrrat-go/jwx/v2/jwt.svg)](https://pkg.go.dev/github.com/lestrrat-go/jwx/v2/jwt)
+
+Package jwt implements JSON Web Tokens as described in [RFC7519](https://tools.ietf.org/html/rfc7519).
+
+* Convenience methods for oft-used keys ("aud", "sub", "iss", etc)
+* Convenience functions to extract/parse from http.Request, http.Header, url.Values
+* Ability to Get/Set arbitrary keys
+* Conversion to and from JSON
+* Generate signed tokens
+* Verify signed tokens
+* Extra support for OpenID tokens via [github.com/lestrrat-go/jwx/v2/jwt/openid](./jwt/openid)
+
+How-to style documentation can be found in the [docs directory](../docs).
+ +More examples are located in the examples directory ([jwt_example_test.go](../examples/jwt_example_test.go)) + +# SYNOPSIS + +## Verify a signed JWT + +```go + token, err := jwt.Parse(payload, jwt.WithKey(alg, key)) + if err != nil { + fmt.Printf("failed to parse payload: %s\n", err) + } +``` + +## Token Usage + +```go +func ExampleJWT() { + const aLongLongTimeAgo = 233431200 + + t := jwt.New() + t.Set(jwt.SubjectKey, `https://github.com/lestrrat-go/jwx/v2/jwt`) + t.Set(jwt.AudienceKey, `Golang Users`) + t.Set(jwt.IssuedAtKey, time.Unix(aLongLongTimeAgo, 0)) + t.Set(`privateClaimKey`, `Hello, World!`) + + buf, err := json.MarshalIndent(t, "", " ") + if err != nil { + fmt.Printf("failed to generate JSON: %s\n", err) + return + } + + fmt.Printf("%s\n", buf) + fmt.Printf("aud -> '%s'\n", t.Audience()) + fmt.Printf("iat -> '%s'\n", t.IssuedAt().Format(time.RFC3339)) + if v, ok := t.Get(`privateClaimKey`); ok { + fmt.Printf("privateClaimKey -> '%s'\n", v) + } + fmt.Printf("sub -> '%s'\n", t.Subject()) + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Printf("failed to generate private key: %s", err) + return + } + + { + // Signing a token (using raw rsa.PrivateKey) + signed, err := jwt.Sign(t, jwt.WithKey(jwa.RS256, key)) + if err != nil { + log.Printf("failed to sign token: %s", err) + return + } + _ = signed + } + + { + // Signing a token (using JWK) + jwkKey, err := jwk.New(key) + if err != nil { + log.Printf("failed to create JWK key: %s", err) + return + } + + signed, err := jwt.Sign(t, jwt.WithKey(jwa.RS256, jwkKey)) + if err != nil { + log.Printf("failed to sign token: %s", err) + return + } + _ = signed + } +} +``` + +## OpenID Claims + +`jwt` package can work with token types other than the default one. +For OpenID claims, use the token created by `openid.New()`, or +use the `jwt.WithToken(openid.New())`. 
If you need to use other specialized +claims, use `jwt.WithToken()` to specify the exact token type + +```go +func Example_openid() { + const aLongLongTimeAgo = 233431200 + + t := openid.New() + t.Set(jwt.SubjectKey, `https://github.com/lestrrat-go/jwx/v2/jwt`) + t.Set(jwt.AudienceKey, `Golang Users`) + t.Set(jwt.IssuedAtKey, time.Unix(aLongLongTimeAgo, 0)) + t.Set(`privateClaimKey`, `Hello, World!`) + + addr := openid.NewAddress() + addr.Set(openid.AddressPostalCodeKey, `105-0011`) + addr.Set(openid.AddressCountryKey, `日本`) + addr.Set(openid.AddressRegionKey, `東京都`) + addr.Set(openid.AddressLocalityKey, `港区`) + addr.Set(openid.AddressStreetAddressKey, `芝公園 4-2-8`) + t.Set(openid.AddressKey, addr) + + buf, err := json.MarshalIndent(t, "", " ") + if err != nil { + fmt.Printf("failed to generate JSON: %s\n", err) + return + } + fmt.Printf("%s\n", buf) + + t2, err := jwt.Parse(buf, jwt.WithToken(openid.New())) + if err != nil { + fmt.Printf("failed to parse JSON: %s\n", err) + return + } + if _, ok := t2.(openid.Token); !ok { + fmt.Printf("using jwt.WithToken(openid.New()) creates an openid.Token instance") + return + } +} +``` + +# FAQ + +## Why is `jwt.Token` an interface? + +In this package, `jwt.Token` is an interface. This is not an arbitrary choice: there are actual reason for the type being an interface. + +We understand that if you are migrating from another library this may be a deal breaker, but we hope you can at least appreciate the fact that this was not done arbitrarily, and that there were real technical trade offs that were evaluated. + +### No uninitialized tokens + +First and foremost, by making it an interface, you cannot use an uninitialized token: + +```go +var token1 jwt.Token // this is nil, you can't just start using this +if err := json.Unmarshal(data, &token1); err != nil { // so you can't do this + ... 
+} + +// But you _can_ do this, and we _want_ you to do this so the object is properly initialized +token2 = jwt.New() +if err := json.Unmarshal(data, &token2); err != nil { // actually, in practice you should use jwt.Parse() + .... +} +``` + +### But why does it need to be initialized? + +There are several reasons, but one of the reasons is that I'm using a sync.Mutex to avoid races. We want this to be properly initialized. + +The other reason is that we support custom claims out of the box. The `map[string]interface{}` container is initialized during new. This is important when checking for equality using reflect-y methods (akin to `reflect.DeepEqual`), because if you allowed zero values, you could end up with "empty" tokens, that actually differ. Consider the following: + +```go +// assume jwt.Token was s struct, not an interface +token1 := jwt.Token{ privateClaims: make(map[string]interface{}) } +token2 := jwt.Token{ privateClaims: nil } +``` + +These are semantically equivalent, but users would need to be aware of this difference when comparing values. By forcing the user to use a constructor, we can force a uniform empty state. + +### Standard way to store values + +Unlike some other libraries, this library allows you to store standard claims and non-standard claims in the same token. + +You _want_ to store standard claims in a properly typed field, which we do for fields like "iss", "nbf", etc. +But for non-standard claims, there is just no way of doing this, so we _have_ to use a container like `map[string]interface{}` + +This means that if you allow direct access to these fields via a struct, you will have two different ways to access the claims, which is confusing: + +```go +tok.Issuer = ... +tok.PrivateClaims["foo"] = ... +``` + +So we want to hide where this data is stored, and use a standard method like `Set()` and `Get()` to store all the values. 
+At this point you are effectively going to hide the implementation detail from the user, so you end up with a struct like below, which is fundamentally not so different from providing just an interface{}: + +```go +type Token struct { + // unexported fields +} + +func (tok *Token) Set(...) { ... } +``` + +### Use of pointers to store values + +We wanted to differentiate the state between a claim being uninitialized, and a claim being initialized to empty. + +So we use pointers to store values: + +```go +type stdToken struct { + .... + issuer *string // if nil, uninitialized. if &(""), initialized to empty +} +``` + +This is fine for us, but we doubt that this would be something users would want to do. +This is a subtle difference, but cluttering up the API with slight variations of the same type (i.e. pointers vs non-pointers) seemed like a bad idea to us. + +```go +token.Issuer = &issuer // want to avoid this + +token.Set(jwt.IssuerKey, "foobar") // so this is what we picked +``` + +This way users no longer need to care how the data is internally stored. + +### Allow more than one type of token through the same interface + +`dgrijalva/jwt-go` does this in a different way, but we felt that it would be more intuitive for all tokens to follow a single interface so there is fewer type conversions required. + +See the `openid` token for an example. diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/builder_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/builder_gen.go new file mode 100644 index 0000000000..a588bc690d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/builder_gen.go @@ -0,0 +1,70 @@ +// This file is auto-generated by jwt/internal/cmd/gentoken/main.go. DO NOT EDIT + +package jwt + +import ( + "fmt" + "time" +) + +// Builder is a convenience wrapper around the New() constructor +// and the Set() methods to assign values to Token claims. 
+// Users can successively call Claim() on the Builder, and have it +// construct the Token when Build() is called. This alleviates the +// need for the user to check for the return value of every single +// Set() method call. +// Note that each call to Claim() overwrites the value set from the +// previous call. +type Builder struct { + claims []*ClaimPair +} + +func NewBuilder() *Builder { + return &Builder{} +} + +func (b *Builder) Claim(name string, value interface{}) *Builder { + b.claims = append(b.claims, &ClaimPair{Key: name, Value: value}) + return b +} + +func (b *Builder) Audience(v []string) *Builder { + return b.Claim(AudienceKey, v) +} + +func (b *Builder) Expiration(v time.Time) *Builder { + return b.Claim(ExpirationKey, v) +} + +func (b *Builder) IssuedAt(v time.Time) *Builder { + return b.Claim(IssuedAtKey, v) +} + +func (b *Builder) Issuer(v string) *Builder { + return b.Claim(IssuerKey, v) +} + +func (b *Builder) JwtID(v string) *Builder { + return b.Claim(JwtIDKey, v) +} + +func (b *Builder) NotBefore(v time.Time) *Builder { + return b.Claim(NotBeforeKey, v) +} + +func (b *Builder) Subject(v string) *Builder { + return b.Claim(SubjectKey, v) +} + +// Build creates a new token based on the claims that the builder has received +// so far. 
If a claim cannot be set, then the method returns a nil Token with +// a en error as a second return value +func (b *Builder) Build() (Token, error) { + tok := New() + for _, claim := range b.claims { + if err := tok.Set(claim.Key.(string), claim.Value); err != nil { + return nil, fmt.Errorf(`failed to set claim %q: %w`, claim.Key.(string), err) + } + } + return tok, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/http.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/http.go new file mode 100644 index 0000000000..a8edc6036e --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/http.go @@ -0,0 +1,188 @@ +package jwt + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/lestrrat-go/jwx/v2/internal/pool" +) + +// ParseHeader parses a JWT stored in a http.Header. +// +// For the header "Authorization", it will strip the prefix "Bearer " and will +// treat the remaining value as a JWT. +func ParseHeader(hdr http.Header, name string, options ...ParseOption) (Token, error) { + key := http.CanonicalHeaderKey(name) + v := strings.TrimSpace(hdr.Get(key)) + if v == "" { + return nil, fmt.Errorf(`empty header (%s)`, key) + } + + if key == "Authorization" { + // Authorization header is an exception. We strip the "Bearer " from + // the prefix + v = strings.TrimSpace(strings.TrimPrefix(v, "Bearer")) + } + + return ParseString(v, options...) +} + +// ParseForm parses a JWT stored in a url.Value. +func ParseForm(values url.Values, name string, options ...ParseOption) (Token, error) { + v := strings.TrimSpace(values.Get(name)) + if v == "" { + return nil, fmt.Errorf(`empty value (%s)`, name) + } + + return ParseString(v, options...) +} + +// ParseRequest searches a http.Request object for a JWT token. +// +// Specifying WithHeaderKey() will tell it to search under a specific +// header key. Specifying WithFormKey() will tell it to search under +// a specific form field. +// +// By default, "Authorization" header will be searched. 
+// +// If WithHeaderKey() is used, you must explicitly re-enable searching for "Authorization" header. +// +// # searches for "Authorization" +// jwt.ParseRequest(req) +// +// # searches for "x-my-token" ONLY. +// jwt.ParseRequest(req, jwt.WithHeaderKey("x-my-token")) +// +// # searches for "Authorization" AND "x-my-token" +// jwt.ParseRequest(req, jwt.WithHeaderKey("Authorization"), jwt.WithHeaderKey("x-my-token")) +func ParseRequest(req *http.Request, options ...ParseOption) (Token, error) { + var hdrkeys []string + var formkeys []string + var parseOptions []ParseOption + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identHeaderKey{}: + hdrkeys = append(hdrkeys, option.Value().(string)) + case identFormKey{}: + formkeys = append(formkeys, option.Value().(string)) + default: + parseOptions = append(parseOptions, option) + } + } + if len(hdrkeys) == 0 { + hdrkeys = append(hdrkeys, "Authorization") + } + + mhdrs := pool.GetKeyToErrorMap() + defer pool.ReleaseKeyToErrorMap(mhdrs) + mfrms := pool.GetKeyToErrorMap() + defer pool.ReleaseKeyToErrorMap(mfrms) + + for _, hdrkey := range hdrkeys { + // Check presence via a direct map lookup + if _, ok := req.Header[http.CanonicalHeaderKey(hdrkey)]; !ok { + // if non-existent, not error + continue + } + + tok, err := ParseHeader(req.Header, hdrkey, parseOptions...) + if err != nil { + mhdrs[hdrkey] = err + continue + } + return tok, nil + } + + if cl := req.ContentLength; cl > 0 { + if err := req.ParseForm(); err != nil { + return nil, fmt.Errorf(`failed to parse form: %w`, err) + } + } + + for _, formkey := range formkeys { + // Check presence via a direct map lookup + if _, ok := req.Form[formkey]; !ok { + // if non-existent, not error + continue + } + + tok, err := ParseForm(req.Form, formkey, parseOptions...) + if err != nil { + mfrms[formkey] = err + continue + } + return tok, nil + } + + // Everything below is a preulde to error reporting. 
+ var triedHdrs strings.Builder + for i, hdrkey := range hdrkeys { + if i > 0 { + triedHdrs.WriteString(", ") + } + triedHdrs.WriteString(strconv.Quote(hdrkey)) + } + + var triedForms strings.Builder + for i, formkey := range formkeys { + if i > 0 { + triedForms.WriteString(", ") + } + triedForms.WriteString(strconv.Quote(formkey)) + } + + var b strings.Builder + b.WriteString(`failed to find a valid token in any location of the request (tried: [header keys: `) + b.WriteString(triedHdrs.String()) + b.WriteByte(']') + if triedForms.Len() > 0 { + b.WriteString(", form keys: [") + b.WriteString(triedForms.String()) + b.WriteByte(']') + } + b.WriteByte(')') + + lmhdrs := len(mhdrs) + lmfrms := len(mfrms) + if lmhdrs > 0 || lmfrms > 0 { + b.WriteString(". Additionally, errors were encountered during attempts to parse") + + if lmhdrs > 0 { + b.WriteString(" headers: (") + count := 0 + for hdrkey, err := range mhdrs { + if count > 0 { + b.WriteString(", ") + } + b.WriteString("[header key: ") + b.WriteString(strconv.Quote(hdrkey)) + b.WriteString(", error: ") + b.WriteString(strconv.Quote(err.Error())) + b.WriteString("]") + count++ + } + b.WriteString(")") + } + + if lmfrms > 0 { + count := 0 + b.WriteString(" forms: (") + for formkey, err := range mfrms { + if count > 0 { + b.WriteString(", ") + } + b.WriteString("[form key: ") + b.WriteString(strconv.Quote(formkey)) + b.WriteString(", error: ") + b.WriteString(strconv.Quote(err.Error())) + b.WriteString("]") + count++ + } + } + } + return nil, fmt.Errorf(b.String()) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/interface.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/interface.go new file mode 100644 index 0000000000..3a4352e10c --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/interface.go @@ -0,0 +1,14 @@ +package jwt + +import ( + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" +) + +type ClaimPair = 
mapiter.Pair +type Iterator = mapiter.Iterator +type Visitor = iter.MapVisitor +type VisitorFunc = iter.MapVisitorFunc +type DecodeCtx = json.DecodeCtx +type TokenWithDecodeCtx = json.DecodeCtxContainer diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/internal/types/date.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/internal/types/date.go new file mode 100644 index 0000000000..0878397f65 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/internal/types/date.go @@ -0,0 +1,191 @@ +package types + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/lestrrat-go/jwx/v2/internal/json" +) + +const ( + DefaultPrecision uint32 = 0 // second level + MaxPrecision uint32 = 9 // nanosecond level +) + +var Pedantic uint32 +var ParsePrecision = DefaultPrecision +var FormatPrecision = DefaultPrecision + +// NumericDate represents the date format used in the 'nbf' claim +type NumericDate struct { + time.Time +} + +func (n *NumericDate) Get() time.Time { + if n == nil { + return (time.Time{}).UTC() + } + return n.Time +} + +func intToTime(v interface{}, t *time.Time) bool { + var n int64 + switch x := v.(type) { + case int64: + n = x + case int32: + n = int64(x) + case int16: + n = int64(x) + case int8: + n = int64(x) + case int: + n = int64(x) + default: + return false + } + + *t = time.Unix(n, 0) + return true +} + +func parseNumericString(x string) (time.Time, error) { + var t time.Time // empty time for empty return value + + // Only check for the escape hatch if it's the pedantic + // flag is off + if Pedantic != 1 { + // This is an escape hatch for non-conformant providers + // that gives us RFC3339 instead of epoch time + for _, r := range x { + // 0x30 = '0', 0x39 = '9', 0x2E = '.' 
+ if (r >= 0x30 && r <= 0x39) || r == 0x2E { + continue + } + + // if it got here, then it probably isn't epoch time + tv, err := time.Parse(time.RFC3339, x) + if err != nil { + return t, fmt.Errorf(`value is not number of seconds since the epoch, and attempt to parse it as RFC3339 timestamp failed: %w`, err) + } + return tv, nil + } + } + + var fractional string + whole := x + if i := strings.IndexRune(x, '.'); i > 0 { + if ParsePrecision > 0 && len(x) > i+1 { + fractional = x[i+1:] // everything after the '.' + if int(ParsePrecision) < len(fractional) { + // Remove insignificant digits + fractional = fractional[:int(ParsePrecision)] + } + // Replace missing fractional diits with zeros + for len(fractional) < int(MaxPrecision) { + fractional = fractional + "0" + } + } + whole = x[:i] + } + n, err := strconv.ParseInt(whole, 10, 64) + if err != nil { + return t, fmt.Errorf(`failed to parse whole value %q: %w`, whole, err) + } + var nsecs int64 + if fractional != "" { + v, err := strconv.ParseInt(fractional, 10, 64) + if err != nil { + return t, fmt.Errorf(`failed to parse fractional value %q: %w`, fractional, err) + } + nsecs = v + } + + return time.Unix(n, nsecs).UTC(), nil +} + +func (n *NumericDate) Accept(v interface{}) error { + var t time.Time + switch x := v.(type) { + case float32: + tv, err := parseNumericString(fmt.Sprintf(`%.9f`, x)) + if err != nil { + return fmt.Errorf(`failed to accept float32 %.9f: %w`, x, err) + } + t = tv + case float64: + tv, err := parseNumericString(fmt.Sprintf(`%.9f`, x)) + if err != nil { + return fmt.Errorf(`failed to accept float32 %.9f: %w`, x, err) + } + t = tv + case json.Number: + tv, err := parseNumericString(x.String()) + if err != nil { + return fmt.Errorf(`failed to accept json.Number %q: %w`, x.String(), err) + } + t = tv + case string: + tv, err := parseNumericString(x) + if err != nil { + return fmt.Errorf(`failed to accept string %q: %w`, x, err) + } + t = tv + case time.Time: + t = x + default: + if !intToTime(v, 
&t) { + return fmt.Errorf(`invalid type %T`, v) + } + } + n.Time = t.UTC() + return nil +} + +func (n NumericDate) String() string { + if FormatPrecision == 0 { + return strconv.FormatInt(n.Unix(), 10) + } + + // This is cheating,but it's better (easier) than doing floating point math + // We basically munge with strings after formatting an integer balue + // for nanoseconds since epoch + s := strconv.FormatInt(n.UnixNano(), 10) + for len(s) < int(MaxPrecision) { + s = "0" + s + } + + slwhole := len(s) - int(MaxPrecision) + s = s[:slwhole] + "." + s[slwhole:slwhole+int(FormatPrecision)] + if s[0] == '.' { + s = "0" + s + } + + return s +} + +// MarshalJSON translates from internal representation to JSON NumericDate +// See https://tools.ietf.org/html/rfc7519#page-6 +func (n *NumericDate) MarshalJSON() ([]byte, error) { + if n.IsZero() { + return json.Marshal(nil) + } + + return json.Marshal(n.String()) +} + +func (n *NumericDate) UnmarshalJSON(data []byte) error { + var v interface{} + if err := json.Unmarshal(data, &v); err != nil { + return fmt.Errorf(`failed to unmarshal date: %w`, err) + } + + var n2 NumericDate + if err := n2.Accept(v); err != nil { + return fmt.Errorf(`invalid value for NumericDate: %w`, err) + } + *n = n2 + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/internal/types/string.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/internal/types/string.go new file mode 100644 index 0000000000..eb67aefbea --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/internal/types/string.go @@ -0,0 +1,43 @@ +package types + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v2/internal/json" +) + +type StringList []string + +func (l StringList) Get() []string { + return []string(l) +} + +func (l *StringList) Accept(v interface{}) error { + switch x := v.(type) { + case string: + *l = StringList([]string{x}) + case []string: + *l = StringList(x) + case []interface{}: + list := make(StringList, len(x)) + for i, e := range x { + if s, ok 
:= e.(string); ok { + list[i] = s + continue + } + return fmt.Errorf(`invalid list element type %T`, e) + } + *l = list + default: + return fmt.Errorf(`invalid type: %T`, v) + } + return nil +} + +func (l *StringList) UnmarshalJSON(data []byte) error { + var v interface{} + if err := json.Unmarshal(data, &v); err != nil { + return fmt.Errorf(`failed to unmarshal data: %w`, err) + } + return l.Accept(v) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/io.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/io.go new file mode 100644 index 0000000000..ad5db4b87a --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/io.go @@ -0,0 +1,42 @@ +// Automatically generated by internal/cmd/genreadfile/main.go. DO NOT EDIT + +package jwt + +import ( + "io/fs" + "os" +) + +type sysFS struct{} + +func (sysFS) Open(path string) (fs.File, error) { + return os.Open(path) +} + +func ReadFile(path string, options ...ReadFileOption) (Token, error) { + var parseOptions []ParseOption + var readFileOptions []ReadFileOption + for _, option := range options { + if po, ok := option.(ParseOption); ok { + parseOptions = append(parseOptions, po) + } else { + readFileOptions = append(readFileOptions, option) + } + } + + var srcFS fs.FS = sysFS{} + for _, option := range options { + switch option.Ident() { + case identFS{}: + srcFS = option.Value().(fs.FS) + } + } + + f, err := srcFS.Open(path) + if err != nil { + return nil, err + } + + defer f.Close() + return ParseReader(f, parseOptions...) 
+} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/jwt.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/jwt.go new file mode 100644 index 0000000000..cd059a2632 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/jwt.go @@ -0,0 +1,470 @@ +//go:generate ../tools/cmd/genjwt.sh +//go:generate stringer -type=TokenOption -output=token_options_gen.go + +// Package jwt implements JSON Web Tokens as described in https://tools.ietf.org/html/rfc7519 +package jwt + +import ( + "bytes" + "errors" + "fmt" + "io" + "sync/atomic" + + "github.com/lestrrat-go/jwx/v2" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/jwx/v2/jwt/internal/types" +) + +var errInvalidJWT = errors.New(`invalid JWT`) + +// ErrInvalidJWT returns the opaque error value that is returned when +// `jwt.Parse` fails due to not being able to deduce the format of +// the incoming buffer +func ErrInvalidJWT() error { + return errInvalidJWT +} + +// Settings controls global settings that are specific to JWTs. 
+func Settings(options ...GlobalOption) { + var flattenAudienceBool bool + var parsePedantic bool + var parsePrecision = types.MaxPrecision + 1 // illegal value, so we can detect nothing was set + var formatPrecision = types.MaxPrecision + 1 // illegal value, so we can detect nothing was set + + //nolint:forcetypeassert + for _, option := range options { + switch option.Ident() { + case identFlattenAudience{}: + flattenAudienceBool = option.Value().(bool) + case identNumericDateParsePedantic{}: + parsePedantic = option.Value().(bool) + case identNumericDateParsePrecision{}: + v := option.Value().(int) + // only accept this value if it's in our desired range + if v >= 0 && v <= int(types.MaxPrecision) { + parsePrecision = uint32(v) + } + case identNumericDateFormatPrecision{}: + v := option.Value().(int) + // only accept this value if it's in our desired range + if v >= 0 && v <= int(types.MaxPrecision) { + formatPrecision = uint32(v) + } + } + } + + if parsePrecision <= types.MaxPrecision { // remember we set default to max + 1 + v := atomic.LoadUint32(&types.ParsePrecision) + if v != parsePrecision { + atomic.CompareAndSwapUint32(&types.ParsePrecision, v, parsePrecision) + } + } + + if formatPrecision <= types.MaxPrecision { // remember we set default to max + 1 + v := atomic.LoadUint32(&types.FormatPrecision) + if v != formatPrecision { + atomic.CompareAndSwapUint32(&types.FormatPrecision, v, formatPrecision) + } + } + + { + v := atomic.LoadUint32(&types.Pedantic) + if (v == 1) != parsePedantic { + var newVal uint32 + if parsePedantic { + newVal = 1 + } + atomic.CompareAndSwapUint32(&types.Pedantic, v, newVal) + } + } + + { + defaultOptionsMu.Lock() + if flattenAudienceBool { + defaultOptions.Enable(FlattenAudience) + } else { + defaultOptions.Disable(FlattenAudience) + } + defaultOptionsMu.Unlock() + } +} + +var registry = json.NewRegistry() + +// ParseString calls Parse against a string +func ParseString(s string, options ...ParseOption) (Token, error) { + 
return parseBytes([]byte(s), options...) +} + +// Parse parses the JWT token payload and creates a new `jwt.Token` object. +// The token must be encoded in either JSON format or compact format. +// +// This function can only work with either raw JWT (JSON) and JWS (Compact or JSON). +// If you need JWE support on top of it, you will need to rollout your +// own workaround. +// +// If the token is signed and you want to verify the payload matches the signature, +// you must pass the jwt.WithKey(alg, key) or jwt.WithKeySet(jwk.Set) option. +// If you do not specify these parameters, no verification will be performed. +// +// During verification, if the JWS headers specify a key ID (`kid`), the +// key used for verification must match the specified ID. If you are somehow +// using a key without a `kid` (which is highly unlikely if you are working +// with a JWT from a well know provider), you can workaround this by modifying +// the `jwk.Key` and setting the `kid` header. +// +// If you also want to assert the validity of the JWT itself (i.e. expiration +// and such), use the `Validate()` function on the returned token, or pass the +// `WithValidate(true)` option. Validate options can also be passed to +// `Parse` +// +// This function takes both ParseOption and ValidateOption types: +// ParseOptions control the parsing behavior, and ValidateOptions are +// passed to `Validate()` when `jwt.WithValidate` is specified. +func Parse(s []byte, options ...ParseOption) (Token, error) { + return parseBytes(s, options...) +} + +// ParseInsecure is exactly the same as Parse(), but it disables +// signature verification and token validation. +// +// You cannot override `jwt.WithVerify()` or `jwt.WithValidate()` +// using this function. 
Providing these options would result in +// an error +func ParseInsecure(s []byte, options ...ParseOption) (Token, error) { + for _, option := range options { + switch option.Ident() { + case identVerify{}, identValidate{}: + return nil, fmt.Errorf(`jwt.ParseInsecure: jwt.WithVerify() and jwt.WithValidate() may not be specified`) + } + } + + options = append(options, WithVerify(false), WithValidate(false)) + return Parse(s, options...) +} + +// ParseReader calls Parse against an io.Reader +func ParseReader(src io.Reader, options ...ParseOption) (Token, error) { + // We're going to need the raw bytes regardless. Read it. + data, err := io.ReadAll(src) + if err != nil { + return nil, fmt.Errorf(`failed to read from token data source: %w`, err) + } + return parseBytes(data, options...) +} + +type parseCtx struct { + token Token + validateOpts []ValidateOption + verifyOpts []jws.VerifyOption + localReg *json.Registry + pedantic bool + skipVerification bool + validate bool +} + +func parseBytes(data []byte, options ...ParseOption) (Token, error) { + var ctx parseCtx + + // Validation is turned on by default. You need to specify + // jwt.WithValidate(false) if you want to disable it + ctx.validate = true + + // Verification is required (i.e., it is assumed that the incoming + // data is in JWS format) unless the user explicitly asks for + // it to be skipped. 
+ verification := true + + var verifyOpts []Option + for _, o := range options { + if v, ok := o.(ValidateOption); ok { + ctx.validateOpts = append(ctx.validateOpts, v) + continue + } + + //nolint:forcetypeassert + switch o.Ident() { + case identKey{}, identKeySet{}, identVerifyAuto{}, identKeyProvider{}: + verifyOpts = append(verifyOpts, o) + case identToken{}: + token, ok := o.Value().(Token) + if !ok { + return nil, fmt.Errorf(`invalid token passed via WithToken() option (%T)`, o.Value()) + } + ctx.token = token + case identPedantic{}: + ctx.pedantic = o.Value().(bool) + case identValidate{}: + ctx.validate = o.Value().(bool) + case identVerify{}: + verification = o.Value().(bool) + case identTypedClaim{}: + pair := o.Value().(claimPair) + if ctx.localReg == nil { + ctx.localReg = json.NewRegistry() + } + ctx.localReg.Register(pair.Name, pair.Value) + } + } + + lvo := len(verifyOpts) + if lvo == 0 && verification { + return nil, fmt.Errorf(`jwt.Parse: no keys for verification are provided (use jwt.WithVerify(false) to explicitly skip)`) + } + + if lvo > 0 { + converted, err := toVerifyOptions(verifyOpts...) + if err != nil { + return nil, fmt.Errorf(`jwt.Parse: failed to convert options into jws.VerifyOption: %w`, err) + } + ctx.verifyOpts = converted + } + + data = bytes.TrimSpace(data) + return parse(&ctx, data) +} + +const ( + _JwsVerifyInvalid = iota + _JwsVerifyDone + _JwsVerifyExpectNested + _JwsVerifySkipped +) + +var _ = _JwsVerifyInvalid + +func verifyJWS(ctx *parseCtx, payload []byte) ([]byte, int, error) { + if len(ctx.verifyOpts) == 0 { + return nil, _JwsVerifySkipped, nil + } + + verified, err := jws.Verify(payload, ctx.verifyOpts...) + return verified, _JwsVerifyDone, err +} + +// verify parameter exists to make sure that we don't accidentally skip +// over verification just because alg == "" or key == nil or something. 
+func parse(ctx *parseCtx, data []byte) (Token, error) { + payload := data + const maxDecodeLevels = 2 + + // If cty = `JWT`, we expect this to be a nested structure + var expectNested bool + +OUTER: + for i := 0; i < maxDecodeLevels; i++ { + switch kind := jwx.GuessFormat(payload); kind { + case jwx.JWT: + if ctx.pedantic { + if expectNested { + return nil, fmt.Errorf(`expected nested encrypted/signed payload, got raw JWT`) + } + } + + if i == 0 { + // We were NOT enveloped in other formats + if !ctx.skipVerification { + if _, _, err := verifyJWS(ctx, payload); err != nil { + return nil, err + } + } + } + + break OUTER + case jwx.InvalidFormat: + return nil, ErrInvalidJWT() + case jwx.UnknownFormat: + // "Unknown" may include invalid JWTs, for example, those who lack "aud" + // claim. We could be pedantic and reject these + if ctx.pedantic { + return nil, fmt.Errorf(`unknown JWT format (pedantic)`) + } + + if i == 0 { + // We were NOT enveloped in other formats + if !ctx.skipVerification { + if _, _, err := verifyJWS(ctx, payload); err != nil { + return nil, err + } + } + } + break OUTER + case jwx.JWS: + // Food for thought: This is going to break if you have multiple layers of + // JWS enveloping using different keys. It is highly unlikely use case, + // but it might happen. + + // skipVerification should only be set to true by us. It's used + // when we just want to parse the JWT out of a payload + if !ctx.skipVerification { + // nested return value means: + // false (next envelope _may_ need to be processed) + // true (next envelope MUST be processed) + v, state, err := verifyJWS(ctx, payload) + if err != nil { + return nil, err + } + + if state != _JwsVerifySkipped { + payload = v + + // We only check for cty and typ if the pedantic flag is enabled + if !ctx.pedantic { + continue + } + + if state == _JwsVerifyExpectNested { + expectNested = true + continue OUTER + } + + // if we're not nested, we found our target. 
bail out of this loop + break OUTER + } + } + + // No verification. + m, err := jws.Parse(data) + if err != nil { + return nil, fmt.Errorf(`invalid jws message: %w`, err) + } + payload = m.Payload() + default: + return nil, fmt.Errorf(`unsupported format (layer: #%d)`, i+1) + } + expectNested = false + } + + if ctx.token == nil { + ctx.token = New() + } + + if ctx.localReg != nil { + dcToken, ok := ctx.token.(TokenWithDecodeCtx) + if !ok { + return nil, fmt.Errorf(`typed claim was requested, but the token (%T) does not support DecodeCtx`, ctx.token) + } + dc := json.NewDecodeCtx(ctx.localReg) + dcToken.SetDecodeCtx(dc) + defer func() { dcToken.SetDecodeCtx(nil) }() + } + + if err := json.Unmarshal(payload, ctx.token); err != nil { + return nil, fmt.Errorf(`failed to parse token: %w`, err) + } + + if ctx.validate { + if err := Validate(ctx.token, ctx.validateOpts...); err != nil { + return nil, err + } + } + return ctx.token, nil +} + +// Sign is a convenience function to create a signed JWT token serialized in +// compact form. +// +// It accepts either a raw key (e.g. rsa.PrivateKey, ecdsa.PrivateKey, etc) +// or a jwk.Key, and the name of the algorithm that should be used to sign +// the token. +// +// If the key is a jwk.Key and the key contains a key ID (`kid` field), +// then it is added to the protected header generated by the signature +// +// The algorithm specified in the `alg` parameter must be able to support +// the type of key you provided, otherwise an error is returned. +// For convenience `alg` is of type jwa.KeyAlgorithm so you can pass +// the return value of `(jwk.Key).Algorithm()` directly, but in practice +// it must be an instance of jwa.SignatureAlgorithm, otherwise an error +// is returned. +// +// The protected header will also automatically have the `typ` field set +// to the literal value `JWT`, unless you provide a custom value for it +// by jwt.WithHeaders option. 
+func Sign(t Token, options ...SignOption) ([]byte, error) { + var soptions []jws.SignOption + if l := len(options); l > 0 { + // we need to from SignOption to Option because ... reasons + // (todo: when go1.18 prevails, use type parameters + rawoptions := make([]Option, l) + for i, option := range options { + rawoptions[i] = option + } + + converted, err := toSignOptions(rawoptions...) + if err != nil { + return nil, fmt.Errorf(`jwt.Sign: failed to convert options into jws.SignOption: %w`, err) + } + soptions = converted + } + return NewSerializer().sign(soptions...).Serialize(t) +} + +// Equal compares two JWT tokens. Do not use `reflect.Equal` or the like +// to compare tokens as they will also compare extra detail such as +// sync.Mutex objects used to control concurrent access. +// +// The comparison for values is currently done using a simple equality ("=="), +// except for time.Time, which uses time.Equal after dropping the monotonic +// clock and truncating the values to 1 second accuracy. +// +// if both t1 and t2 are nil, returns true +func Equal(t1, t2 Token) bool { + if t1 == nil && t2 == nil { + return true + } + + // we already checked for t1 == t2 == nil, so safe to do this + if t1 == nil || t2 == nil { + return false + } + + j1, err := json.Marshal(t1) + if err != nil { + return false + } + + j2, err := json.Marshal(t2) + if err != nil { + return false + } + + return bytes.Equal(j1, j2) +} + +func (t *stdToken) Clone() (Token, error) { + dst := New() + + dst.Options().Set(*(t.Options())) + for _, pair := range t.makePairs() { + //nolint:forcetypeassert + key := pair.Key.(string) + if err := dst.Set(key, pair.Value); err != nil { + return nil, fmt.Errorf(`failed to set %s: %w`, key, err) + } + } + return dst, nil +} + +// RegisterCustomField allows users to specify that a private field +// be decoded as an instance of the specified type. This option has +// a global effect. 
+// +// For example, suppose you have a custom field `x-birthday`, which +// you want to represent as a string formatted in RFC3339 in JSON, +// but want it back as `time.Time`. +// +// In that case you would register a custom field as follows +// +// jwt.RegisterCustomField(`x-birthday`, timeT) +// +// Then `token.Get("x-birthday")` will still return an `interface{}`, +// but you can convert its type to `time.Time` +// +// bdayif, _ := token.Get(`x-birthday`) +// bday := bdayif.(time.Time) +func RegisterCustomField(name string, object interface{}) { + registry.Register(name, object) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/options.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/options.go new file mode 100644 index 0000000000..fe61faeec3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/options.go @@ -0,0 +1,296 @@ +package jwt + +import ( + "fmt" + "time" + + "github.com/lestrrat-go/jwx/v2/jwa" + "github.com/lestrrat-go/jwx/v2/jwe" + "github.com/lestrrat-go/jwx/v2/jwk" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/option" +) + +type identKey struct{} +type identKeySet struct{} +type identTypedClaim struct{} +type identVerifyAuto struct{} + +func toSignOptions(options ...Option) ([]jws.SignOption, error) { + var soptions []jws.SignOption + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identKey{}: + wk := option.Value().(*withKey) // this always succeeds + var wksoptions []jws.WithKeySuboption + for _, subopt := range wk.options { + wksopt, ok := subopt.(jws.WithKeySuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySuboption, but got %T`, subopt) + } + wksoptions = append(wksoptions, wksopt) + } + + soptions = append(soptions, jws.WithKey(wk.alg, wk.key, wksoptions...)) + } + } + return soptions, nil +} + +func toEncryptOptions(options ...Option) ([]jwe.EncryptOption, error) { + var soptions []jwe.EncryptOption + for _, 
option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identKey{}: + wk := option.Value().(*withKey) // this always succeeds + var wksoptions []jwe.WithKeySuboption + for _, subopt := range wk.options { + wksopt, ok := subopt.(jwe.WithKeySuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jwe.WithKeySuboption, but got %T`, subopt) + } + wksoptions = append(wksoptions, wksopt) + } + + soptions = append(soptions, jwe.WithKey(wk.alg, wk.key, wksoptions...)) + } + } + return soptions, nil +} + +func toVerifyOptions(options ...Option) ([]jws.VerifyOption, error) { + var voptions []jws.VerifyOption + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identKey{}: + wk := option.Value().(*withKey) // this always succeeds + var wksoptions []jws.WithKeySuboption + for _, subopt := range wk.options { + wksopt, ok := subopt.(jws.WithKeySuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySuboption, but got %T`, subopt) + } + wksoptions = append(wksoptions, wksopt) + } + + voptions = append(voptions, jws.WithKey(wk.alg, wk.key, wksoptions...)) + case identKeySet{}: + wks := option.Value().(*withKeySet) // this always succeeds + var wkssoptions []jws.WithKeySetSuboption + for _, subopt := range wks.options { + wkssopt, ok := subopt.(jws.WithKeySetSuboption) + if !ok { + return nil, fmt.Errorf(`expected optional arguments in jwt.WithKey to be jws.WithKeySetSuboption, but got %T`, subopt) + } + wkssoptions = append(wkssoptions, wkssopt) + } + + voptions = append(voptions, jws.WithKeySet(wks.set, wkssoptions...)) + case identVerifyAuto{}: + // this one doesn't need conversion. 
just get the stored option + voptions = append(voptions, option.Value().(jws.VerifyOption)) + case identKeyProvider{}: + kp, ok := option.Value().(jws.KeyProvider) + if !ok { + return nil, fmt.Errorf(`expected jws.KeyProvider, got %T`, option.Value()) + } + voptions = append(voptions, jws.WithKeyProvider(kp)) + } + } + return voptions, nil +} + +type withKey struct { + alg jwa.KeyAlgorithm + key interface{} + options []Option +} + +// WithKey is a multi-purpose option. It can be used for either jwt.Sign, jwt.Parse (and +// its siblings), and jwt.Serializer methods. +// +// It is the caller's responsibility to match the suboptions to the operation that they +// are performing. For example, you are not allowed to do this: +// +// jwt.Sign(token, jwt.WithKey(alg, key, jweOptions...)) +// +// In the above example, the creation of the option via `jwt.WithKey()` will work, but +// when `jwt.Sign()` is called, the fact that you passed JWE suboptions will be +// detected, and it will be an error. +func WithKey(alg jwa.KeyAlgorithm, key interface{}, suboptions ...Option) SignEncryptParseOption { + return &signEncryptParseOption{option.New(identKey{}, &withKey{ + alg: alg, + key: key, + options: suboptions, + })} +} + +type withKeySet struct { + set jwk.Set + options []interface{} +} + +// WithKeySet forces the Parse method to verify the JWT message +// using one of the keys in the given key set. +// +// Key IDs (`kid`) in the JWS message and the JWK in the given `jwk.Set` +// must match in order for the key to be a candidate to be used for +// verification. +// +// This is for security reasons. If you must disable it, you can do so by +// specifying `jws.WithRequireKid(false)` in the suboptions. But we don't +// recommend it unless you know exactly what the security implications are +// +// When using this option, keys MUST have a proper 'alg' field +// set. This is because we need to know the exact algorithm that +// you (the user) wants to use to verify the token. 
We do NOT +// trust the token's headers, because they can easily be tampered with. +// +// However, there _is_ a workaround if you do understand the risks +// of allowing a library to automatically choose a signature verification strategy, +// and you do not mind the verification process having to possibly +// attempt using multiple times before succeeding to verify. See +// `jws.InferAlgorithmFromKey` option +// +// If you have only one key in the set, and are sure you want to +// use that key, you can use the `jwt.WithDefaultKey` option. +func WithKeySet(set jwk.Set, options ...interface{}) ParseOption { + return &parseOption{option.New(identKeySet{}, &withKeySet{ + set: set, + options: options, + })} +} + +// WithIssuer specifies that expected issuer value. If not specified, +// the value of issuer is not verified at all. +func WithIssuer(s string) ValidateOption { + return WithValidator(issuerClaimValueIs(s)) +} + +// WithSubject specifies that expected subject value. If not specified, +// the value of subject is not verified at all. +func WithSubject(s string) ValidateOption { + return WithValidator(ClaimValueIs(SubjectKey, s)) +} + +// WithJwtID specifies that expected jti value. If not specified, +// the value of jti is not verified at all. +func WithJwtID(s string) ValidateOption { + return WithValidator(ClaimValueIs(JwtIDKey, s)) +} + +// WithAudience specifies that expected audience value. +// `Validate()` will return true if one of the values in the `aud` element +// matches this value. If not specified, the value of issuer is not +// verified at all. 
+func WithAudience(s string) ValidateOption { + return WithValidator(audienceClaimContainsString(s)) +} + +// WithClaimValue specifies the expected value for a given claim +func WithClaimValue(name string, v interface{}) ValidateOption { + return WithValidator(ClaimValueIs(name, v)) +} + +type claimPair struct { + Name string + Value interface{} +} + +// WithTypedClaim allows a private claim to be parsed into the object type of +// your choice. It works much like the RegisterCustomField, but the effect +// is only applicable to the jwt.Parse function call which receives this option. +// +// While this can be extremely useful, this option should be used with caution: +// There are many caveats that your entire team/user-base needs to be aware of, +// and therefore in general its use is discouraged. Only use it when you know +// what you are doing, and you document its use clearly for others. +// +// First and foremost, this is a "per-object" option. Meaning that given the same +// serialized format, it is possible to generate two objects whose internal +// representations may differ. That is, if you parse one _WITH_ the option, +// and the other _WITHOUT_, their internal representation may completely differ. +// This could potentially lead to problems. +// +// Second, specifying this option will slightly slow down the decoding process +// as it needs to consult multiple definitions sources (global and local), so +// be careful if you are decoding a large number of tokens, as the effects will stack up. +// +// Finally, this option will also NOT work unless the tokens themselves support such +// parsing mechanism. For example, while tokens obtained from `jwt.New()` and +// `openid.New()` will respect this option, if you provide your own custom +// token type, it will need to implement the TokenWithDecodeCtx interface. 
+func WithTypedClaim(name string, object interface{}) ParseOption { + return &parseOption{option.New(identTypedClaim{}, claimPair{Name: name, Value: object})} +} + +// WithRequiredClaim specifies that the claim identified the given name +// must exist in the token. Only the existence of the claim is checked: +// the actual value associated with that field is not checked. +func WithRequiredClaim(name string) ValidateOption { + return WithValidator(IsRequired(name)) +} + +// WithMaxDelta specifies that given two claims `c1` and `c2` that represent time, the difference in +// time.Duration must be less than equal to the value specified by `d`. If `c1` or `c2` is the +// empty string, the current time (as computed by `time.Now` or the object passed via +// `WithClock()`) is used for the comparison. +// +// `c1` and `c2` are also assumed to be required, therefore not providing either claim in the +// token will result in an error. +// +// Because there is no way of reliably knowing how to parse private claims, we currently only +// support `iat`, `exp`, and `nbf` claims. +// +// If the empty string is passed to c1 or c2, then the current time (as calculated by time.Now() or +// the clock object provided via WithClock()) is used. +// +// For example, in order to specify that `exp` - `iat` should be less than 10*time.Second, you would write +// +// jwt.Validate(token, jwt.WithMaxDelta(10*time.Second, jwt.ExpirationKey, jwt.IssuedAtKey)) +// +// If AcceptableSkew of 2 second is specified, the above will return valid for any value of +// `exp` - `iat` between 8 (10-2) and 12 (10+2). +func WithMaxDelta(dur time.Duration, c1, c2 string) ValidateOption { + return WithValidator(MaxDeltaIs(c1, c2, dur)) +} + +// WithMinDelta is almost exactly the same as WithMaxDelta, but force validation to fail if +// the difference between time claims are less than dur. 
+// +// For example, in order to specify that `exp` - `iat` should be greater than 10*time.Second, you would write +// +// jwt.Validate(token, jwt.WithMinDelta(10*time.Second, jwt.ExpirationKey, jwt.IssuedAtKey)) +// +// The validation would fail if the difference is less than 10 seconds. +func WithMinDelta(dur time.Duration, c1, c2 string) ValidateOption { + return WithValidator(MinDeltaIs(c1, c2, dur)) +} + +// WithVerifyAuto specifies that the JWS verification should be attempted +// by using the data available in the JWS message. Currently only verification +// method available is to use the keys available in the JWKS URL pointed +// in the `jku` field. +// +// The first argument should either be `nil`, or your custom jwk.Fetcher +// object, which tells how the JWKS should be fetched. Leaving it to +// `nil` is equivalent to specifying that `jwk.Fetch` should be used. +// +// You can further pass options to customize the fetching behavior. +// +// One notable difference in the option available via the `jwt` +// package and the `jws.Verify()` or `jwk.Fetch()` functions is that +// by default all fetching is disabled unless you explicitly whitelist urls. 
+// Therefore, when you use this option you WILL have to specify at least +// the `jwk.WithFetchWhitelist()` suboption: as: +// +// jwt.Parse(data, jwt.WithVerifyAuto(nil, jwk.WithFetchWhitelist(...))) +// +// See the list of available options that you can pass to `jwk.Fetch()` +// in the `jwk` package +func WithVerifyAuto(f jwk.Fetcher, options ...jwk.FetchOption) ParseOption { + return &parseOption{option.New(identVerifyAuto{}, jws.WithVerifyAuto(f, options...))} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/options.yaml b/vendor/github.com/lestrrat-go/jwx/v2/jwt/options.yaml new file mode 100644 index 0000000000..18179b2624 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/options.yaml @@ -0,0 +1,207 @@ +package_name: jwt +output: jwt/options_gen.go +interfaces: + - name: GlobalOption + comment: | + GlobalOption describes an Option that can be passed to `Settings()`. + - name: EncryptOption + comment: | + EncryptOption describes an Option that can be passed to (jwt.Serializer).Encrypt + - name: ParseOption + methods: + - parseOption + - readFileOption + comment: | + ParseOption describes an Option that can be passed to `jwt.Parse()`. + ParseOption also implements ReadFileOption, therefore it may be + safely pass them to `jwt.ReadFile()` + - name: SignOption + comment: | + SignOption describes an Option that can be passed to `jwt.Sign()` or + (jwt.Serializer).Sign + - name: SignEncryptParseOption + methods: + - parseOption + - encryptOption + - readFileOption + - signOption + comment: | + SignParseOption describes an Option that can be passed to both `jwt.Sign()` or + `jwt.Parse()` + - name: ValidateOption + methods: + - parseOption + - readFileOption + - validateOption + comment: | + ValidateOption describes an Option that can be passed to Validate(). 
+ ValidateOption also implements ParseOption, therefore it may be + safely passed to `Parse()` (and thus `jwt.ReadFile()`) + - name: ReadFileOption + comment: | + ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` +options: + - ident: AcceptableSkew + interface: ValidateOption + argument_type: time.Duration + comment: | + WithAcceptableSkew specifies the duration in which exp and nbf + claims may differ by. This value should be positive + - ident: Truncation + interface: ValidateOption + argument_type: time.Duration + comment: | + WithTruncation speficies the amount that should be used when + truncating time values used during time-based validation routines. + By default time values are truncated down to second accuracy. + If you want to use sub-second accuracy, you will need to set + this value to 0. + - ident: Clock + interface: ValidateOption + argument_type: Clock + comment: | + WithClock specifies the `Clock` to be used when verifying + exp and nbf claims. + - ident: Context + interface: ValidateOption + argument_type: context.Context + comment: | + WithContext allows you to specify a context.Context object to be used + with `jwt.Validate()` option. + + Please be aware that in the next major release of this library, + `jwt.Validate()`'s signature will change to include an explicit + `context.Context` object. + - ident: FlattenAudience + interface: GlobalOption + argument_type: bool + comment: | + WithFlattenAudience specifies the the `jwt.FlattenAudience` option on + every token defaults to enabled. You can still disable this on a per-object + basis using the `jwt.Options().Disable(jwt.FlattenAudience)` method call. + + See the documentation for `jwt.TokenOptionSet`, `(jwt.Token).Options`, and + `jwt.FlattenAudience` for more details + - ident: FormKey + interface: ParseOption + argument_type: string + comment: | + WithFormKey is used to specify header keys to search for tokens. 
+ + While the type system allows this option to be passed to jwt.Parse() directly, + doing so will have no effect. Only use it for HTTP request parsing functions + - ident: HeaderKey + interface: ParseOption + argument_type: string + comment: | + WithHeaderKey is used to specify header keys to search for tokens. + + While the type system allows this option to be passed to `jwt.Parse()` directly, + doing so will have no effect. Only use it for HTTP request parsing functions + - ident: Token + interface: ParseOption + argument_type: Token + comment: | + WithToken specifies the token instance where the result JWT is stored + when parsing JWT tokensthat is used when parsing + - ident: Validate + interface: ParseOption + argument_type: bool + comment: | + WithValidate is passed to `Parse()` method to denote that the + validation of the JWT token should be performed (or not) after + a successful parsing of the incoming payload. + + This option is enabled by default. + + If you would like disable validation, + you must use `jwt.WithValidate(false)` or use `jwt.ParseInsecure()` + - ident: Verify + interface: ParseOption + argument_type: bool + comment: | + WithVerify is passed to `Parse()` method to denote that the + signature verification should be performed after a successful + deserialization of the incoming payload. + + This option is enabled by default. + + If you do not provide any verification key sources, `jwt.Parse()` + would return an error. + + If you would like to only parse the JWT payload and not verify it, + you must use `jwt.WithVerify(false)` or use `jwt.ParseInsecure()` + - ident: KeyProvider + interface: ParseOption + argument_type: jws.KeyProvider + comment: | + WithKeyProvider allows users to specify an object to provide keys to + sign/verify tokens using arbitrary code. Please read the documentation + for `jws.KeyProvider` in the `jws` package for details on how this works. 
+ - ident: Pedantic + interface: ParseOption + argument_type: bool + comment: | + WithPedantic enables pedantic mode for parsing JWTs. Currently this only + applies to checking for the correct `typ` and/or `cty` when necessary. + - ident: EncryptOption + interface: EncryptOption + argument_type: jwe.EncryptOption + comment: | + WithEncryptOption provides an escape hatch for cases where extra options to + `(jws.Serializer).Encrypt()` must be specified when usng `jwt.Sign()`. Normally you do not + need to use this. + - ident: SignOption + interface: SignOption + argument_type: jws.SignOption + comment: | + WithSignOption provides an escape hatch for cases where extra options to + `jws.Sign()` must be specified when usng `jwt.Sign()`. Normally you do not + need to use this. + - ident: Validator + interface: ValidateOption + argument_type: Validator + comment: | + WithValidator validates the token with the given Validator. + + For example, in order to validate tokens that are only valid during August, you would write + + validator := jwt.ValidatorFunc(func(_ context.Context, t jwt.Token) error { + if time.Now().Month() != 8 { + return fmt.Errorf(`tokens are only valid during August!`) + } + return nil + }) + err := jwt.Validate(token, jwt.WithValidator(validator)) + - ident: FS + interface: ReadFileOption + argument_type: fs.FS + comment: | + WithFS specifies the source `fs.FS` object to read the file from. + - ident: NumericDateParsePrecision + interface: GlobalOption + argument_type: int + comment: | + WithNumericDateParsePrecision sets the precision up to which the + library uses to parse fractional dates found in the numeric date + fields. Default is 0 (second, no fractionals), max is 9 (nanosecond) + - ident: NumericDateFormatPrecision + interface: GlobalOption + argument_type: int + comment: | + WithNumericDateFormatPrecision sets the precision up to which the + library uses to format fractional dates found in the numeric date + fields. 
Default is 0 (second, no fractionals), max is 9 (nanosecond) + - ident: NumericDateParsePedantic + interface: GlobalOption + argument_type: bool + comment: | + WithNumericDateParsePedantic specifies if the parser should behave + in a pedantic manner when parsing numeric dates. Normally this library + attempts to interpret timestamps as a numeric value representing + number of seconds (with an optional fractional part), but if that fails + it tries to parse using a RFC3339 parser. This allows us to parse + payloads from non-comforming servers. + + However, when you set WithNumericDateParePedantic to `true`, the + RFC3339 parser is not tried, and we expect a numeric value strictly diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/options_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/options_gen.go new file mode 100644 index 0000000000..fb947eccff --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/options_gen.go @@ -0,0 +1,383 @@ +// This file is auto-generated by internal/cmd/genoptions/main.go. DO NOT EDIT + +package jwt + +import ( + "context" + "io/fs" + "time" + + "github.com/lestrrat-go/jwx/v2/jwe" + "github.com/lestrrat-go/jwx/v2/jws" + "github.com/lestrrat-go/option" +) + +type Option = option.Interface + +// EncryptOption describes an Option that can be passed to (jwt.Serializer).Encrypt +type EncryptOption interface { + Option + encryptOption() +} + +type encryptOption struct { + Option +} + +func (*encryptOption) encryptOption() {} + +// GlobalOption describes an Option that can be passed to `Settings()`. +type GlobalOption interface { + Option + globalOption() +} + +type globalOption struct { + Option +} + +func (*globalOption) globalOption() {} + +// ParseOption describes an Option that can be passed to `jwt.Parse()`. 
+// ParseOption also implements ReadFileOption, therefore it may be +// safely pass them to `jwt.ReadFile()` +type ParseOption interface { + Option + parseOption() + readFileOption() +} + +type parseOption struct { + Option +} + +func (*parseOption) parseOption() {} + +func (*parseOption) readFileOption() {} + +// ReadFileOption is a type of `Option` that can be passed to `jws.ReadFile` +type ReadFileOption interface { + Option + readFileOption() +} + +type readFileOption struct { + Option +} + +func (*readFileOption) readFileOption() {} + +// SignParseOption describes an Option that can be passed to both `jwt.Sign()` or +// `jwt.Parse()` +type SignEncryptParseOption interface { + Option + parseOption() + encryptOption() + readFileOption() + signOption() +} + +type signEncryptParseOption struct { + Option +} + +func (*signEncryptParseOption) parseOption() {} + +func (*signEncryptParseOption) encryptOption() {} + +func (*signEncryptParseOption) readFileOption() {} + +func (*signEncryptParseOption) signOption() {} + +// SignOption describes an Option that can be passed to `jwt.Sign()` or +// (jwt.Serializer).Sign +type SignOption interface { + Option + signOption() +} + +type signOption struct { + Option +} + +func (*signOption) signOption() {} + +// ValidateOption describes an Option that can be passed to Validate(). 
+// ValidateOption also implements ParseOption, therefore it may be +// safely passed to `Parse()` (and thus `jwt.ReadFile()`) +type ValidateOption interface { + Option + parseOption() + readFileOption() + validateOption() +} + +type validateOption struct { + Option +} + +func (*validateOption) parseOption() {} + +func (*validateOption) readFileOption() {} + +func (*validateOption) validateOption() {} + +type identAcceptableSkew struct{} +type identClock struct{} +type identContext struct{} +type identEncryptOption struct{} +type identFS struct{} +type identFlattenAudience struct{} +type identFormKey struct{} +type identHeaderKey struct{} +type identKeyProvider struct{} +type identNumericDateFormatPrecision struct{} +type identNumericDateParsePedantic struct{} +type identNumericDateParsePrecision struct{} +type identPedantic struct{} +type identSignOption struct{} +type identToken struct{} +type identTruncation struct{} +type identValidate struct{} +type identValidator struct{} +type identVerify struct{} + +func (identAcceptableSkew) String() string { + return "WithAcceptableSkew" +} + +func (identClock) String() string { + return "WithClock" +} + +func (identContext) String() string { + return "WithContext" +} + +func (identEncryptOption) String() string { + return "WithEncryptOption" +} + +func (identFS) String() string { + return "WithFS" +} + +func (identFlattenAudience) String() string { + return "WithFlattenAudience" +} + +func (identFormKey) String() string { + return "WithFormKey" +} + +func (identHeaderKey) String() string { + return "WithHeaderKey" +} + +func (identKeyProvider) String() string { + return "WithKeyProvider" +} + +func (identNumericDateFormatPrecision) String() string { + return "WithNumericDateFormatPrecision" +} + +func (identNumericDateParsePedantic) String() string { + return "WithNumericDateParsePedantic" +} + +func (identNumericDateParsePrecision) String() string { + return "WithNumericDateParsePrecision" +} + +func (identPedantic) 
String() string { + return "WithPedantic" +} + +func (identSignOption) String() string { + return "WithSignOption" +} + +func (identToken) String() string { + return "WithToken" +} + +func (identTruncation) String() string { + return "WithTruncation" +} + +func (identValidate) String() string { + return "WithValidate" +} + +func (identValidator) String() string { + return "WithValidator" +} + +func (identVerify) String() string { + return "WithVerify" +} + +// WithAcceptableSkew specifies the duration in which exp and nbf +// claims may differ by. This value should be positive +func WithAcceptableSkew(v time.Duration) ValidateOption { + return &validateOption{option.New(identAcceptableSkew{}, v)} +} + +// WithClock specifies the `Clock` to be used when verifying +// exp and nbf claims. +func WithClock(v Clock) ValidateOption { + return &validateOption{option.New(identClock{}, v)} +} + +// WithContext allows you to specify a context.Context object to be used +// with `jwt.Validate()` option. +// +// Please be aware that in the next major release of this library, +// `jwt.Validate()`'s signature will change to include an explicit +// `context.Context` object. +func WithContext(v context.Context) ValidateOption { + return &validateOption{option.New(identContext{}, v)} +} + +// WithEncryptOption provides an escape hatch for cases where extra options to +// `(jws.Serializer).Encrypt()` must be specified when usng `jwt.Sign()`. Normally you do not +// need to use this. +func WithEncryptOption(v jwe.EncryptOption) EncryptOption { + return &encryptOption{option.New(identEncryptOption{}, v)} +} + +// WithFS specifies the source `fs.FS` object to read the file from. +func WithFS(v fs.FS) ReadFileOption { + return &readFileOption{option.New(identFS{}, v)} +} + +// WithFlattenAudience specifies the the `jwt.FlattenAudience` option on +// every token defaults to enabled. 
You can still disable this on a per-object +// basis using the `jwt.Options().Disable(jwt.FlattenAudience)` method call. +// +// See the documentation for `jwt.TokenOptionSet`, `(jwt.Token).Options`, and +// `jwt.FlattenAudience` for more details +func WithFlattenAudience(v bool) GlobalOption { + return &globalOption{option.New(identFlattenAudience{}, v)} +} + +// WithFormKey is used to specify header keys to search for tokens. +// +// While the type system allows this option to be passed to jwt.Parse() directly, +// doing so will have no effect. Only use it for HTTP request parsing functions +func WithFormKey(v string) ParseOption { + return &parseOption{option.New(identFormKey{}, v)} +} + +// WithHeaderKey is used to specify header keys to search for tokens. +// +// While the type system allows this option to be passed to `jwt.Parse()` directly, +// doing so will have no effect. Only use it for HTTP request parsing functions +func WithHeaderKey(v string) ParseOption { + return &parseOption{option.New(identHeaderKey{}, v)} +} + +// WithKeyProvider allows users to specify an object to provide keys to +// sign/verify tokens using arbitrary code. Please read the documentation +// for `jws.KeyProvider` in the `jws` package for details on how this works. +func WithKeyProvider(v jws.KeyProvider) ParseOption { + return &parseOption{option.New(identKeyProvider{}, v)} +} + +// WithNumericDateFormatPrecision sets the precision up to which the +// library uses to format fractional dates found in the numeric date +// fields. Default is 0 (second, no fractionals), max is 9 (nanosecond) +func WithNumericDateFormatPrecision(v int) GlobalOption { + return &globalOption{option.New(identNumericDateFormatPrecision{}, v)} +} + +// WithNumericDateParsePedantic specifies if the parser should behave +// in a pedantic manner when parsing numeric dates. 
Normally this library +// attempts to interpret timestamps as a numeric value representing +// number of seconds (with an optional fractional part), but if that fails +// it tries to parse using a RFC3339 parser. This allows us to parse +// payloads from non-comforming servers. +// +// However, when you set WithNumericDateParePedantic to `true`, the +// RFC3339 parser is not tried, and we expect a numeric value strictly +func WithNumericDateParsePedantic(v bool) GlobalOption { + return &globalOption{option.New(identNumericDateParsePedantic{}, v)} +} + +// WithNumericDateParsePrecision sets the precision up to which the +// library uses to parse fractional dates found in the numeric date +// fields. Default is 0 (second, no fractionals), max is 9 (nanosecond) +func WithNumericDateParsePrecision(v int) GlobalOption { + return &globalOption{option.New(identNumericDateParsePrecision{}, v)} +} + +// WithPedantic enables pedantic mode for parsing JWTs. Currently this only +// applies to checking for the correct `typ` and/or `cty` when necessary. +func WithPedantic(v bool) ParseOption { + return &parseOption{option.New(identPedantic{}, v)} +} + +// WithSignOption provides an escape hatch for cases where extra options to +// `jws.Sign()` must be specified when usng `jwt.Sign()`. Normally you do not +// need to use this. +func WithSignOption(v jws.SignOption) SignOption { + return &signOption{option.New(identSignOption{}, v)} +} + +// WithToken specifies the token instance where the result JWT is stored +// when parsing JWT tokensthat is used when parsing +func WithToken(v Token) ParseOption { + return &parseOption{option.New(identToken{}, v)} +} + +// WithTruncation speficies the amount that should be used when +// truncating time values used during time-based validation routines. +// By default time values are truncated down to second accuracy. +// If you want to use sub-second accuracy, you will need to set +// this value to 0. 
+func WithTruncation(v time.Duration) ValidateOption { + return &validateOption{option.New(identTruncation{}, v)} +} + +// WithValidate is passed to `Parse()` method to denote that the +// validation of the JWT token should be performed (or not) after +// a successful parsing of the incoming payload. +// +// This option is enabled by default. +// +// If you would like disable validation, +// you must use `jwt.WithValidate(false)` or use `jwt.ParseInsecure()` +func WithValidate(v bool) ParseOption { + return &parseOption{option.New(identValidate{}, v)} +} + +// WithValidator validates the token with the given Validator. +// +// For example, in order to validate tokens that are only valid during August, you would write +// +// validator := jwt.ValidatorFunc(func(_ context.Context, t jwt.Token) error { +// if time.Now().Month() != 8 { +// return fmt.Errorf(`tokens are only valid during August!`) +// } +// return nil +// }) +// err := jwt.Validate(token, jwt.WithValidator(validator)) +func WithValidator(v Validator) ValidateOption { + return &validateOption{option.New(identValidator{}, v)} +} + +// WithVerify is passed to `Parse()` method to denote that the +// signature verification should be performed after a successful +// deserialization of the incoming payload. +// +// This option is enabled by default. +// +// If you do not provide any verification key sources, `jwt.Parse()` +// would return an error. 
+// +// If you would like to only parse the JWT payload and not verify it, +// you must use `jwt.WithVerify(false)` or use `jwt.ParseInsecure()` +func WithVerify(v bool) ParseOption { + return &parseOption{option.New(identVerify{}, v)} +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/serialize.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/serialize.go new file mode 100644 index 0000000000..1a5e467d03 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/serialize.go @@ -0,0 +1,264 @@ +package jwt + +import ( + "fmt" + + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/jwe" + "github.com/lestrrat-go/jwx/v2/jws" +) + +type SerializeCtx interface { + Step() int + Nested() bool +} + +type serializeCtx struct { + step int + nested bool +} + +func (ctx *serializeCtx) Step() int { + return ctx.step +} + +func (ctx *serializeCtx) Nested() bool { + return ctx.nested +} + +type SerializeStep interface { + Serialize(SerializeCtx, interface{}) (interface{}, error) +} + +// errStep is always an error. used to indicate that a method like +// serializer.Sign or Encrypt already errored out on configuration +type errStep struct { + err error +} + +func (e errStep) Serialize(_ SerializeCtx, _ interface{}) (interface{}, error) { + return nil, e.err +} + +// Serializer is a generic serializer for JWTs. Whereas other conveinience +// functions can only do one thing (such as generate a JWS signed JWT), +// Using this construct you can serialize the token however you want. +// +// By default the serializer only marshals the token into a JSON payload. +// You must set up the rest of the steps that should be taken by the +// serializer. +// +// For example, to marshal the token into JSON, then apply JWS and JWE +// in that order, you would do: +// +// serialized, err := jwt.NewSerialer(). +// Sign(jwa.RS256, key). +// Encrypt(jwa.RSA_OAEP, key.PublicKey). 
+// Serialize(token) +// +// The `jwt.Sign()` function is equivalent to +// +// serialized, err := jwt.NewSerializer(). +// Sign(...args...). +// Serialize(token) +type Serializer struct { + steps []SerializeStep +} + +// NewSerializer creates a new empty serializer. +func NewSerializer() *Serializer { + return &Serializer{} +} + +// Reset clears all of the registered steps. +func (s *Serializer) Reset() *Serializer { + s.steps = nil + return s +} + +// Step adds a new Step to the serialization process +func (s *Serializer) Step(step SerializeStep) *Serializer { + s.steps = append(s.steps, step) + return s +} + +type jsonSerializer struct{} + +func (jsonSerializer) Serialize(_ SerializeCtx, v interface{}) (interface{}, error) { + token, ok := v.(Token) + if !ok { + return nil, fmt.Errorf(`invalid input: expected jwt.Token`) + } + + buf, err := json.Marshal(token) + if err != nil { + return nil, fmt.Errorf(`failed to serialize as JSON`) + } + return buf, nil +} + +type genericHeader interface { + Get(string) (interface{}, bool) + Set(string, interface{}) error +} + +func setTypeOrCty(ctx SerializeCtx, hdrs genericHeader) error { + // cty and typ are common between JWE/JWS, so we don't use + // the constants in jws/jwe package here + const typKey = `typ` + const ctyKey = `cty` + + if ctx.Step() == 1 { + // We are executed immediately after json marshaling + if _, ok := hdrs.Get(typKey); !ok { + if err := hdrs.Set(typKey, `JWT`); err != nil { + return fmt.Errorf(`failed to set %s key to "JWT": %w`, typKey, err) + } + } + } else { + if ctx.Nested() { + // If this is part of a nested sequence, we should set cty = 'JWT' + // https://datatracker.ietf.org/doc/html/rfc7519#section-5.2 + if err := hdrs.Set(ctyKey, `JWT`); err != nil { + return fmt.Errorf(`failed to set %s key to "JWT": %w`, ctyKey, err) + } + } + } + return nil +} + +type jwsSerializer struct { + options []jws.SignOption +} + +func (s *jwsSerializer) Serialize(ctx SerializeCtx, v interface{}) (interface{}, 
error) { + payload, ok := v.([]byte) + if !ok { + return nil, fmt.Errorf(`expected []byte as input`) + } + + for _, option := range s.options { + pc, ok := option.Value().(interface{ Protected(jws.Headers) jws.Headers }) + if !ok { + continue + } + hdrs := pc.Protected(jws.NewHeaders()) + if err := setTypeOrCty(ctx, hdrs); err != nil { + return nil, err // this is already wrapped + } + + // JWTs MUST NOT use b64 = false + // https://datatracker.ietf.org/doc/html/rfc7797#section-7 + if v, ok := hdrs.Get("b64"); ok { + if bval, bok := v.(bool); bok { + if !bval { // b64 = false + return nil, fmt.Errorf(`b64 cannot be false for JWTs`) + } + } + } + } + return jws.Sign(payload, s.options...) +} + +func (s *Serializer) Sign(options ...SignOption) *Serializer { + var soptions []jws.SignOption + if l := len(options); l > 0 { + // we need to from SignOption to Option because ... reasons + // (todo: when go1.18 prevails, use type parameters + rawoptions := make([]Option, l) + for i, option := range options { + rawoptions[i] = option + } + + converted, err := toSignOptions(rawoptions...) + if err != nil { + return s.Step(errStep{fmt.Errorf(`(jwt.Serializer).Sign: failed to convert options into jws.SignOption: %w`, err)}) + } + soptions = converted + } + return s.sign(soptions...) +} + +func (s *Serializer) sign(options ...jws.SignOption) *Serializer { + return s.Step(&jwsSerializer{ + options: options, + }) +} + +type jweSerializer struct { + options []jwe.EncryptOption +} + +func (s *jweSerializer) Serialize(ctx SerializeCtx, v interface{}) (interface{}, error) { + payload, ok := v.([]byte) + if !ok { + return nil, fmt.Errorf(`expected []byte as input`) + } + + hdrs := jwe.NewHeaders() + if err := setTypeOrCty(ctx, hdrs); err != nil { + return nil, err // this is already wrapped + } + + options := append([]jwe.EncryptOption{jwe.WithMergeProtectedHeaders(true), jwe.WithProtectedHeaders(hdrs)}, s.options...) + return jwe.Encrypt(payload, options...) 
+} + +// Encrypt specifies the JWT to be serialized as an encrypted payload. +// +// One notable difference between this method and `jwe.Encrypt()` is that +// while `jwe.Encrypt()` OVERWRITES the previous headers when `jwe.WithProtectedHeaders()` +// is provided, this method MERGES them. This is due to the fact that we +// MUST add some extra headers to construct a proper JWE message. +// Be careful when you pass multiple `jwe.EncryptOption`s. +func (s *Serializer) Encrypt(options ...EncryptOption) *Serializer { + var eoptions []jwe.EncryptOption + if l := len(options); l > 0 { + // we need to from SignOption to Option because ... reasons + // (todo: when go1.18 prevails, use type parameters + rawoptions := make([]Option, l) + for i, option := range options { + rawoptions[i] = option + } + + converted, err := toEncryptOptions(rawoptions...) + if err != nil { + return s.Step(errStep{fmt.Errorf(`(jwt.Serializer).Encrypt: failed to convert options into jwe.EncryptOption: %w`, err)}) + } + eoptions = converted + } + return s.encrypt(eoptions...) 
+} + +func (s *Serializer) encrypt(options ...jwe.EncryptOption) *Serializer { + return s.Step(&jweSerializer{ + options: options, + }) +} + +func (s *Serializer) Serialize(t Token) ([]byte, error) { + steps := make([]SerializeStep, len(s.steps)+1) + steps[0] = jsonSerializer{} + for i, step := range s.steps { + steps[i+1] = step + } + + var ctx serializeCtx + ctx.nested = len(s.steps) > 1 + var payload interface{} = t + for i, step := range steps { + ctx.step = i + v, err := step.Serialize(&ctx, payload) + if err != nil { + return nil, fmt.Errorf(`failed to serialize token at step #%d: %w`, i+1, err) + } + payload = v + } + + res, ok := payload.([]byte) + if !ok { + return nil, fmt.Errorf(`invalid serialization produced`) + } + + return res, nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwt/token_gen.go b/vendor/github.com/lestrrat-go/jwx/v2/jwt/token_gen.go new file mode 100644 index 0000000000..ea55b83756 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwt/token_gen.go @@ -0,0 +1,539 @@ +// This file is auto-generated by jwt/internal/cmd/gentoken/main.go. DO NOT EDIT + +package jwt + +import ( + "bytes" + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/lestrrat-go/iter/mapiter" + "github.com/lestrrat-go/jwx/v2/internal/base64" + "github.com/lestrrat-go/jwx/v2/internal/iter" + "github.com/lestrrat-go/jwx/v2/internal/json" + "github.com/lestrrat-go/jwx/v2/internal/pool" + "github.com/lestrrat-go/jwx/v2/jwt/internal/types" +) + +const ( + AudienceKey = "aud" + ExpirationKey = "exp" + IssuedAtKey = "iat" + IssuerKey = "iss" + JwtIDKey = "jti" + NotBeforeKey = "nbf" + SubjectKey = "sub" +) + +// Token represents a generic JWT token. +// which are type-aware (to an extent). Other claims may be accessed via the `Get`/`Set` +// methods but their types are not taken into consideration at all. 
If you have non-standard +// claims that you must frequently access, consider creating accessors functions +// like the following +// +// func SetFoo(tok jwt.Token) error +// func GetFoo(tok jwt.Token) (*Customtyp, error) +// +// Embedding jwt.Token into another struct is not recommended, because +// jwt.Token needs to handle private claims, and this really does not +// work well when it is embedded in other structure +type Token interface { + + // Audience returns the value for "aud" field of the token + Audience() []string + + // Expiration returns the value for "exp" field of the token + Expiration() time.Time + + // IssuedAt returns the value for "iat" field of the token + IssuedAt() time.Time + + // Issuer returns the value for "iss" field of the token + Issuer() string + + // JwtID returns the value for "jti" field of the token + JwtID() string + + // NotBefore returns the value for "nbf" field of the token + NotBefore() time.Time + + // Subject returns the value for "sub" field of the token + Subject() string + + // PrivateClaims return the entire set of fields (claims) in the token + // *other* than the pre-defined fields such as `iss`, `nbf`, `iat`, etc. + PrivateClaims() map[string]interface{} + + // Get returns the value of the corresponding field in the token, such as + // `nbf`, `exp`, `iat`, and other user-defined fields. If the field does not + // exist in the token, the second return value will be `false` + // + // If you need to access fields like `alg`, `kid`, `jku`, etc, you need + // to access the corresponding fields in the JWS/JWE message. For this, + // you will need to access them by directly parsing the payload using + // `jws.Parse` and `jwe.Parse` + Get(string) (interface{}, bool) + + // Set assigns a value to the corresponding field in the token. Some + // pre-defined fields such as `nbf`, `iat`, `iss` need their values to + // be of a specific type. 
See the other getter methods in this interface + // for the types of each of these fields + Set(string, interface{}) error + Remove(string) error + + // Options returns the per-token options associated with this token. + // The options set value will be copied when the token is cloned via `Clone()` + // but it will not survive when the token goes through marshaling/unmarshaling + // such as `json.Marshal` and `json.Unmarshal` + Options() *TokenOptionSet + Clone() (Token, error) + Iterate(context.Context) Iterator + Walk(context.Context, Visitor) error + AsMap(context.Context) (map[string]interface{}, error) +} +type stdToken struct { + mu *sync.RWMutex + dc DecodeCtx // per-object context for decoding + options TokenOptionSet // per-object option + audience types.StringList // https://tools.ietf.org/html/rfc7519#section-4.1.3 + expiration *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.4 + issuedAt *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.6 + issuer *string // https://tools.ietf.org/html/rfc7519#section-4.1.1 + jwtID *string // https://tools.ietf.org/html/rfc7519#section-4.1.7 + notBefore *types.NumericDate // https://tools.ietf.org/html/rfc7519#section-4.1.5 + subject *string // https://tools.ietf.org/html/rfc7519#section-4.1.2 + privateClaims map[string]interface{} +} + +// New creates a standard token, with minimal knowledge of +// possible claims. Standard claims include"aud", "exp", "iat", "iss", "jti", "nbf" and "sub". 
+// Convenience accessors are provided for these standard claims +func New() Token { + return &stdToken{ + mu: &sync.RWMutex{}, + privateClaims: make(map[string]interface{}), + options: DefaultOptionSet(), + } +} + +func (t *stdToken) Options() *TokenOptionSet { + return &t.options +} + +func (t *stdToken) Get(name string) (interface{}, bool) { + t.mu.RLock() + defer t.mu.RUnlock() + switch name { + case AudienceKey: + if t.audience == nil { + return nil, false + } + v := t.audience.Get() + return v, true + case ExpirationKey: + if t.expiration == nil { + return nil, false + } + v := t.expiration.Get() + return v, true + case IssuedAtKey: + if t.issuedAt == nil { + return nil, false + } + v := t.issuedAt.Get() + return v, true + case IssuerKey: + if t.issuer == nil { + return nil, false + } + v := *(t.issuer) + return v, true + case JwtIDKey: + if t.jwtID == nil { + return nil, false + } + v := *(t.jwtID) + return v, true + case NotBeforeKey: + if t.notBefore == nil { + return nil, false + } + v := t.notBefore.Get() + return v, true + case SubjectKey: + if t.subject == nil { + return nil, false + } + v := *(t.subject) + return v, true + default: + v, ok := t.privateClaims[name] + return v, ok + } +} + +func (t *stdToken) Remove(key string) error { + t.mu.Lock() + defer t.mu.Unlock() + switch key { + case AudienceKey: + t.audience = nil + case ExpirationKey: + t.expiration = nil + case IssuedAtKey: + t.issuedAt = nil + case IssuerKey: + t.issuer = nil + case JwtIDKey: + t.jwtID = nil + case NotBeforeKey: + t.notBefore = nil + case SubjectKey: + t.subject = nil + default: + delete(t.privateClaims, key) + } + return nil +} + +func (t *stdToken) Set(name string, value interface{}) error { + t.mu.Lock() + defer t.mu.Unlock() + return t.setNoLock(name, value) +} + +func (t *stdToken) DecodeCtx() DecodeCtx { + t.mu.RLock() + defer t.mu.RUnlock() + return t.dc +} + +func (t *stdToken) SetDecodeCtx(v DecodeCtx) { + t.mu.Lock() + defer t.mu.Unlock() + t.dc = v +} + +func (t 
*stdToken) setNoLock(name string, value interface{}) error { + switch name { + case AudienceKey: + var acceptor types.StringList + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, AudienceKey, err) + } + t.audience = acceptor + return nil + case ExpirationKey: + var acceptor types.NumericDate + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, ExpirationKey, err) + } + t.expiration = &acceptor + return nil + case IssuedAtKey: + var acceptor types.NumericDate + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, IssuedAtKey, err) + } + t.issuedAt = &acceptor + return nil + case IssuerKey: + if v, ok := value.(string); ok { + t.issuer = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, IssuerKey, value) + case JwtIDKey: + if v, ok := value.(string); ok { + t.jwtID = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, JwtIDKey, value) + case NotBeforeKey: + var acceptor types.NumericDate + if err := acceptor.Accept(value); err != nil { + return fmt.Errorf(`invalid value for %s key: %w`, NotBeforeKey, err) + } + t.notBefore = &acceptor + return nil + case SubjectKey: + if v, ok := value.(string); ok { + t.subject = &v + return nil + } + return fmt.Errorf(`invalid value for %s key: %T`, SubjectKey, value) + default: + if t.privateClaims == nil { + t.privateClaims = map[string]interface{}{} + } + t.privateClaims[name] = value + } + return nil +} + +func (t *stdToken) Audience() []string { + t.mu.RLock() + defer t.mu.RUnlock() + if t.audience != nil { + return t.audience.Get() + } + return nil +} + +func (t *stdToken) Expiration() time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + if t.expiration != nil { + return t.expiration.Get() + } + return time.Time{} +} + +func (t *stdToken) IssuedAt() time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + if t.issuedAt != nil { + return 
t.issuedAt.Get() + } + return time.Time{} +} + +func (t *stdToken) Issuer() string { + t.mu.RLock() + defer t.mu.RUnlock() + if t.issuer != nil { + return *(t.issuer) + } + return "" +} + +func (t *stdToken) JwtID() string { + t.mu.RLock() + defer t.mu.RUnlock() + if t.jwtID != nil { + return *(t.jwtID) + } + return "" +} + +func (t *stdToken) NotBefore() time.Time { + t.mu.RLock() + defer t.mu.RUnlock() + if t.notBefore != nil { + return t.notBefore.Get() + } + return time.Time{} +} + +func (t *stdToken) Subject() string { + t.mu.RLock() + defer t.mu.RUnlock() + if t.subject != nil { + return *(t.subject) + } + return "" +} + +func (t *stdToken) PrivateClaims() map[string]interface{} { + t.mu.RLock() + defer t.mu.RUnlock() + return t.privateClaims +} + +func (t *stdToken) makePairs() []*ClaimPair { + t.mu.RLock() + defer t.mu.RUnlock() + + pairs := make([]*ClaimPair, 0, 7) + if t.audience != nil { + v := t.audience.Get() + pairs = append(pairs, &ClaimPair{Key: AudienceKey, Value: v}) + } + if t.expiration != nil { + v := t.expiration.Get() + pairs = append(pairs, &ClaimPair{Key: ExpirationKey, Value: v}) + } + if t.issuedAt != nil { + v := t.issuedAt.Get() + pairs = append(pairs, &ClaimPair{Key: IssuedAtKey, Value: v}) + } + if t.issuer != nil { + v := *(t.issuer) + pairs = append(pairs, &ClaimPair{Key: IssuerKey, Value: v}) + } + if t.jwtID != nil { + v := *(t.jwtID) + pairs = append(pairs, &ClaimPair{Key: JwtIDKey, Value: v}) + } + if t.notBefore != nil { + v := t.notBefore.Get() + pairs = append(pairs, &ClaimPair{Key: NotBeforeKey, Value: v}) + } + if t.subject != nil { + v := *(t.subject) + pairs = append(pairs, &ClaimPair{Key: SubjectKey, Value: v}) + } + for k, v := range t.privateClaims { + pairs = append(pairs, &ClaimPair{Key: k, Value: v}) + } + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].Key.(string) < pairs[j].Key.(string) + }) + return pairs +} + +func (t *stdToken) UnmarshalJSON(buf []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + 
t.audience = nil + t.expiration = nil + t.issuedAt = nil + t.issuer = nil + t.jwtID = nil + t.notBefore = nil + t.subject = nil + dec := json.NewDecoder(bytes.NewReader(buf)) +LOOP: + for { + tok, err := dec.Token() + if err != nil { + return fmt.Errorf(`error reading token: %w`, err) + } + switch tok := tok.(type) { + case json.Delim: + // Assuming we're doing everything correctly, we should ONLY + // get either '{' or '}' here. + if tok == '}' { // End of object + break LOOP + } else if tok != '{' { + return fmt.Errorf(`expected '{', but got '%c'`, tok) + } + case string: // Objects can only have string keys + switch tok { + case AudienceKey: + var decoded types.StringList + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, AudienceKey, err) + } + t.audience = decoded + case ExpirationKey: + var decoded types.NumericDate + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, ExpirationKey, err) + } + t.expiration = &decoded + case IssuedAtKey: + var decoded types.NumericDate + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, IssuedAtKey, err) + } + t.issuedAt = &decoded + case IssuerKey: + if err := json.AssignNextStringToken(&t.issuer, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, IssuerKey, err) + } + case JwtIDKey: + if err := json.AssignNextStringToken(&t.jwtID, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, JwtIDKey, err) + } + case NotBeforeKey: + var decoded types.NumericDate + if err := dec.Decode(&decoded); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, NotBeforeKey, err) + } + t.notBefore = &decoded + case SubjectKey: + if err := json.AssignNextStringToken(&t.subject, dec); err != nil { + return fmt.Errorf(`failed to decode value for key %s: %w`, SubjectKey, err) + } + default: + if dc := t.dc; dc 
!= nil { + if localReg := dc.Registry(); localReg != nil { + decoded, err := localReg.Decode(dec, tok) + if err == nil { + t.setNoLock(tok, decoded) + continue + } + } + } + decoded, err := registry.Decode(dec, tok) + if err == nil { + t.setNoLock(tok, decoded) + continue + } + return fmt.Errorf(`could not decode field %s: %w`, tok, err) + } + default: + return fmt.Errorf(`invalid token %T`, tok) + } + } + return nil +} + +func (t stdToken) MarshalJSON() ([]byte, error) { + buf := pool.GetBytesBuffer() + defer pool.ReleaseBytesBuffer(buf) + buf.WriteByte('{') + enc := json.NewEncoder(buf) + for i, pair := range t.makePairs() { + f := pair.Key.(string) + if i > 0 { + buf.WriteByte(',') + } + buf.WriteRune('"') + buf.WriteString(f) + buf.WriteString(`":`) + switch f { + case AudienceKey: + if err := json.EncodeAudience(enc, pair.Value.([]string), t.options.IsEnabled(FlattenAudience)); err != nil { + return nil, fmt.Errorf(`failed to encode "aud": %w`, err) + } + continue + case ExpirationKey, IssuedAtKey, NotBeforeKey: + enc.Encode(pair.Value.(time.Time).Unix()) + continue + } + switch v := pair.Value.(type) { + case []byte: + buf.WriteRune('"') + buf.WriteString(base64.EncodeToString(v)) + buf.WriteRune('"') + default: + if err := enc.Encode(v); err != nil { + return nil, fmt.Errorf(`failed to marshal field %s: %w`, f, err) + } + buf.Truncate(buf.Len() - 1) + } + } + buf.WriteByte('}') + ret := make([]byte, buf.Len()) + copy(ret, buf.Bytes()) + return ret, nil +} + +func (t *stdToken) Iterate(ctx context.Context) Iterator { + pairs := t.makePairs() + ch := make(chan *ClaimPair, len(pairs)) + go func(ctx context.Context, ch chan *ClaimPair, pairs []*ClaimPair) { + defer close(ch) + for _, pair := range pairs { + select { + case <-ctx.Done(): + return + case ch <- pair: + } + } + }(ctx, ch, pairs) + return mapiter.New(ch) +} + +func (t *stdToken) Walk(ctx context.Context, visitor Visitor) error { + return iter.WalkMap(ctx, t, visitor) +} + +func (t *stdToken) 
// TokenOptionSet is a bit-flag set containing per-token options.
type TokenOptionSet uint64

var defaultOptions TokenOptionSet
var defaultOptionsMu sync.RWMutex

// TokenOption describes a single token option that can be set on the
// per-token option set (TokenOptionSet).
type TokenOption uint64

const (
	// FlattenAudience controls whether the "aud" claim should be flattened
	// to a single string upon the token being serialized to JSON.
	//
	// This is sometimes important when a JWT consumer does not understand
	// that the "aud" claim can actually take the form of an array of
	// strings. (We have been notified by users that AWS Cognito has
	// manifested this behavior at some point.)
	//
	// Unless the global option is set using `jwt.Settings()`, the default
	// value is `disabled`, which means that "aud" claims are always rendered
	// as arrays of strings when serialized to JSON.
	FlattenAudience TokenOption = 1 << iota

	// MaxPerTokenOption is a marker to denote the last value that an option
	// can take. This value has no meaning other than to be used as a marker.
	MaxPerTokenOption
)

// Value returns the uint64 value of a single option.
func (o TokenOption) Value() uint64 {
	return uint64(o)
}

// Value returns the uint64 bit flag value of an option set.
func (o TokenOptionSet) Value() uint64 {
	return uint64(o)
}

// DefaultOptionSet creates a new TokenOptionSet using the default option
// set. This may differ depending on if/when functions that change the global
// state have been called, such as `jwt.Settings`.
func DefaultOptionSet() TokenOptionSet {
	return TokenOptionSet(defaultOptions.Value())
}

// Clear sets all bits to zero, effectively disabling all options.
func (o *TokenOptionSet) Clear() {
	*o = 0
}

// Set replaces the entire option set with the given value. This is NOT the
// same as Enable/Disable.
func (o *TokenOptionSet) Set(s TokenOptionSet) {
	*o = s
}

// Enable turns the given option bit on in the option set.
func (o *TokenOptionSet) Enable(flag TokenOption) {
	*o |= TokenOptionSet(flag)
}

// Disable turns the given option bit off in the option set.
func (o *TokenOptionSet) Disable(flag TokenOption) {
	*o &^= TokenOptionSet(flag)
}

// IsEnabled returns true if the given bit in the option set is enabled.
func (o TokenOptionSet) IsEnabled(flag TokenOption) bool {
	return o&TokenOptionSet(flag) == TokenOptionSet(flag)
}

// Compile-time assertion (from stringer) that the TokenOption constant
// values have not drifted from the string tables below; an "invalid array
// index" error here means the stringer output must be regenerated.
func _() {
	var x [1]struct{}
	_ = x[FlattenAudience-1]
	_ = x[MaxPerTokenOption-2]
}

const _TokenOption_name = "FlattenAudienceMaxPerTokenOption"

var _TokenOption_index = [...]uint8{0, 15, 32}

// String returns the name of the option, or "TokenOption(N)" for values
// outside the known range.
func (i TokenOption) String() string {
	idx := i - 1
	if idx >= TokenOption(len(_TokenOption_index)-1) {
		return "TokenOption(" + strconv.FormatInt(int64(idx+1), 10) + ")"
	}
	return _TokenOption_name[_TokenOption_index[idx]:_TokenOption_index[idx+1]]
}

// Clock abstracts the source of "current time" used during validation.
type Clock interface {
	Now() time.Time
}

// ClockFunc adapts a plain function to the Clock interface.
type ClockFunc func() time.Time

func (f ClockFunc) Now() time.Time {
	return f()
}
+func Validate(t Token, options ...ValidateOption) error { + ctx := context.Background() + trunc := time.Second + + var clock Clock = ClockFunc(time.Now) + var skew time.Duration + var validators = []Validator{ + IsIssuedAtValid(), + IsExpirationValid(), + IsNbfValid(), + } + for _, o := range options { + //nolint:forcetypeassert + switch o.Ident() { + case identClock{}: + clock = o.Value().(Clock) + case identAcceptableSkew{}: + skew = o.Value().(time.Duration) + case identTruncation{}: + trunc = o.Value().(time.Duration) + case identContext{}: + ctx = o.Value().(context.Context) + case identValidator{}: + v := o.Value().(Validator) + switch v := v.(type) { + case *isInTimeRange: + if v.c1 != "" { + if err := isSupportedTimeClaim(v.c1); err != nil { + return err + } + validators = append(validators, IsRequired(v.c1)) + } + if v.c2 != "" { + if err := isSupportedTimeClaim(v.c2); err != nil { + return err + } + validators = append(validators, IsRequired(v.c2)) + } + } + validators = append(validators, v) + } + } + + ctx = SetValidationCtxSkew(ctx, skew) + ctx = SetValidationCtxClock(ctx, clock) + ctx = SetValidationCtxTruncation(ctx, trunc) + for _, v := range validators { + if err := v.Validate(ctx, t); err != nil { + return err + } + } + + return nil +} + +type isInTimeRange struct { + c1 string + c2 string + dur time.Duration + less bool // if true, d =< c1 - c2. 
otherwise d >= c1 - c2 +} + +// MaxDeltaIs implements the logic behind `WithMaxDelta()` option +func MaxDeltaIs(c1, c2 string, dur time.Duration) Validator { + return &isInTimeRange{ + c1: c1, + c2: c2, + dur: dur, + less: true, + } +} + +// MinDeltaIs implements the logic behind `WithMinDelta()` option +func MinDeltaIs(c1, c2 string, dur time.Duration) Validator { + return &isInTimeRange{ + c1: c1, + c2: c2, + dur: dur, + less: false, + } +} + +func (iitr *isInTimeRange) Validate(ctx context.Context, t Token) ValidationError { + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + // We don't check if the claims already exist, because we already did that + // by piggybacking on `required` check. + t1 := timeClaim(t, clock, iitr.c1) + t2 := timeClaim(t, clock, iitr.c2) + if iitr.less { // t1 - t2 <= iitr.dur + // t1 - t2 < iitr.dur + skew + if t1.Sub(t2) > iitr.dur+skew { + return NewValidationError(fmt.Errorf(`iitr between %s and %s exceeds %s (skew %s)`, iitr.c1, iitr.c2, iitr.dur, skew)) + } + } else { + if t1.Sub(t2) < iitr.dur-skew { + return NewValidationError(fmt.Errorf(`iitr between %s and %s is less than %s (skew %s)`, iitr.c1, iitr.c2, iitr.dur, skew)) + } + } + return nil +} + +type ValidationError interface { + error + isValidationError() + Unwrap() error +} + +func NewValidationError(err error) ValidationError { + return &validationError{error: err} +} + +// This is a generic validation error. 
+type validationError struct { + error +} + +func (validationError) isValidationError() {} +func (err *validationError) Unwrap() error { + return err.error +} + +type missingRequiredClaimError struct { + claim string +} + +func (err *missingRequiredClaimError) Error() string { + return fmt.Sprintf("%q not satisfied: required claim not found", err.claim) +} + +func (err *missingRequiredClaimError) Is(target error) bool { + _, ok := target.(*missingRequiredClaimError) + return ok +} + +func (err *missingRequiredClaimError) isValidationError() {} +func (*missingRequiredClaimError) Unwrap() error { return nil } + +type invalidAudienceError struct { + error +} + +func (err *invalidAudienceError) Is(target error) bool { + _, ok := target.(*invalidAudienceError) + return ok +} + +func (err *invalidAudienceError) isValidationError() {} +func (err *invalidAudienceError) Unwrap() error { + return err.error +} + +func (err *invalidAudienceError) Error() string { + if err.error == nil { + return `"aud" not satisfied` + } + return err.error.Error() +} + +type invalidIssuerError struct { + error +} + +func (err *invalidIssuerError) Is(target error) bool { + _, ok := target.(*invalidIssuerError) + return ok +} + +func (err *invalidIssuerError) isValidationError() {} +func (err *invalidIssuerError) Unwrap() error { + return err.error +} + +func (err *invalidIssuerError) Error() string { + if err.error == nil { + return `"iss" not satisfied` + } + return err.error.Error() +} + +var errTokenExpired = NewValidationError(fmt.Errorf(`"exp" not satisfied`)) +var errInvalidIssuedAt = NewValidationError(fmt.Errorf(`"iat" not satisfied`)) +var errTokenNotYetValid = NewValidationError(fmt.Errorf(`"nbf" not satisfied`)) +var errInvalidAudience = &invalidAudienceError{} +var errInvalidIssuer = &invalidIssuerError{} +var errRequiredClaim = &missingRequiredClaimError{} + +// ErrTokenExpired returns the immutable error used when `exp` claim +// is not satisfied. 
+// +// The return value should only be used for comparison using `errors.Is()` +func ErrTokenExpired() ValidationError { + return errTokenExpired +} + +// ErrInvalidIssuedAt returns the immutable error used when `iat` claim +// is not satisfied +// +// The return value should only be used for comparison using `errors.Is()` +func ErrInvalidIssuedAt() ValidationError { + return errInvalidIssuedAt +} + +// ErrTokenNotYetValid returns the immutable error used when `nbf` claim +// is not satisfied +// +// The return value should only be used for comparison using `errors.Is()` +func ErrTokenNotYetValid() ValidationError { + return errTokenNotYetValid +} + +// ErrInvalidAudience returns the immutable error used when `aud` claim +// is not satisfied +// +// The return value should only be used for comparison using `errors.Is()` +func ErrInvalidAudience() ValidationError { + return errInvalidAudience +} + +// ErrInvalidIssuer returns the immutable error used when `iss` claim +// is not satisfied +// +// The return value should only be used for comparison using `errors.Is()` +func ErrInvalidIssuer() ValidationError { + return errInvalidIssuer +} + +// ErrMissingRequiredClaim should not have been exported, and will be +// removed in a future release. Use `ErrRequiredClaim()` instead to get +// an error to be used in `errors.Is()` +// +// This function should not have been implemented as a constructor. +// but rather a means to retrieve an opaque and immutable error value +// that could be passed to `errors.Is()`. +func ErrMissingRequiredClaim(name string) ValidationError { + return &missingRequiredClaimError{claim: name} +} + +// ErrRequiredClaim returns the immutable error used when the claim +// specified by `jwt.IsRequired()` is not present. +// +// The return value should only be used for comparison using `errors.Is()` +func ErrRequiredClaim() ValidationError { + return errRequiredClaim +} + +// Validator describes interface to validate a Token. 
+type Validator interface { + // Validate should return an error if a required conditions is not met. + Validate(context.Context, Token) ValidationError +} + +// ValidatorFunc is a type of Validator that does not have any +// state, that is implemented as a function +type ValidatorFunc func(context.Context, Token) ValidationError + +func (vf ValidatorFunc) Validate(ctx context.Context, tok Token) ValidationError { + return vf(ctx, tok) +} + +type identValidationCtxClock struct{} +type identValidationCtxSkew struct{} +type identValidationCtxTruncation struct{} + +func SetValidationCtxClock(ctx context.Context, cl Clock) context.Context { + return context.WithValue(ctx, identValidationCtxClock{}, cl) +} + +func SetValidationCtxTruncation(ctx context.Context, dur time.Duration) context.Context { + return context.WithValue(ctx, identValidationCtxTruncation{}, dur) +} + +func SetValidationCtxSkew(ctx context.Context, dur time.Duration) context.Context { + return context.WithValue(ctx, identValidationCtxSkew{}, dur) +} + +// ValidationCtxClock returns the Clock object associated with +// the current validation context. This value will always be available +// during validation of tokens. +func ValidationCtxClock(ctx context.Context) Clock { + //nolint:forcetypeassert + return ctx.Value(identValidationCtxClock{}).(Clock) +} + +func ValidationCtxSkew(ctx context.Context) time.Duration { + //nolint:forcetypeassert + return ctx.Value(identValidationCtxSkew{}).(time.Duration) +} + +func ValidationCtxTruncation(ctx context.Context) time.Duration { + //nolint:forcetypeassert + return ctx.Value(identValidationCtxTruncation{}).(time.Duration) +} + +// IsExpirationValid is one of the default validators that will be executed. +// It does not need to be specified by users, but it exists as an +// exported field so that you can check what it does. 
+// +// The supplied context.Context object must have the "clock" and "skew" +// populated with appropriate values using SetValidationCtxClock() and +// SetValidationCtxSkew() +func IsExpirationValid() Validator { + return ValidatorFunc(isExpirationValid) +} + +func isExpirationValid(ctx context.Context, t Token) ValidationError { + tv := t.Expiration() + if tv.IsZero() || tv.Unix() == 0 { + return nil + } + + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + trunc := ValidationCtxTruncation(ctx) // MUST be populated + + now := clock.Now().Truncate(trunc) + ttv := tv.Truncate(trunc) + + // expiration date must be after NOW + if !now.Before(ttv.Add(skew)) { + return ErrTokenExpired() + } + return nil +} + +// IsIssuedAtValid is one of the default validators that will be executed. +// It does not need to be specified by users, but it exists as an +// exported field so that you can check what it does. +// +// The supplied context.Context object must have the "clock" and "skew" +// populated with appropriate values using SetValidationCtxClock() and +// SetValidationCtxSkew() +func IsIssuedAtValid() Validator { + return ValidatorFunc(isIssuedAtValid) +} + +func isIssuedAtValid(ctx context.Context, t Token) ValidationError { + tv := t.IssuedAt() + if tv.IsZero() || tv.Unix() == 0 { + return nil + } + + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + trunc := ValidationCtxTruncation(ctx) // MUST be populated + + now := clock.Now().Truncate(trunc) + ttv := tv.Truncate(trunc) + + if now.Before(ttv.Add(-1 * skew)) { + return ErrInvalidIssuedAt() + } + return nil +} + +// IsNbfValid is one of the default validators that will be executed. +// It does not need to be specified by users, but it exists as an +// exported field so that you can check what it does. 
+// +// The supplied context.Context object must have the "clock" and "skew" +// populated with appropriate values using SetValidationCtxClock() and +// SetValidationCtxSkew() +func IsNbfValid() Validator { + return ValidatorFunc(isNbfValid) +} + +func isNbfValid(ctx context.Context, t Token) ValidationError { + tv := t.NotBefore() + if tv.IsZero() || tv.Unix() == 0 { + return nil + } + + clock := ValidationCtxClock(ctx) // MUST be populated + skew := ValidationCtxSkew(ctx) // MUST be populated + trunc := ValidationCtxTruncation(ctx) // MUST be populated + + // Truncation always happens even for trunc = 0 because + // we also use this to strip monotonic clocks + now := clock.Now().Truncate(trunc) + ttv := tv.Truncate(trunc) + + // "now" cannot be before t - skew, so we check for now > t - skew + ttv = ttv.Add(-1 * skew) + if now.Before(ttv) { + return ErrTokenNotYetValid() + } + return nil +} + +type claimContainsString struct { + name string + value string + makeErr func(error) ValidationError +} + +// ClaimContainsString can be used to check if the claim called `name`, which is +// expected to be a list of strings, contains `value`. Currently because of the +// implementation this will probably only work for `aud` fields. 
+func ClaimContainsString(name, value string) Validator { + return claimContainsString{ + name: name, + value: value, + makeErr: NewValidationError, + } +} + +// IsValidationError returns true if the error is a validation error +func IsValidationError(err error) bool { + switch err { + case errTokenExpired, errTokenNotYetValid, errInvalidIssuedAt: + return true + default: + switch err.(type) { + case *validationError, *invalidAudienceError, *invalidIssuerError, *missingRequiredClaimError: + return true + default: + return false + } + } +} + +func (ccs claimContainsString) Validate(_ context.Context, t Token) ValidationError { + v, ok := t.Get(ccs.name) + if !ok { + return ccs.makeErr(fmt.Errorf(`claim %q not found`, ccs.name)) + } + + list, ok := v.([]string) + if !ok { + return ccs.makeErr(fmt.Errorf(`claim %q must be a []string (got %T)`, ccs.name, v)) + } + + for _, v := range list { + if v == ccs.value { + return nil + } + } + return ccs.makeErr(fmt.Errorf(`%q not satisfied`, ccs.name)) +} + +func makeInvalidAudienceError(err error) ValidationError { + return &invalidAudienceError{error: err} +} + +// audienceClaimContainsString can be used to check if the audience claim, which is +// expected to be a list of strings, contains `value`. +func audienceClaimContainsString(value string) Validator { + return claimContainsString{ + name: AudienceKey, + value: value, + makeErr: makeInvalidAudienceError, + } +} + +type claimValueIs struct { + name string + value interface{} + makeErr func(error) ValidationError +} + +// ClaimValueIs creates a Validator that checks if the value of claim `name` +// matches `value`. The comparison is done using a simple `==` comparison, +// and therefore complex comparisons may fail using this code. If you +// need to do more, use a custom Validator. 
+func ClaimValueIs(name string, value interface{}) Validator { + return &claimValueIs{ + name: name, + value: value, + makeErr: NewValidationError, + } +} + +func (cv *claimValueIs) Validate(_ context.Context, t Token) ValidationError { + v, ok := t.Get(cv.name) + if !ok { + return cv.makeErr(fmt.Errorf(`%q not satisfied: claim %q does not exist`, cv.name, cv.name)) + } + if v != cv.value { + return cv.makeErr(fmt.Errorf(`%q not satisfied: values do not match`, cv.name)) + } + return nil +} + +func makeIssuerClaimError(err error) ValidationError { + return &invalidIssuerError{error: err} +} + +// issuerClaimValueIs creates a Validator that checks if the issuer claim +// matches `value`. +func issuerClaimValueIs(value string) Validator { + return &claimValueIs{ + name: IssuerKey, + value: value, + makeErr: makeIssuerClaimError, + } +} + +// IsRequired creates a Validator that checks if the required claim `name` +// exists in the token +func IsRequired(name string) Validator { + return isRequired(name) +} + +type isRequired string + +func (ir isRequired) Validate(_ context.Context, t Token) ValidationError { + name := string(ir) + _, ok := t.Get(name) + if !ok { + return &missingRequiredClaimError{claim: name} + } + return nil +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/jwx.go b/vendor/github.com/lestrrat-go/jwx/v2/jwx.go new file mode 100644 index 0000000000..03e83c8364 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/jwx.go @@ -0,0 +1,44 @@ +//go:generate ./tools/cmd/genreadfile.sh +//go:generate ./tools/cmd/genoptions.sh +//go:generate stringer -type=FormatKind +//go:generate mv formatkind_string.go formatkind_string_gen.go + +// Package jwx contains tools that deal with the various JWx (JOSE) +// technologies such as JWT, JWS, JWE, etc in Go. 
+// +// JWS (https://tools.ietf.org/html/rfc7515) +// JWE (https://tools.ietf.org/html/rfc7516) +// JWK (https://tools.ietf.org/html/rfc7517) +// JWA (https://tools.ietf.org/html/rfc7518) +// JWT (https://tools.ietf.org/html/rfc7519) +// +// Examples are stored in a separate Go module (to avoid adding +// dependencies to this module), and thus does not appear in the +// online documentation for this module. +// You can find the examples in Github at https://github.com/lestrrat-go/jwx/tree/v2/examples +// +// You can find more high level documentation at Github (https://github.com/lestrrat-go/jwx/tree/v2) +// +// FAQ style documentation can be found in the repository (https://github.com/lestrrat-go/jwx/tree/develop/v2/docs) +package jwx + +import ( + "github.com/lestrrat-go/jwx/v2/internal/json" +) + +// DecoderSettings gives you a access to configure the "encoding/json".Decoder +// used to decode JSON objects within the jwx framework. +func DecoderSettings(options ...JSONOption) { + // XXX We're using this format instead of just passing a single boolean + // in case a new option is to be added some time later + var useNumber bool + for _, option := range options { + //nolint:forcetypeassert + switch option.Ident() { + case identUseNumber{}: + useNumber = option.Value().(bool) + } + } + + json.DecoderSettings(useNumber) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/options.go b/vendor/github.com/lestrrat-go/jwx/v2/options.go new file mode 100644 index 0000000000..2e191abbef --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/options.go @@ -0,0 +1,30 @@ +package jwx + +import "github.com/lestrrat-go/option" + +type identUseNumber struct{} + +type Option = option.Interface + +type JSONOption interface { + Option + isJSONOption() +} + +type jsonOption struct { + Option +} + +func (o *jsonOption) isJSONOption() {} + +func newJSONOption(n interface{}, v interface{}) JSONOption { + return &jsonOption{option.New(n, v)} +} + +// WithUseNumber controls whether 
the jwx package should unmarshal +// JSON objects with the "encoding/json".Decoder.UseNumber feature on. +// +// Default is false. +func WithUseNumber(b bool) JSONOption { + return newJSONOption(identUseNumber{}, b) +} diff --git a/vendor/github.com/lestrrat-go/jwx/v2/x25519/x25519.go b/vendor/github.com/lestrrat-go/jwx/v2/x25519/x25519.go new file mode 100644 index 0000000000..0f9e32cbc3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jwx/v2/x25519/x25519.go @@ -0,0 +1,115 @@ +package x25519 + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "fmt" + "io" + + "golang.org/x/crypto/curve25519" +) + +// This mirrors ed25519's structure for private/public "keys". jwx +// requires dedicated types for these as they drive +// serialization/deserialization logic, as well as encryption types. +// +// Note that with the x25519 scheme, the private key is a sequence of +// 32 bytes, while the public key is the result of X25519(private, +// basepoint). +// +// Portions of this file are from Go's ed25519.go, which is +// Copyright 2016 The Go Authors. All rights reserved. + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of X25519 public keys +type PublicKey []byte + +// Any methods implemented on PublicKey might need to also be implemented on +// PrivateKey, as the latter embeds the former and will expose its methods. + +// Equal reports whether pub and x have the same value. 
+func (pub PublicKey) Equal(x crypto.PublicKey) bool { + xx, ok := x.(PublicKey) + if !ok { + return false + } + return bytes.Equal(pub, xx) +} + +// PrivateKey is the type of X25519 private key +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[SeedSize:]) + return PublicKey(publicKey) +} + +// Equal reports whether priv and x have the same value. +func (priv PrivateKey) Equal(x crypto.PrivateKey) bool { + xx, ok := x.(PrivateKey) + if !ok { + return false + } + return bytes.Equal(priv, xx) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 7748. RFC 7748's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:SeedSize]) + return seed +} + +// NewKeyFromSeed calculates a private key from a seed. It will return +// an error if len(seed) is not SeedSize. This function is provided +// for interoperability with RFC 7748. RFC 7748's private keys +// correspond to seeds in this package. +func NewKeyFromSeed(seed []byte) (PrivateKey, error) { + privateKey := make([]byte, PrivateKeySize) + if len(seed) != SeedSize { + return nil, fmt.Errorf("unexpected seed size: %d", len(seed)) + } + copy(privateKey, seed) + public, err := curve25519.X25519(seed, curve25519.Basepoint) + if err != nil { + return nil, fmt.Errorf(`failed to compute public key: %w`, err) + } + copy(privateKey[SeedSize:], public) + + return privateKey, nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. 
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptorand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey, err := NewKeyFromSeed(seed) + if err != nil { + return nil, nil, err + } + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[SeedSize:]) + + return publicKey, privateKey, nil +} diff --git a/vendor/github.com/lestrrat-go/option/.gitignore b/vendor/github.com/lestrrat-go/option/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/lestrrat-go/option/LICENSE b/vendor/github.com/lestrrat-go/option/LICENSE new file mode 100644 index 0000000000..188ea7685c --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/option/README.md b/vendor/github.com/lestrrat-go/option/README.md new file mode 100644 index 0000000000..ba42acd53c --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/README.md @@ -0,0 +1,136 @@ +# option + +Base object for what I call the "Optional Parameters Pattern". + +The beauty of this pattern is that you can achieve a method that can +take the following simple calling style + +``` +obj.Method(mandatory1, mandatory2) +``` + +or the following, if you want to modify its behavior with optional parameters + +``` +obj.Method(mandatory1, mandatory2, optional1, optional2, optional3) +``` + +Intead of the more clunky zero value for optionals style + +``` +obj.Method(mandatory1, mandatory2, nil, "", 0) +``` + +or the equally cluncky config object style, which requires you to create a +struct with `NamesThatLookReallyLongBecauseItNeedsToIncludeMethodNamesConfig + +``` +cfg := &ConfigForMethod{ + Optional1: ..., + Optional2: ..., + Optional3: ..., +} +obj.Method(mandatory1, mandatory2, &cfg) +``` + +# SYNOPSIS + +This library is intended to be a reusable component to implement +a function with arguments that look like the following: + +``` +obj.Method(mandatory1, mandatory2, optional1, optional2, optional3, ...) +``` + +Internally, we just declare this method as follows: + +``` +func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) { + ... +} +``` + +Option objects take two arguments, its identifier and the value it contains. 
+The identifier can be anything, but it's usually better to use a an unexported +empty struct so that only you have the ability to generate said option: + +``` +type identOptionalParamOne struct{} +type identOptionalParamTwo struct{} +type identOptionalParamThree struct{} + +func WithOptionOne(v ...) Option { + return option.New(identOptionalParamOne{}, v) +} +``` + +Then you can call the method we described above as + +``` +obj.Method(m1, m2, WithOptionOne(...), WithOptionTwo(...), WithOptionThree(...)) +``` + +Options should be parsed in a code that looks somewhat like this + +``` +func (obj *Object) Method(m1 Type1, m2 Type2, options ...Option) { + paramOne := defaultValueParamOne + for _, option := range options { + switch option.Ident() { + case identOptionalParamOne{}: + paramOne = option.Value().(...) + } + } + ... +} +``` + +# Simple usage + +Most of the times all you need to do is to declare the Option type as an alias +in your code: + +``` +package myawesomepkg + +import "github.com/lestrrat-go/option" + +type Option = option.Interface +``` + +Then you can start definig options like they are described in the SYNOPSIS section. + +# Differentiating Options + +When you have multiple methods and options, and those options can only be passed to +each one the methods, it's hard to see which options should be passed to which method. + +``` +func WithX() Option {} +func WithY() Option {} + +// Now, which of WithX/WithY go to which method? 
+func (*Obj) Method1(options ...Option) {} +func (*Obj) Method2(options ...Option) {} +``` + +In this case the easiest way to make it obvious is to put an extra layer around +the options so that they have different types + +``` +type Method1Option interface { + Option + method1Option() +} + +type method1Option struct { Option } +func (*method1Option) method1Option() {} + +func WithX() Method1Option { + return &methodOption{option.New(...)} +} + +func (*Obj) Method1(options ...Method1Option) {} +``` + +This way the compiler knows if an option can be passed to a given method. diff --git a/vendor/github.com/lestrrat-go/option/option.go b/vendor/github.com/lestrrat-go/option/option.go new file mode 100644 index 0000000000..418fa36f6b --- /dev/null +++ b/vendor/github.com/lestrrat-go/option/option.go @@ -0,0 +1,32 @@ +package option + +// Interface defines the minimum interface that an option must fulfill +type Interface interface { + // Ident returns the "indentity" of this option, a unique identifier that + // can be used to differentiate between options + Ident() interface{} + + // Value returns the corresponding value. + Value() interface{} +} + +type pair struct { + ident interface{} + value interface{} +} + +// New creates a new Option +func New(ident, value interface{}) Interface { + return &pair{ + ident: ident, + value: value, + } +} + +func (p *pair) Ident() interface{} { + return p.ident +} + +func (p *pair) Value() interface{} { + return p.value +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go deleted file mode 100644 index 8282bd7584..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/field.go +++ /dev/null @@ -1,17 +0,0 @@ -package ext - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// LogError sets the error=true tag on the Span and logs err as an "error" event. 
-func LogError(span opentracing.Span, err error, fields ...log.Field) { - Error.Set(span, true) - ef := []log.Field{ - log.Event("error"), - log.Error(err), - } - ef = append(ef, fields...) - span.LogFields(ef...) -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go deleted file mode 100644 index a414b5951f..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ /dev/null @@ -1,215 +0,0 @@ -package ext - -import "github.com/opentracing/opentracing-go" - -// These constants define common tag names recommended for better portability across -// tracing systems and languages/platforms. -// -// The tag names are defined as typed strings, so that in addition to the usual use -// -// span.setTag(TagName, value) -// -// they also support value type validation via this additional syntax: -// -// TagName.Set(span, value) -// -var ( - ////////////////////////////////////////////////////////////////////// - // SpanKind (client/server or producer/consumer) - ////////////////////////////////////////////////////////////////////// - - // SpanKind hints at relationship between spans, e.g. 
client/server - SpanKind = spanKindTagName("span.kind") - - // SpanKindRPCClient marks a span representing the client-side of an RPC - // or other remote call - SpanKindRPCClientEnum = SpanKindEnum("client") - SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} - - // SpanKindRPCServer marks a span representing the server-side of an RPC - // or other remote call - SpanKindRPCServerEnum = SpanKindEnum("server") - SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} - - // SpanKindProducer marks a span representing the producer-side of a - // message bus - SpanKindProducerEnum = SpanKindEnum("producer") - SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} - - // SpanKindConsumer marks a span representing the consumer-side of a - // message bus - SpanKindConsumerEnum = SpanKindEnum("consumer") - SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} - - ////////////////////////////////////////////////////////////////////// - // Component name - ////////////////////////////////////////////////////////////////////// - - // Component is a low-cardinality identifier of the module, library, - // or package that is generating a span. - Component = StringTagName("component") - - ////////////////////////////////////////////////////////////////////// - // Sampling hint - ////////////////////////////////////////////////////////////////////// - - // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = Uint16TagName("sampling.priority") - - ////////////////////////////////////////////////////////////////////// - // Peer tags. These tags can be emitted by either client-side or - // server-side to describe the other side/service in a peer-to-peer - // communications, like an RPC call. - ////////////////////////////////////////////////////////////////////// - - // PeerService records the service name of the peer. 
- PeerService = StringTagName("peer.service") - - // PeerAddress records the address name of the peer. This may be a "ip:port", - // a bare "hostname", a FQDN or even a database DSN substring - // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = StringTagName("peer.address") - - // PeerHostname records the host name of the peer - PeerHostname = StringTagName("peer.hostname") - - // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = IPv4TagName("peer.ipv4") - - // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = StringTagName("peer.ipv6") - - // PeerPort records port number of the peer - PeerPort = Uint16TagName("peer.port") - - ////////////////////////////////////////////////////////////////////// - // HTTP Tags - ////////////////////////////////////////////////////////////////////// - - // HTTPUrl should be the URL of the request being handled in this segment - // of the trace, in standard URI format. The protocol is optional. - HTTPUrl = StringTagName("http.url") - - // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = StringTagName("http.method") - - // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the - // HTTP response. - HTTPStatusCode = Uint16TagName("http.status_code") - - ////////////////////////////////////////////////////////////////////// - // DB Tags - ////////////////////////////////////////////////////////////////////// - - // DBInstance is database instance name. - DBInstance = StringTagName("db.instance") - - // DBStatement is a database statement for the given database type. - // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = StringTagName("db.statement") - - // DBType is a database type. For any SQL database, "sql". - // For others, the lower-case database category, e.g. "redis" - DBType = StringTagName("db.type") - - // DBUser is a username for accessing database. 
- DBUser = StringTagName("db.user") - - ////////////////////////////////////////////////////////////////////// - // Message Bus Tag - ////////////////////////////////////////////////////////////////////// - - // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = StringTagName("message_bus.destination") - - ////////////////////////////////////////////////////////////////////// - // Error Tag - ////////////////////////////////////////////////////////////////////// - - // Error indicates that operation represented by the span resulted in an error. - Error = BoolTagName("error") -) - -// --- - -// SpanKindEnum represents common span types -type SpanKindEnum string - -type spanKindTagName string - -// Set adds a string tag to the `span` -func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { - span.SetTag(string(tag), value) -} - -type rpcServerOption struct { - clientContext opentracing.SpanContext -} - -func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { - if r.clientContext != nil { - opentracing.ChildOf(r.clientContext).Apply(o) - } - SpanKindRPCServer.Apply(o) -} - -// RPCServerOption returns a StartSpanOption appropriate for an RPC server span -// with `client` representing the metadata for the remote peer Span if available. -// In case client == nil, due to the client not being instrumented, this RPC -// server span will be a root span. 
-func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { - return rpcServerOption{client} -} - -// --- - -// StringTagName is a common tag name to be set to a string value -type StringTagName string - -// Set adds a string tag to the `span` -func (tag StringTagName) Set(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint32TagName is a common tag name to be set to a uint32 value -type Uint32TagName string - -// Set adds a uint32 tag to the `span` -func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint16TagName is a common tag name to be set to a uint16 value -type Uint16TagName string - -// Set adds a uint16 tag to the `span` -func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { - span.SetTag(string(tag), value) -} - -// --- - -// BoolTagName is a common tag name to be set to a bool value -type BoolTagName string - -// Set adds a bool tag to the `span` -func (tag BoolTagName) Set(span opentracing.Span, value bool) { - span.SetTag(string(tag), value) -} - -// IPv4TagName is a common tag name to be set to an ipv4 value -type IPv4TagName string - -// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility -func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. 
E.g., "127.0.0.1" -func (tag IPv4TagName) SetString(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go deleted file mode 100644 index 2ce96d9d38..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go +++ /dev/null @@ -1,105 +0,0 @@ -package mocktracer - -import ( - "fmt" - "reflect" - "time" - - "github.com/opentracing/opentracing-go/log" -) - -// MockLogRecord represents data logged to a Span via Span.LogFields or -// Span.LogKV. -type MockLogRecord struct { - Timestamp time.Time - Fields []MockKeyValue -} - -// MockKeyValue represents a single key:value pair. -type MockKeyValue struct { - Key string - - // All MockLogRecord values are coerced to strings via fmt.Sprint(), though - // we retain their type separately. - ValueKind reflect.Kind - ValueString string -} - -// EmitString belongs to the log.Encoder interface -func (m *MockKeyValue) EmitString(key, value string) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitBool belongs to the log.Encoder interface -func (m *MockKeyValue) EmitBool(key string, value bool) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitInt belongs to the log.Encoder interface -func (m *MockKeyValue) EmitInt(key string, value int) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitInt32 belongs to the log.Encoder interface -func (m *MockKeyValue) EmitInt32(key string, value int32) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitInt64 belongs to the log.Encoder interface -func (m *MockKeyValue) EmitInt64(key string, value int64) { - m.Key = key - m.ValueKind = 
reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitUint32 belongs to the log.Encoder interface -func (m *MockKeyValue) EmitUint32(key string, value uint32) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitUint64 belongs to the log.Encoder interface -func (m *MockKeyValue) EmitUint64(key string, value uint64) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitFloat32 belongs to the log.Encoder interface -func (m *MockKeyValue) EmitFloat32(key string, value float32) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitFloat64 belongs to the log.Encoder interface -func (m *MockKeyValue) EmitFloat64(key string, value float64) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitObject belongs to the log.Encoder interface -func (m *MockKeyValue) EmitObject(key string, value interface{}) { - m.Key = key - m.ValueKind = reflect.TypeOf(value).Kind() - m.ValueString = fmt.Sprint(value) -} - -// EmitLazyLogger belongs to the log.Encoder interface -func (m *MockKeyValue) EmitLazyLogger(value log.LazyLogger) { - var meta MockKeyValue - value(&meta) - m.Key = meta.Key - m.ValueKind = meta.ValueKind - m.ValueString = meta.ValueString -} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go deleted file mode 100644 index 8c7932ce65..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go +++ /dev/null @@ -1,284 +0,0 @@ -package mocktracer - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" -) - -// MockSpanContext is an opentracing.SpanContext implementation. 
-// -// It is entirely unsuitable for production use, but appropriate for tests -// that want to verify tracing behavior in other frameworks/applications. -// -// By default all spans have Sampled=true flag, unless {"sampling.priority": 0} -// tag is set. -type MockSpanContext struct { - TraceID int - SpanID int - Sampled bool - Baggage map[string]string -} - -var mockIDSource = uint32(42) - -func nextMockID() int { - return int(atomic.AddUint32(&mockIDSource, 1)) -} - -// ForeachBaggageItem belongs to the SpanContext interface -func (c MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - for k, v := range c.Baggage { - if !handler(k, v) { - break - } - } -} - -// WithBaggageItem creates a new context with an extra baggage item. -func (c MockSpanContext) WithBaggageItem(key, value string) MockSpanContext { - var newBaggage map[string]string - if c.Baggage == nil { - newBaggage = map[string]string{key: value} - } else { - newBaggage = make(map[string]string, len(c.Baggage)+1) - for k, v := range c.Baggage { - newBaggage[k] = v - } - newBaggage[key] = value - } - // Use positional parameters so the compiler will help catch new fields. - return MockSpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage} -} - -// MockSpan is an opentracing.Span implementation that exports its internal -// state for testing purposes. -type MockSpan struct { - sync.RWMutex - - ParentID int - - OperationName string - StartTime time.Time - FinishTime time.Time - - // All of the below are protected by the embedded RWMutex. 
- SpanContext MockSpanContext - tags map[string]interface{} - logs []MockLogRecord - tracer *MockTracer -} - -func newMockSpan(t *MockTracer, name string, opts opentracing.StartSpanOptions) *MockSpan { - tags := opts.Tags - if tags == nil { - tags = map[string]interface{}{} - } - traceID := nextMockID() - parentID := int(0) - var baggage map[string]string - sampled := true - if len(opts.References) > 0 { - traceID = opts.References[0].ReferencedContext.(MockSpanContext).TraceID - parentID = opts.References[0].ReferencedContext.(MockSpanContext).SpanID - sampled = opts.References[0].ReferencedContext.(MockSpanContext).Sampled - baggage = opts.References[0].ReferencedContext.(MockSpanContext).Baggage - } - spanContext := MockSpanContext{traceID, nextMockID(), sampled, baggage} - startTime := opts.StartTime - if startTime.IsZero() { - startTime = time.Now() - } - return &MockSpan{ - ParentID: parentID, - OperationName: name, - StartTime: startTime, - tags: tags, - logs: []MockLogRecord{}, - SpanContext: spanContext, - - tracer: t, - } -} - -// Tags returns a copy of tags accumulated by the span so far -func (s *MockSpan) Tags() map[string]interface{} { - s.RLock() - defer s.RUnlock() - tags := make(map[string]interface{}) - for k, v := range s.tags { - tags[k] = v - } - return tags -} - -// Tag returns a single tag -func (s *MockSpan) Tag(k string) interface{} { - s.RLock() - defer s.RUnlock() - return s.tags[k] -} - -// Logs returns a copy of logs accumulated in the span so far -func (s *MockSpan) Logs() []MockLogRecord { - s.RLock() - defer s.RUnlock() - logs := make([]MockLogRecord, len(s.logs)) - copy(logs, s.logs) - return logs -} - -// Context belongs to the Span interface -func (s *MockSpan) Context() opentracing.SpanContext { - s.Lock() - defer s.Unlock() - return s.SpanContext -} - -// SetTag belongs to the Span interface -func (s *MockSpan) SetTag(key string, value interface{}) opentracing.Span { - s.Lock() - defer s.Unlock() - if key == 
string(ext.SamplingPriority) { - if v, ok := value.(uint16); ok { - s.SpanContext.Sampled = v > 0 - return s - } - if v, ok := value.(int); ok { - s.SpanContext.Sampled = v > 0 - return s - } - } - s.tags[key] = value - return s -} - -// SetBaggageItem belongs to the Span interface -func (s *MockSpan) SetBaggageItem(key, val string) opentracing.Span { - s.Lock() - defer s.Unlock() - s.SpanContext = s.SpanContext.WithBaggageItem(key, val) - return s -} - -// BaggageItem belongs to the Span interface -func (s *MockSpan) BaggageItem(key string) string { - s.RLock() - defer s.RUnlock() - return s.SpanContext.Baggage[key] -} - -// Finish belongs to the Span interface -func (s *MockSpan) Finish() { - s.Lock() - s.FinishTime = time.Now() - s.Unlock() - s.tracer.recordSpan(s) -} - -// FinishWithOptions belongs to the Span interface -func (s *MockSpan) FinishWithOptions(opts opentracing.FinishOptions) { - s.Lock() - s.FinishTime = opts.FinishTime - s.Unlock() - - // Handle any late-bound LogRecords. - for _, lr := range opts.LogRecords { - s.logFieldsWithTimestamp(lr.Timestamp, lr.Fields...) - } - // Handle (deprecated) BulkLogData. - for _, ld := range opts.BulkLogData { - if ld.Payload != nil { - s.logFieldsWithTimestamp( - ld.Timestamp, - log.String("event", ld.Event), - log.Object("payload", ld.Payload)) - } else { - s.logFieldsWithTimestamp( - ld.Timestamp, - log.String("event", ld.Event)) - } - } - - s.tracer.recordSpan(s) -} - -// String allows printing span for debugging -func (s *MockSpan) String() string { - return fmt.Sprintf( - "traceId=%d, spanId=%d, parentId=%d, sampled=%t, name=%s", - s.SpanContext.TraceID, s.SpanContext.SpanID, s.ParentID, - s.SpanContext.Sampled, s.OperationName) -} - -// LogFields belongs to the Span interface -func (s *MockSpan) LogFields(fields ...log.Field) { - s.logFieldsWithTimestamp(time.Now(), fields...) 
-} - -// The caller MUST NOT hold s.Lock -func (s *MockSpan) logFieldsWithTimestamp(ts time.Time, fields ...log.Field) { - lr := MockLogRecord{ - Timestamp: ts, - Fields: make([]MockKeyValue, len(fields)), - } - for i, f := range fields { - outField := &(lr.Fields[i]) - f.Marshal(outField) - } - - s.Lock() - defer s.Unlock() - s.logs = append(s.logs, lr) -} - -// LogKV belongs to the Span interface. -// -// This implementations coerces all "values" to strings, though that is not -// something all implementations need to do. Indeed, a motivated person can and -// probably should have this do a typed switch on the values. -func (s *MockSpan) LogKV(keyValues ...interface{}) { - if len(keyValues)%2 != 0 { - s.LogFields(log.Error(fmt.Errorf("Non-even keyValues len: %v", len(keyValues)))) - return - } - fields, err := log.InterleavedKVToFields(keyValues...) - if err != nil { - s.LogFields(log.Error(err), log.String("function", "LogKV")) - return - } - s.LogFields(fields...) -} - -// LogEvent belongs to the Span interface -func (s *MockSpan) LogEvent(event string) { - s.LogFields(log.String("event", event)) -} - -// LogEventWithPayload belongs to the Span interface -func (s *MockSpan) LogEventWithPayload(event string, payload interface{}) { - s.LogFields(log.String("event", event), log.Object("payload", payload)) -} - -// Log belongs to the Span interface -func (s *MockSpan) Log(data opentracing.LogData) { - panic("MockSpan.Log() no longer supported") -} - -// SetOperationName belongs to the Span interface -func (s *MockSpan) SetOperationName(operationName string) opentracing.Span { - s.Lock() - defer s.Unlock() - s.OperationName = operationName - return s -} - -// Tracer belongs to the Span interface -func (s *MockSpan) Tracer() opentracing.Tracer { - return s.tracer -} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go deleted file mode 100644 index 
4533da7b1f..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go +++ /dev/null @@ -1,105 +0,0 @@ -package mocktracer - -import ( - "sync" - - "github.com/opentracing/opentracing-go" -) - -// New returns a MockTracer opentracing.Tracer implementation that's intended -// to facilitate tests of OpenTracing instrumentation. -func New() *MockTracer { - t := &MockTracer{ - finishedSpans: []*MockSpan{}, - injectors: make(map[interface{}]Injector), - extractors: make(map[interface{}]Extractor), - } - - // register default injectors/extractors - textPropagator := new(TextMapPropagator) - t.RegisterInjector(opentracing.TextMap, textPropagator) - t.RegisterExtractor(opentracing.TextMap, textPropagator) - - httpPropagator := &TextMapPropagator{HTTPHeaders: true} - t.RegisterInjector(opentracing.HTTPHeaders, httpPropagator) - t.RegisterExtractor(opentracing.HTTPHeaders, httpPropagator) - - return t -} - -// MockTracer is only intended for testing OpenTracing instrumentation. -// -// It is entirely unsuitable for production use, but appropriate for tests -// that want to verify tracing behavior in other frameworks/applications. -type MockTracer struct { - sync.RWMutex - finishedSpans []*MockSpan - injectors map[interface{}]Injector - extractors map[interface{}]Extractor -} - -// FinishedSpans returns all spans that have been Finish()'ed since the -// MockTracer was constructed or since the last call to its Reset() method. -func (t *MockTracer) FinishedSpans() []*MockSpan { - t.RLock() - defer t.RUnlock() - spans := make([]*MockSpan, len(t.finishedSpans)) - copy(spans, t.finishedSpans) - return spans -} - -// Reset clears the internally accumulated finished spans. Note that any -// extant MockSpans will still append to finishedSpans when they Finish(), -// even after a call to Reset(). -func (t *MockTracer) Reset() { - t.Lock() - defer t.Unlock() - t.finishedSpans = []*MockSpan{} -} - -// StartSpan belongs to the Tracer interface. 
-func (t *MockTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { - sso := opentracing.StartSpanOptions{} - for _, o := range opts { - o.Apply(&sso) - } - return newMockSpan(t, operationName, sso) -} - -// RegisterInjector registers injector for given format -func (t *MockTracer) RegisterInjector(format interface{}, injector Injector) { - t.injectors[format] = injector -} - -// RegisterExtractor registers extractor for given format -func (t *MockTracer) RegisterExtractor(format interface{}, extractor Extractor) { - t.extractors[format] = extractor -} - -// Inject belongs to the Tracer interface. -func (t *MockTracer) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { - spanContext, ok := sm.(MockSpanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - injector, ok := t.injectors[format] - if !ok { - return opentracing.ErrUnsupportedFormat - } - return injector.Inject(spanContext, carrier) -} - -// Extract belongs to the Tracer interface. 
-func (t *MockTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { - extractor, ok := t.extractors[format] - if !ok { - return nil, opentracing.ErrUnsupportedFormat - } - return extractor.Extract(carrier) -} - -func (t *MockTracer) recordSpan(span *MockSpan) { - t.Lock() - defer t.Unlock() - t.finishedSpans = append(t.finishedSpans, span) -} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go deleted file mode 100644 index 8364f1d182..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go +++ /dev/null @@ -1,120 +0,0 @@ -package mocktracer - -import ( - "fmt" - "net/url" - "strconv" - "strings" - - "github.com/opentracing/opentracing-go" -) - -const mockTextMapIdsPrefix = "mockpfx-ids-" -const mockTextMapBaggagePrefix = "mockpfx-baggage-" - -var emptyContext = MockSpanContext{} - -// Injector is responsible for injecting SpanContext instances in a manner suitable -// for propagation via a format-specific "carrier" object. Typically the -// injection will take place across an RPC boundary, but message queues and -// other IPC mechanisms are also reasonable places to use an Injector. -type Injector interface { - // Inject takes `SpanContext` and injects it into `carrier`. The actual type - // of `carrier` depends on the `format` passed to `Tracer.Inject()`. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if injection fails. - Inject(ctx MockSpanContext, carrier interface{}) error -} - -// Extractor is responsible for extracting SpanContext instances from a -// format-specific "carrier" object. Typically the extraction will take place -// on the server side of an RPC boundary, but message queues and other IPC -// mechanisms are also reasonable places to use an Extractor. 
-type Extractor interface { - // Extract decodes a SpanContext instance from the given `carrier`, - // or (nil, opentracing.ErrSpanContextNotFound) if no context could - // be found in the `carrier`. - Extract(carrier interface{}) (MockSpanContext, error) -} - -// TextMapPropagator implements Injector/Extractor for TextMap and HTTPHeaders formats. -type TextMapPropagator struct { - HTTPHeaders bool -} - -// Inject implements the Injector interface -func (t *TextMapPropagator) Inject(spanContext MockSpanContext, carrier interface{}) error { - writer, ok := carrier.(opentracing.TextMapWriter) - if !ok { - return opentracing.ErrInvalidCarrier - } - // Ids: - writer.Set(mockTextMapIdsPrefix+"traceid", strconv.Itoa(spanContext.TraceID)) - writer.Set(mockTextMapIdsPrefix+"spanid", strconv.Itoa(spanContext.SpanID)) - writer.Set(mockTextMapIdsPrefix+"sampled", fmt.Sprint(spanContext.Sampled)) - // Baggage: - for baggageKey, baggageVal := range spanContext.Baggage { - safeVal := baggageVal - if t.HTTPHeaders { - safeVal = url.QueryEscape(baggageVal) - } - writer.Set(mockTextMapBaggagePrefix+baggageKey, safeVal) - } - return nil -} - -// Extract implements the Extractor interface -func (t *TextMapPropagator) Extract(carrier interface{}) (MockSpanContext, error) { - reader, ok := carrier.(opentracing.TextMapReader) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - rval := MockSpanContext{0, 0, true, nil} - err := reader.ForeachKey(func(key, val string) error { - lowerKey := strings.ToLower(key) - switch { - case lowerKey == mockTextMapIdsPrefix+"traceid": - // Ids: - i, err := strconv.Atoi(val) - if err != nil { - return err - } - rval.TraceID = i - case lowerKey == mockTextMapIdsPrefix+"spanid": - // Ids: - i, err := strconv.Atoi(val) - if err != nil { - return err - } - rval.SpanID = i - case lowerKey == mockTextMapIdsPrefix+"sampled": - b, err := strconv.ParseBool(val) - if err != nil { - return err - } - rval.Sampled = b - case 
strings.HasPrefix(lowerKey, mockTextMapBaggagePrefix): - // Baggage: - if rval.Baggage == nil { - rval.Baggage = make(map[string]string) - } - safeVal := val - if t.HTTPHeaders { - // unescape errors are ignored, nothing can be done - if rawVal, err := url.QueryUnescape(val); err == nil { - safeVal = rawVal - } - } - rval.Baggage[lowerKey[len(mockTextMapBaggagePrefix):]] = safeVal - } - return nil - }) - if rval.TraceID == 0 || rval.SpanID == 0 { - return emptyContext, opentracing.ErrSpanContextNotFound - } - if err != nil { - return emptyContext, err - } - return rval, nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/.npmignore b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/.npmignore deleted file mode 100644 index e796b66a81..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/.npmignore +++ /dev/null @@ -1 +0,0 @@ -*.go diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/README.md b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/README.md deleted file mode 100644 index 30dfcee902..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Ory Keto gRPC Client - -This package provides the generated gRPC client for [Ory Keto](https://ory.sh/keto). -Go to [the documentation](https://ory.sh/keto/docs) to learn more diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl.pb.go deleted file mode 100644 index 1cb0f641c5..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl.pb.go +++ /dev/null @@ -1,391 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.13.0 -// source: ory/keto/acl/v1alpha1/acl.proto - -package acl - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// RelationTuple defines a relation between an Object and a Subject. -type RelationTuple struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The namespace this relation tuple lives in. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // The object related by this tuple. - // It is an object in the namespace of the tuple. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // The relation between an Object and a Subject. - Relation string `protobuf:"bytes,3,opt,name=relation,proto3" json:"relation,omitempty"` - // The subject related by this tuple. - // A Subject either represents a concrete subject id or - // a `SubjectSet` that expands to more Subjects. 
- Subject *Subject `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"` -} - -func (x *RelationTuple) Reset() { - *x = RelationTuple{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RelationTuple) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RelationTuple) ProtoMessage() {} - -func (x *RelationTuple) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RelationTuple.ProtoReflect.Descriptor instead. -func (*RelationTuple) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_acl_proto_rawDescGZIP(), []int{0} -} - -func (x *RelationTuple) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *RelationTuple) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *RelationTuple) GetRelation() string { - if x != nil { - return x.Relation - } - return "" -} - -func (x *RelationTuple) GetSubject() *Subject { - if x != nil { - return x.Subject - } - return nil -} - -// Subject is either a concrete subject id or -// a `SubjectSet` expanding to more Subjects. -type Subject struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reference of this abstract subject. 
- // - // Types that are assignable to Ref: - // *Subject_Id - // *Subject_Set - Ref isSubject_Ref `protobuf_oneof:"ref"` -} - -func (x *Subject) Reset() { - *x = Subject{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Subject) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Subject) ProtoMessage() {} - -func (x *Subject) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Subject.ProtoReflect.Descriptor instead. -func (*Subject) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_acl_proto_rawDescGZIP(), []int{1} -} - -func (m *Subject) GetRef() isSubject_Ref { - if m != nil { - return m.Ref - } - return nil -} - -func (x *Subject) GetId() string { - if x, ok := x.GetRef().(*Subject_Id); ok { - return x.Id - } - return "" -} - -func (x *Subject) GetSet() *SubjectSet { - if x, ok := x.GetRef().(*Subject_Set); ok { - return x.Set - } - return nil -} - -type isSubject_Ref interface { - isSubject_Ref() -} - -type Subject_Id struct { - // A concrete id of the subject. - Id string `protobuf:"bytes,1,opt,name=id,proto3,oneof"` -} - -type Subject_Set struct { - // A subject set that expands to more Subjects. - // More information are available under [concepts](../concepts/subjects.mdx). - Set *SubjectSet `protobuf:"bytes,2,opt,name=set,proto3,oneof"` -} - -func (*Subject_Id) isSubject_Ref() {} - -func (*Subject_Set) isSubject_Ref() {} - -// SubjectSet refers to all subjects who have -// the same `relation` on an `object`. 
-type SubjectSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The namespace of the object and relation - // referenced in this subject set. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // The object related by this subject set. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // The relation between the object and the subjects. - Relation string `protobuf:"bytes,3,opt,name=relation,proto3" json:"relation,omitempty"` -} - -func (x *SubjectSet) Reset() { - *x = SubjectSet{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SubjectSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubjectSet) ProtoMessage() {} - -func (x *SubjectSet) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubjectSet.ProtoReflect.Descriptor instead. 
-func (*SubjectSet) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_acl_proto_rawDescGZIP(), []int{2} -} - -func (x *SubjectSet) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *SubjectSet) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *SubjectSet) GetRelation() string { - if x != nil { - return x.Relation - } - return "" -} - -var File_ory_keto_acl_v1alpha1_acl_proto protoreflect.FileDescriptor - -var file_ory_keto_acl_v1alpha1_acl_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x63, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x15, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x22, 0x9b, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x6c, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, - 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x73, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x59, 0x0a, 0x07, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x10, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x03, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x73, 0x65, 0x74, 0x42, 0x05, 0x0a, 0x03, 0x72, 0x65, - 0x66, 0x22, 0x5e, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x8b, 0x01, 0x0a, 0x18, 0x73, 0x68, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, - 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x08, - 0x41, 0x63, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, - 0x63, 0x6c, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x61, 0x63, 0x6c, 0xaa, - 0x02, 0x15, 0x4f, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x6c, 0x2e, 0x56, - 0x31, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x4f, 0x72, 0x79, 0x5c, 0x4b, 0x65, - 0x74, 0x6f, 0x5c, 0x41, 0x63, 0x6c, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - 
file_ory_keto_acl_v1alpha1_acl_proto_rawDescOnce sync.Once - file_ory_keto_acl_v1alpha1_acl_proto_rawDescData = file_ory_keto_acl_v1alpha1_acl_proto_rawDesc -) - -func file_ory_keto_acl_v1alpha1_acl_proto_rawDescGZIP() []byte { - file_ory_keto_acl_v1alpha1_acl_proto_rawDescOnce.Do(func() { - file_ory_keto_acl_v1alpha1_acl_proto_rawDescData = protoimpl.X.CompressGZIP(file_ory_keto_acl_v1alpha1_acl_proto_rawDescData) - }) - return file_ory_keto_acl_v1alpha1_acl_proto_rawDescData -} - -var file_ory_keto_acl_v1alpha1_acl_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_ory_keto_acl_v1alpha1_acl_proto_goTypes = []interface{}{ - (*RelationTuple)(nil), // 0: ory.keto.acl.v1alpha1.RelationTuple - (*Subject)(nil), // 1: ory.keto.acl.v1alpha1.Subject - (*SubjectSet)(nil), // 2: ory.keto.acl.v1alpha1.SubjectSet -} -var file_ory_keto_acl_v1alpha1_acl_proto_depIdxs = []int32{ - 1, // 0: ory.keto.acl.v1alpha1.RelationTuple.subject:type_name -> ory.keto.acl.v1alpha1.Subject - 2, // 1: ory.keto.acl.v1alpha1.Subject.set:type_name -> ory.keto.acl.v1alpha1.SubjectSet - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_ory_keto_acl_v1alpha1_acl_proto_init() } -func file_ory_keto_acl_v1alpha1_acl_proto_init() { - if File_ory_keto_acl_v1alpha1_acl_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RelationTuple); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Subject); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SubjectSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_ory_keto_acl_v1alpha1_acl_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*Subject_Id)(nil), - (*Subject_Set)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ory_keto_acl_v1alpha1_acl_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_ory_keto_acl_v1alpha1_acl_proto_goTypes, - DependencyIndexes: file_ory_keto_acl_v1alpha1_acl_proto_depIdxs, - MessageInfos: file_ory_keto_acl_v1alpha1_acl_proto_msgTypes, - }.Build() - File_ory_keto_acl_v1alpha1_acl_proto = out.File - file_ory_keto_acl_v1alpha1_acl_proto_rawDesc = nil - file_ory_keto_acl_v1alpha1_acl_proto_goTypes = nil - file_ory_keto_acl_v1alpha1_acl_proto_depIdxs = nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl.proto b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl.proto deleted file mode 100644 index 8f169125ed..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package ory.keto.acl.v1alpha1; - -option go_package = "github.com/ory/keto/proto/ory/keto/acl/v1alpha1;acl"; -option csharp_namespace = "Ory.Keto.Acl.V1Alpha1"; -option java_multiple_files = true; -option java_outer_classname = "AclProto"; -option java_package = "sh.ory.keto.acl.v1alpha1"; -option php_namespace = "Ory\\Keto\\Acl\\V1alpha1"; - - -// RelationTuple defines a relation between an Object and a Subject. -message RelationTuple { - // The namespace this relation tuple lives in. 
- string namespace = 1; - // The object related by this tuple. - // It is an object in the namespace of the tuple. - string object = 2; - // The relation between an Object and a Subject. - string relation = 3; - // The subject related by this tuple. - // A Subject either represents a concrete subject id or - // a `SubjectSet` that expands to more Subjects. - Subject subject = 4; -} - -// Subject is either a concrete subject id or -// a `SubjectSet` expanding to more Subjects. -message Subject { - // The reference of this abstract subject. - oneof ref { - // A concrete id of the subject. - string id = 1; - // A subject set that expands to more Subjects. - // More information are available under [concepts](../concepts/subjects.mdx). - SubjectSet set = 2; - } -} - -// SubjectSet refers to all subjects who have -// the same `relation` on an `object`. -message SubjectSet { - // The namespace of the object and relation - // referenced in this subject set. - string namespace = 1; - // The object related by this subject set. - string object = 2; - // The relation between the object and the subjects. 
- string relation = 3; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_grpc_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_grpc_pb.js deleted file mode 100644 index 97b3a2461d..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_grpc_pb.js +++ /dev/null @@ -1 +0,0 @@ -// GENERATED CODE -- NO SERVICES IN PROTO \ No newline at end of file diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_pb.d.ts deleted file mode 100644 index b69b3b5978..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_pb.d.ts +++ /dev/null @@ -1,115 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/acl.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as jspb from "google-protobuf"; - -export class RelationTuple extends jspb.Message { - getNamespace(): string; - setNamespace(value: string): RelationTuple; - - getObject(): string; - setObject(value: string): RelationTuple; - - getRelation(): string; - setRelation(value: string): RelationTuple; - - - hasSubject(): boolean; - clearSubject(): void; - getSubject(): Subject | undefined; - setSubject(value?: Subject): RelationTuple; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): RelationTuple.AsObject; - static toObject(includeInstance: boolean, msg: RelationTuple): RelationTuple.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: RelationTuple, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): RelationTuple; - static deserializeBinaryFromReader(message: RelationTuple, reader: jspb.BinaryReader): RelationTuple; -} - -export namespace RelationTuple { - export type AsObject = { - namespace: string, - object: string, - relation: string, - 
subject?: Subject.AsObject, - } -} - -export class Subject extends jspb.Message { - - hasId(): boolean; - clearId(): void; - getId(): string; - setId(value: string): Subject; - - - hasSet(): boolean; - clearSet(): void; - getSet(): SubjectSet | undefined; - setSet(value?: SubjectSet): Subject; - - - getRefCase(): Subject.RefCase; - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): Subject.AsObject; - static toObject(includeInstance: boolean, msg: Subject): Subject.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: Subject, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): Subject; - static deserializeBinaryFromReader(message: Subject, reader: jspb.BinaryReader): Subject; -} - -export namespace Subject { - export type AsObject = { - id: string, - set?: SubjectSet.AsObject, - } - - export enum RefCase { - REF_NOT_SET = 0, - - ID = 1, - - SET = 2, - - } - -} - -export class SubjectSet extends jspb.Message { - getNamespace(): string; - setNamespace(value: string): SubjectSet; - - getObject(): string; - setObject(value: string): SubjectSet; - - getRelation(): string; - setRelation(value: string): SubjectSet; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): SubjectSet.AsObject; - static toObject(includeInstance: boolean, msg: SubjectSet): SubjectSet.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: SubjectSet, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): SubjectSet; - static deserializeBinaryFromReader(message: SubjectSet, reader: jspb.BinaryReader): SubjectSet; -} - -export namespace SubjectSet { - export type AsObject = { - namespace: string, - object: string, - relation: string, - 
} -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_pb.js deleted file mode 100644 index ffed4ed94b..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/acl_pb.js +++ /dev/null @@ -1,738 +0,0 @@ -// source: ory/keto/acl/v1alpha1/acl.proto -/** - * @fileoverview - * @enhanceable - * @suppress {messageConventions} JS Compiler reports an error if a variable or - * field starts with 'MSG_' and isn't a translatable message. - * @public - */ -// GENERATED CODE -- DO NOT EDIT! - -var jspb = require('google-protobuf'); -var goog = jspb; -var global = Function('return this')(); - -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.RelationTuple', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.Subject', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.Subject.RefCase', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.SubjectSet', null, global); -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.RelationTuple = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.RelationTuple, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.RelationTuple.displayName = 'proto.ory.keto.acl.v1alpha1.RelationTuple'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. 
The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.Subject = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, proto.ory.keto.acl.v1alpha1.Subject.oneofGroups_); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.Subject, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.Subject.displayName = 'proto.ory.keto.acl.v1alpha1.Subject'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.SubjectSet = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.SubjectSet, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.SubjectSet.displayName = 'proto.ory.keto.acl.v1alpha1.SubjectSet'; -} - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.RelationTuple.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.RelationTuple} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.toObject = function(includeInstance, msg) { - var f, obj = { - namespace: jspb.Message.getFieldWithDefault(msg, 1, ""), - object: jspb.Message.getFieldWithDefault(msg, 2, ""), - relation: jspb.Message.getFieldWithDefault(msg, 3, ""), - subject: (f = msg.getSubject()) && proto.ory.keto.acl.v1alpha1.Subject.toObject(includeInstance, f) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.RelationTuple; - return proto.ory.keto.acl.v1alpha1.RelationTuple.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.RelationTuple} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.setNamespace(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setObject(value); - break; - case 3: - var value = /** @type {string} */ (reader.readString()); - msg.setRelation(value); - break; - case 4: - var value = new proto.ory.keto.acl.v1alpha1.Subject; - reader.readMessage(value,proto.ory.keto.acl.v1alpha1.Subject.deserializeBinaryFromReader); - msg.setSubject(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.RelationTuple.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.RelationTuple} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getNamespace(); - if (f.length > 0) { - writer.writeString( - 1, - f - ); - } - f = message.getObject(); - if (f.length > 0) { - writer.writeString( - 2, - f - ); - } - f = message.getRelation(); - if (f.length > 0) { - writer.writeString( - 3, - f - ); - } - f = message.getSubject(); - if (f != null) { - writer.writeMessage( - 4, - f, - proto.ory.keto.acl.v1alpha1.Subject.serializeBinaryToWriter - ); - } -}; - - -/** - * optional string namespace = 1; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.getNamespace = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} returns this - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.setNamespace = function(value) { - return jspb.Message.setProto3StringField(this, 1, value); -}; - - -/** - * optional string object = 2; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.getObject = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} returns this - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.setObject = function(value) { - return jspb.Message.setProto3StringField(this, 2, value); -}; - - -/** - * optional string relation = 3; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.getRelation = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, "")); -}; - - -/** - * @param {string} value - * @return 
{!proto.ory.keto.acl.v1alpha1.RelationTuple} returns this - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.setRelation = function(value) { - return jspb.Message.setProto3StringField(this, 3, value); -}; - - -/** - * optional Subject subject = 4; - * @return {?proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.getSubject = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.Subject} */ ( - jspb.Message.getWrapperField(this, proto.ory.keto.acl.v1alpha1.Subject, 4)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.Subject|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} returns this -*/ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.setSubject = function(value) { - return jspb.Message.setWrapperField(this, 4, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} returns this - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.clearSubject = function() { - return this.setSubject(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.RelationTuple.prototype.hasSubject = function() { - return jspb.Message.getField(this, 4) != null; -}; - - - -/** - * Oneof group definitions for this message. Each group defines the field - * numbers belonging to that group. When of these fields' value is set, all - * other fields in the group are cleared. During deserialization, if multiple - * fields are encountered for a group, only the last value seen will be kept. 
- * @private {!Array>} - * @const - */ -proto.ory.keto.acl.v1alpha1.Subject.oneofGroups_ = [[1,2]]; - -/** - * @enum {number} - */ -proto.ory.keto.acl.v1alpha1.Subject.RefCase = { - REF_NOT_SET: 0, - ID: 1, - SET: 2 -}; - -/** - * @return {proto.ory.keto.acl.v1alpha1.Subject.RefCase} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.getRefCase = function() { - return /** @type {proto.ory.keto.acl.v1alpha1.Subject.RefCase} */(jspb.Message.computeOneofCase(this, proto.ory.keto.acl.v1alpha1.Subject.oneofGroups_[0])); -}; - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.Subject.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.Subject} msg The msg instance to transform. 
- * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.Subject.toObject = function(includeInstance, msg) { - var f, obj = { - id: jspb.Message.getFieldWithDefault(msg, 1, ""), - set: (f = msg.getSet()) && proto.ory.keto.acl.v1alpha1.SubjectSet.toObject(includeInstance, f) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.Subject.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.Subject; - return proto.ory.keto.acl.v1alpha1.Subject.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.Subject} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.Subject.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.setId(value); - break; - case 2: - var value = new proto.ory.keto.acl.v1alpha1.SubjectSet; - reader.readMessage(value,proto.ory.keto.acl.v1alpha1.SubjectSet.deserializeBinaryFromReader); - msg.setSet(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). 
- * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.Subject.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.ory.keto.acl.v1alpha1.Subject} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.Subject.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = /** @type {string} */ (jspb.Message.getField(message, 1)); - if (f != null) { - writer.writeString( - 1, - f - ); - } - f = message.getSet(); - if (f != null) { - writer.writeMessage( - 2, - f, - proto.ory.keto.acl.v1alpha1.SubjectSet.serializeBinaryToWriter - ); - } -}; - - -/** - * optional string id = 1; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.getId = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.Subject} returns this - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.setId = function(value) { - return jspb.Message.setOneofField(this, 1, proto.ory.keto.acl.v1alpha1.Subject.oneofGroups_[0], value); -}; - - -/** - * Clears the field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.Subject} returns this - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.clearId = function() { - return jspb.Message.setOneofField(this, 1, proto.ory.keto.acl.v1alpha1.Subject.oneofGroups_[0], undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.hasId = function() { - return jspb.Message.getField(this, 1) != null; -}; - - -/** - * optional SubjectSet set = 2; - * @return {?proto.ory.keto.acl.v1alpha1.SubjectSet} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.getSet = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.SubjectSet} */ ( - jspb.Message.getWrapperField(this, proto.ory.keto.acl.v1alpha1.SubjectSet, 2)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.SubjectSet|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.Subject} returns this -*/ -proto.ory.keto.acl.v1alpha1.Subject.prototype.setSet = function(value) { - return jspb.Message.setOneofWrapperField(this, 2, proto.ory.keto.acl.v1alpha1.Subject.oneofGroups_[0], value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.Subject} returns this - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.clearSet = function() { - return this.setSet(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.Subject.prototype.hasSet = function() { - return jspb.Message.getField(this, 2) != null; -}; - - - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.SubjectSet.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.SubjectSet} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.toObject = function(includeInstance, msg) { - var f, obj = { - namespace: jspb.Message.getFieldWithDefault(msg, 1, ""), - object: jspb.Message.getFieldWithDefault(msg, 2, ""), - relation: jspb.Message.getFieldWithDefault(msg, 3, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.SubjectSet} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.SubjectSet; - return proto.ory.keto.acl.v1alpha1.SubjectSet.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.SubjectSet} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.SubjectSet} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.setNamespace(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setObject(value); - break; - case 3: - var value = /** @type {string} */ (reader.readString()); - msg.setRelation(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.SubjectSet.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.SubjectSet} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getNamespace(); - if (f.length > 0) { - writer.writeString( - 1, - f - ); - } - f = message.getObject(); - if (f.length > 0) { - writer.writeString( - 2, - f - ); - } - f = message.getRelation(); - if (f.length > 0) { - writer.writeString( - 3, - f - ); - } -}; - - -/** - * optional string namespace = 1; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.getNamespace = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.SubjectSet} returns this - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.setNamespace = function(value) { - return jspb.Message.setProto3StringField(this, 1, value); -}; - - -/** - * optional string object = 2; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.getObject = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.SubjectSet} returns this - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.setObject = function(value) { - return jspb.Message.setProto3StringField(this, 2, value); -}; - - -/** - * optional string relation = 3; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.getRelation = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.SubjectSet} returns this - */ -proto.ory.keto.acl.v1alpha1.SubjectSet.prototype.setRelation = function(value) { - return jspb.Message.setProto3StringField(this, 
3, value); -}; - - -goog.object.extend(exports, proto.ory.keto.acl.v1alpha1); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service.pb.go deleted file mode 100644 index e28b9da801..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service.pb.go +++ /dev/null @@ -1,354 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.13.0 -// source: ory/keto/acl/v1alpha1/check_service.proto - -package acl - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The request for a CheckService.Check RPC. -// Checks whether a specific subject is related to an object. -type CheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The namespace to evaluate the check. - // - // Note: If you use the expand-API and the check - // evaluates a RelationTuple specifying a SubjectSet as - // subject or due to a rewrite rule in a namespace config - // this check request may involve other namespaces automatically. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // The related object in this check. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // The relation between the Object and the Subject. - Relation string `protobuf:"bytes,3,opt,name=relation,proto3" json:"relation,omitempty"` - // The related subject in this check. 
- Subject *Subject `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"` - // This field is not implemented yet and has no effect. - // - Latest bool `protobuf:"varint,5,opt,name=latest,proto3" json:"latest,omitempty"` - // This field is not implemented yet and has no effect. - // - Snaptoken string `protobuf:"bytes,6,opt,name=snaptoken,proto3" json:"snaptoken,omitempty"` -} - -func (x *CheckRequest) Reset() { - *x = CheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CheckRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CheckRequest) ProtoMessage() {} - -func (x *CheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CheckRequest.ProtoReflect.Descriptor instead. -func (*CheckRequest) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_check_service_proto_rawDescGZIP(), []int{0} -} - -func (x *CheckRequest) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *CheckRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *CheckRequest) GetRelation() string { - if x != nil { - return x.Relation - } - return "" -} - -func (x *CheckRequest) GetSubject() *Subject { - if x != nil { - return x.Subject - } - return nil -} - -func (x *CheckRequest) GetLatest() bool { - if x != nil { - return x.Latest - } - return false -} - -func (x *CheckRequest) GetSnaptoken() string { - if x != nil { - return x.Snaptoken - } - return "" -} - -// The response for a CheckService.Check rpc. 
-type CheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Whether the specified subject (id) - // is related to the requested object. - // - // It is false by default if no ACL matches. - Allowed bool `protobuf:"varint,1,opt,name=allowed,proto3" json:"allowed,omitempty"` - // This field is not implemented yet and has no effect. - // - Snaptoken string `protobuf:"bytes,2,opt,name=snaptoken,proto3" json:"snaptoken,omitempty"` -} - -func (x *CheckResponse) Reset() { - *x = CheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CheckResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CheckResponse) ProtoMessage() {} - -func (x *CheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CheckResponse.ProtoReflect.Descriptor instead. 
-func (*CheckResponse) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_check_service_proto_rawDescGZIP(), []int{1} -} - -func (x *CheckResponse) GetAllowed() bool { - if x != nil { - return x.Allowed - } - return false -} - -func (x *CheckResponse) GetSnaptoken() string { - if x != nil { - return x.Snaptoken - } - return "" -} - -var File_ory_keto_acl_v1alpha1_check_service_proto protoreflect.FileDescriptor - -var file_ory_keto_acl_v1alpha1_check_service_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x6f, 0x72, 0x79, - 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x1a, 0x1f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x63, 0x6c, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd0, 0x01, 0x0a, 0x0c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, - 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x53, 0x75, 0x62, 0x6a, 
0x65, 0x63, 0x74, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6e, 0x61, - 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x47, 0x0a, 0x0d, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x32, - 0x62, 0x0a, 0x0c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x52, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x23, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, - 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, - 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x94, 0x01, 0x0a, 0x18, 0x73, 0x68, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, - 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x42, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 
0x61, 0x63, 0x6c, 0x2f, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x61, 0x63, 0x6c, 0xaa, 0x02, 0x15, 0x4f, 0x72, 0x79, - 0x2e, 0x4b, 0x65, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x6c, 0x2e, 0x56, 0x31, 0x41, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0xca, 0x02, 0x15, 0x4f, 0x72, 0x79, 0x5c, 0x4b, 0x65, 0x74, 0x6f, 0x5c, 0x41, 0x63, - 0x6c, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_ory_keto_acl_v1alpha1_check_service_proto_rawDescOnce sync.Once - file_ory_keto_acl_v1alpha1_check_service_proto_rawDescData = file_ory_keto_acl_v1alpha1_check_service_proto_rawDesc -) - -func file_ory_keto_acl_v1alpha1_check_service_proto_rawDescGZIP() []byte { - file_ory_keto_acl_v1alpha1_check_service_proto_rawDescOnce.Do(func() { - file_ory_keto_acl_v1alpha1_check_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_ory_keto_acl_v1alpha1_check_service_proto_rawDescData) - }) - return file_ory_keto_acl_v1alpha1_check_service_proto_rawDescData -} - -var file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_ory_keto_acl_v1alpha1_check_service_proto_goTypes = []interface{}{ - (*CheckRequest)(nil), // 0: ory.keto.acl.v1alpha1.CheckRequest - (*CheckResponse)(nil), // 1: ory.keto.acl.v1alpha1.CheckResponse - (*Subject)(nil), // 2: ory.keto.acl.v1alpha1.Subject -} -var file_ory_keto_acl_v1alpha1_check_service_proto_depIdxs = []int32{ - 2, // 0: ory.keto.acl.v1alpha1.CheckRequest.subject:type_name -> ory.keto.acl.v1alpha1.Subject - 0, // 1: ory.keto.acl.v1alpha1.CheckService.Check:input_type -> ory.keto.acl.v1alpha1.CheckRequest - 1, // 2: ory.keto.acl.v1alpha1.CheckService.Check:output_type -> ory.keto.acl.v1alpha1.CheckResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list 
for field type_name -} - -func init() { file_ory_keto_acl_v1alpha1_check_service_proto_init() } -func file_ory_keto_acl_v1alpha1_check_service_proto_init() { - if File_ory_keto_acl_v1alpha1_check_service_proto != nil { - return - } - file_ory_keto_acl_v1alpha1_acl_proto_init() - if !protoimpl.UnsafeEnabled { - file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ory_keto_acl_v1alpha1_check_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_ory_keto_acl_v1alpha1_check_service_proto_goTypes, - DependencyIndexes: file_ory_keto_acl_v1alpha1_check_service_proto_depIdxs, - MessageInfos: file_ory_keto_acl_v1alpha1_check_service_proto_msgTypes, - }.Build() - File_ory_keto_acl_v1alpha1_check_service_proto = out.File - file_ory_keto_acl_v1alpha1_check_service_proto_rawDesc = nil - file_ory_keto_acl_v1alpha1_check_service_proto_goTypes = nil - file_ory_keto_acl_v1alpha1_check_service_proto_depIdxs = nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service.proto b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service.proto deleted file mode 100644 index 88135a9661..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; - -package 
ory.keto.acl.v1alpha1; - -import "ory/keto/acl/v1alpha1/acl.proto"; - -option go_package = "github.com/ory/keto/proto/ory/keto/acl/v1alpha1;acl"; -option csharp_namespace = "Ory.Keto.Acl.V1Alpha1"; -option java_multiple_files = true; -option java_outer_classname = "CheckServiceProto"; -option java_package = "sh.ory.keto.acl.v1alpha1"; -option php_namespace = "Ory\\Keto\\Acl\\V1alpha1"; - -// The service that performs authorization checks -// based on the stored Access Control Lists. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis). -service CheckService { - // Performs an authorization check. - rpc Check(CheckRequest) returns (CheckResponse); -} - -// The request for a CheckService.Check RPC. -// Checks whether a specific subject is related to an object. -message CheckRequest { - // The namespace to evaluate the check. - // - // Note: If you use the expand-API and the check - // evaluates a RelationTuple specifying a SubjectSet as - // subject or due to a rewrite rule in a namespace config - // this check request may involve other namespaces automatically. - string namespace = 1; - // The related object in this check. - string object = 2; - // The relation between the Object and the Subject. - string relation = 3; - // The related subject in this check. - Subject subject = 4; - // This field is not implemented yet and has no effect. - // - bool latest = 5; - // This field is not implemented yet and has no effect. - // - string snaptoken = 6; -} - -// The response for a CheckService.Check rpc. -message CheckResponse { - // Whether the specified subject (id) - // is related to the requested object. - // - // It is false by default if no ACL matches. - bool allowed = 1; - // This field is not implemented yet and has no effect. 
- // - string snaptoken = 2; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc.pb.go deleted file mode 100644 index 629b65779f..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc.pb.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package acl - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// CheckServiceClient is the client API for CheckService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type CheckServiceClient interface { - // Performs an authorization check. - Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) -} - -type checkServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewCheckServiceClient(cc grpc.ClientConnInterface) CheckServiceClient { - return &checkServiceClient{cc} -} - -func (c *checkServiceClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { - out := new(CheckResponse) - err := c.cc.Invoke(ctx, "/ory.keto.acl.v1alpha1.CheckService/Check", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// CheckServiceServer is the server API for CheckService service. -// All implementations should embed UnimplementedCheckServiceServer -// for forward compatibility -type CheckServiceServer interface { - // Performs an authorization check. 
- Check(context.Context, *CheckRequest) (*CheckResponse, error) -} - -// UnimplementedCheckServiceServer should be embedded to have forward compatible implementations. -type UnimplementedCheckServiceServer struct { -} - -func (UnimplementedCheckServiceServer) Check(context.Context, *CheckRequest) (*CheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") -} - -// UnsafeCheckServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to CheckServiceServer will -// result in compilation errors. -type UnsafeCheckServiceServer interface { - mustEmbedUnimplementedCheckServiceServer() -} - -func RegisterCheckServiceServer(s grpc.ServiceRegistrar, srv CheckServiceServer) { - s.RegisterService(&CheckService_ServiceDesc, srv) -} - -func _CheckService_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CheckServiceServer).Check(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ory.keto.acl.v1alpha1.CheckService/Check", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CheckServiceServer).Check(ctx, req.(*CheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// CheckService_ServiceDesc is the grpc.ServiceDesc for CheckService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var CheckService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ory.keto.acl.v1alpha1.CheckService", - HandlerType: (*CheckServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Check", - Handler: _CheckService_Check_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "ory/keto/acl/v1alpha1/check_service.proto", -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc_pb.d.ts deleted file mode 100644 index 393a6edc9c..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc_pb.d.ts +++ /dev/null @@ -1,42 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/check_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as grpc from "grpc"; -import * as ory_keto_acl_v1alpha1_check_service_pb from "../../../../ory/keto/acl/v1alpha1/check_service_pb"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; - -interface ICheckServiceService extends grpc.ServiceDefinition { - check: ICheckServiceService_ICheck; -} - -interface ICheckServiceService_ICheck extends grpc.MethodDefinition { - path: "/ory.keto.acl.v1alpha1.CheckService/Check"; - requestStream: false; - responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; - responseSerialize: grpc.serialize; - responseDeserialize: grpc.deserialize; -} - -export const CheckServiceService: ICheckServiceService; - -export interface ICheckServiceServer { - check: grpc.handleUnaryCall; -} - -export interface ICheckServiceClient { - check(request: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse) => void): 
grpc.ClientUnaryCall; - check(request: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse) => void): grpc.ClientUnaryCall; - check(request: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse) => void): grpc.ClientUnaryCall; -} - -export class CheckServiceClient extends grpc.Client implements ICheckServiceClient { - constructor(address: string, credentials: grpc.ChannelCredentials, options?: object); - public check(request: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse) => void): grpc.ClientUnaryCall; - public check(request: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse) => void): grpc.ClientUnaryCall; - public check(request: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse) => void): grpc.ClientUnaryCall; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc_pb.js deleted file mode 100644 index 9d3e138650..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_grpc_pb.js +++ /dev/null @@ -1,50 +0,0 @@ -// GENERATED CODE -- DO NOT EDIT! 
- -'use strict'; -var grpc = require('@grpc/grpc-js'); -var ory_keto_acl_v1alpha1_check_service_pb = require('../../../../ory/keto/acl/v1alpha1/check_service_pb.js'); -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); - -function serialize_ory_keto_acl_v1alpha1_CheckRequest(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_check_service_pb.CheckRequest)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.CheckRequest'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_CheckRequest(buffer_arg) { - return ory_keto_acl_v1alpha1_check_service_pb.CheckRequest.deserializeBinary(new Uint8Array(buffer_arg)); -} - -function serialize_ory_keto_acl_v1alpha1_CheckResponse(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_check_service_pb.CheckResponse)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.CheckResponse'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_CheckResponse(buffer_arg) { - return ory_keto_acl_v1alpha1_check_service_pb.CheckResponse.deserializeBinary(new Uint8Array(buffer_arg)); -} - - -// The service that performs authorization checks -// based on the stored Access Control Lists. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis). -var CheckServiceService = exports.CheckServiceService = { - // Performs an authorization check. 
-check: { - path: '/ory.keto.acl.v1alpha1.CheckService/Check', - requestStream: false, - responseStream: false, - requestType: ory_keto_acl_v1alpha1_check_service_pb.CheckRequest, - responseType: ory_keto_acl_v1alpha1_check_service_pb.CheckResponse, - requestSerialize: serialize_ory_keto_acl_v1alpha1_CheckRequest, - requestDeserialize: deserialize_ory_keto_acl_v1alpha1_CheckRequest, - responseSerialize: serialize_ory_keto_acl_v1alpha1_CheckResponse, - responseDeserialize: deserialize_ory_keto_acl_v1alpha1_CheckResponse, - }, -}; - -exports.CheckServiceClient = grpc.makeGenericClientConstructor(CheckServiceService); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_pb.d.ts deleted file mode 100644 index 7c13b6ea89..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_pb.d.ts +++ /dev/null @@ -1,77 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/check_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as jspb from "google-protobuf"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; - -export class CheckRequest extends jspb.Message { - getNamespace(): string; - setNamespace(value: string): CheckRequest; - - getObject(): string; - setObject(value: string): CheckRequest; - - getRelation(): string; - setRelation(value: string): CheckRequest; - - - hasSubject(): boolean; - clearSubject(): void; - getSubject(): ory_keto_acl_v1alpha1_acl_pb.Subject | undefined; - setSubject(value?: ory_keto_acl_v1alpha1_acl_pb.Subject): CheckRequest; - - getLatest(): boolean; - setLatest(value: boolean): CheckRequest; - - getSnaptoken(): string; - setSnaptoken(value: string): CheckRequest; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): CheckRequest.AsObject; - static toObject(includeInstance: boolean, msg: CheckRequest): 
CheckRequest.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: CheckRequest, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): CheckRequest; - static deserializeBinaryFromReader(message: CheckRequest, reader: jspb.BinaryReader): CheckRequest; -} - -export namespace CheckRequest { - export type AsObject = { - namespace: string, - object: string, - relation: string, - subject?: ory_keto_acl_v1alpha1_acl_pb.Subject.AsObject, - latest: boolean, - snaptoken: string, - } -} - -export class CheckResponse extends jspb.Message { - getAllowed(): boolean; - setAllowed(value: boolean): CheckResponse; - - getSnaptoken(): string; - setSnaptoken(value: string): CheckResponse; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): CheckResponse.AsObject; - static toObject(includeInstance: boolean, msg: CheckResponse): CheckResponse.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: CheckResponse, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): CheckResponse; - static deserializeBinaryFromReader(message: CheckResponse, reader: jspb.BinaryReader): CheckResponse; -} - -export namespace CheckResponse { - export type AsObject = { - allowed: boolean, - snaptoken: string, - } -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_pb.js deleted file mode 100644 index c26177bd70..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/check_service_pb.js +++ /dev/null @@ -1,522 +0,0 @@ -// source: ory/keto/acl/v1alpha1/check_service.proto -/** - * @fileoverview - * @enhanceable - * @suppress {messageConventions} 
JS Compiler reports an error if a variable or - * field starts with 'MSG_' and isn't a translatable message. - * @public - */ -// GENERATED CODE -- DO NOT EDIT! - -var jspb = require('google-protobuf'); -var goog = jspb; -var global = Function('return this')(); - -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); -goog.object.extend(proto, ory_keto_acl_v1alpha1_acl_pb); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.CheckRequest', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.CheckResponse', null, global); -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.CheckRequest = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.CheckRequest, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.CheckRequest.displayName = 'proto.ory.keto.acl.v1alpha1.CheckRequest'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. 
- * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.CheckResponse = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.CheckResponse, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.CheckResponse.displayName = 'proto.ory.keto.acl.v1alpha1.CheckResponse'; -} - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.CheckRequest.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.CheckRequest} msg The msg instance to transform. 
- * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.toObject = function(includeInstance, msg) { - var f, obj = { - namespace: jspb.Message.getFieldWithDefault(msg, 1, ""), - object: jspb.Message.getFieldWithDefault(msg, 2, ""), - relation: jspb.Message.getFieldWithDefault(msg, 3, ""), - subject: (f = msg.getSubject()) && ory_keto_acl_v1alpha1_acl_pb.Subject.toObject(includeInstance, f), - latest: jspb.Message.getBooleanFieldWithDefault(msg, 5, false), - snaptoken: jspb.Message.getFieldWithDefault(msg, 6, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.CheckRequest; - return proto.ory.keto.acl.v1alpha1.CheckRequest.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.CheckRequest} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.setNamespace(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setObject(value); - break; - case 3: - var value = /** @type {string} */ (reader.readString()); - msg.setRelation(value); - break; - case 4: - var value = new ory_keto_acl_v1alpha1_acl_pb.Subject; - reader.readMessage(value,ory_keto_acl_v1alpha1_acl_pb.Subject.deserializeBinaryFromReader); - msg.setSubject(value); - break; - case 5: - var value = /** @type {boolean} */ (reader.readBool()); - msg.setLatest(value); - break; - case 6: - var value = /** @type {string} */ (reader.readString()); - msg.setSnaptoken(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.CheckRequest.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.CheckRequest} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getNamespace(); - if (f.length > 0) { - writer.writeString( - 1, - f - ); - } - f = message.getObject(); - if (f.length > 0) { - writer.writeString( - 2, - f - ); - } - f = message.getRelation(); - if (f.length > 0) { - writer.writeString( - 3, - f - ); - } - f = message.getSubject(); - if (f != null) { - writer.writeMessage( - 4, - f, - ory_keto_acl_v1alpha1_acl_pb.Subject.serializeBinaryToWriter - ); - } - f = message.getLatest(); - if (f) { - writer.writeBool( - 5, - f - ); - } - f = message.getSnaptoken(); - if (f.length > 0) { - writer.writeString( - 6, - f - ); - } -}; - - -/** - * optional string namespace = 1; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.getNamespace = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.setNamespace = function(value) { - return jspb.Message.setProto3StringField(this, 1, value); -}; - - -/** - * optional string object = 2; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.getObject = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.setObject = function(value) { - return jspb.Message.setProto3StringField(this, 2, value); -}; - - -/** - * optional string relation = 3; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.getRelation 
= function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.setRelation = function(value) { - return jspb.Message.setProto3StringField(this, 3, value); -}; - - -/** - * optional Subject subject = 4; - * @return {?proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.getSubject = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.Subject} */ ( - jspb.Message.getWrapperField(this, ory_keto_acl_v1alpha1_acl_pb.Subject, 4)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.Subject|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this -*/ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.setSubject = function(value) { - return jspb.Message.setWrapperField(this, 4, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.clearSubject = function() { - return this.setSubject(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.hasSubject = function() { - return jspb.Message.getField(this, 4) != null; -}; - - -/** - * optional bool latest = 5; - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.getLatest = function() { - return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 5, false)); -}; - - -/** - * @param {boolean} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.setLatest = function(value) { - return jspb.Message.setProto3BooleanField(this, 5, value); -}; - - -/** - * optional string snaptoken = 6; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.getSnaptoken = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 6, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckRequest.prototype.setSnaptoken = function(value) { - return jspb.Message.setProto3StringField(this, 6, value); -}; - - - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.CheckResponse.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.CheckResponse} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.toObject = function(includeInstance, msg) { - var f, obj = { - allowed: jspb.Message.getBooleanFieldWithDefault(msg, 1, false), - snaptoken: jspb.Message.getFieldWithDefault(msg, 2, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.CheckResponse} - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.CheckResponse; - return proto.ory.keto.acl.v1alpha1.CheckResponse.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.CheckResponse} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.CheckResponse} - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {boolean} */ (reader.readBool()); - msg.setAllowed(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setSnaptoken(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.CheckResponse.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.CheckResponse} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getAllowed(); - if (f) { - writer.writeBool( - 1, - f - ); - } - f = message.getSnaptoken(); - if (f.length > 0) { - writer.writeString( - 2, - f - ); - } -}; - - -/** - * optional bool allowed = 1; - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.prototype.getAllowed = function() { - return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 1, false)); -}; - - -/** - * @param {boolean} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.prototype.setAllowed = function(value) { - return jspb.Message.setProto3BooleanField(this, 1, value); -}; - - -/** - * optional string snaptoken = 2; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.prototype.getSnaptoken = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.CheckResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.CheckResponse.prototype.setSnaptoken = function(value) { - return jspb.Message.setProto3StringField(this, 2, value); -}; - - -goog.object.extend(exports, proto.ory.keto.acl.v1alpha1); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service.pb.go deleted file mode 100644 index 5adc07a8ac..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service.pb.go +++ /dev/null @@ -1,449 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.13.0 -// source: ory/keto/acl/v1alpha1/expand_service.proto - -package acl - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type NodeType int32 - -const ( - NodeType_NODE_TYPE_UNSPECIFIED NodeType = 0 - // This node expands to a union of all children. - NodeType_NODE_TYPE_UNION NodeType = 1 - // Not implemented yet. - NodeType_NODE_TYPE_EXCLUSION NodeType = 2 - // Not implemented yet. - NodeType_NODE_TYPE_INTERSECTION NodeType = 3 - // This node is a leaf and contains no children. - // Its subject is a `SubjectID` unless `max_depth` was reached. - NodeType_NODE_TYPE_LEAF NodeType = 4 -) - -// Enum value maps for NodeType. 
-var ( - NodeType_name = map[int32]string{ - 0: "NODE_TYPE_UNSPECIFIED", - 1: "NODE_TYPE_UNION", - 2: "NODE_TYPE_EXCLUSION", - 3: "NODE_TYPE_INTERSECTION", - 4: "NODE_TYPE_LEAF", - } - NodeType_value = map[string]int32{ - "NODE_TYPE_UNSPECIFIED": 0, - "NODE_TYPE_UNION": 1, - "NODE_TYPE_EXCLUSION": 2, - "NODE_TYPE_INTERSECTION": 3, - "NODE_TYPE_LEAF": 4, - } -) - -func (x NodeType) Enum() *NodeType { - p := new(NodeType) - *p = x - return p -} - -func (x NodeType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (NodeType) Descriptor() protoreflect.EnumDescriptor { - return file_ory_keto_acl_v1alpha1_expand_service_proto_enumTypes[0].Descriptor() -} - -func (NodeType) Type() protoreflect.EnumType { - return &file_ory_keto_acl_v1alpha1_expand_service_proto_enumTypes[0] -} - -func (x NodeType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use NodeType.Descriptor instead. -func (NodeType) EnumDescriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescGZIP(), []int{0} -} - -// The request for an ExpandService.Expand RPC. -// Expands the given subject set. -type ExpandRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The subject to expand. - Subject *Subject `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` - // The maximum depth of tree to build. - // It is important to set this parameter to a meaningful - // value. Ponder how deep you really want to display this. - MaxDepth int32 `protobuf:"varint,2,opt,name=max_depth,json=maxDepth,proto3" json:"max_depth,omitempty"` - // This field is not implemented yet and has no effect. 
- // - Snaptoken string `protobuf:"bytes,3,opt,name=snaptoken,proto3" json:"snaptoken,omitempty"` -} - -func (x *ExpandRequest) Reset() { - *x = ExpandRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExpandRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExpandRequest) ProtoMessage() {} - -func (x *ExpandRequest) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExpandRequest.ProtoReflect.Descriptor instead. -func (*ExpandRequest) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ExpandRequest) GetSubject() *Subject { - if x != nil { - return x.Subject - } - return nil -} - -func (x *ExpandRequest) GetMaxDepth() int32 { - if x != nil { - return x.MaxDepth - } - return 0 -} - -func (x *ExpandRequest) GetSnaptoken() string { - if x != nil { - return x.Snaptoken - } - return "" -} - -// The response for a ExpandService.Expand RPC. -type ExpandResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The tree the requested subject set expands to. - // The requested subject set is the subject of the root. - // - // This field can be nil in some circumstances. 
- Tree *SubjectTree `protobuf:"bytes,1,opt,name=tree,proto3" json:"tree,omitempty"` -} - -func (x *ExpandResponse) Reset() { - *x = ExpandResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExpandResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExpandResponse) ProtoMessage() {} - -func (x *ExpandResponse) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExpandResponse.ProtoReflect.Descriptor instead. -func (*ExpandResponse) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ExpandResponse) GetTree() *SubjectTree { - if x != nil { - return x.Tree - } - return nil -} - -type SubjectTree struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of the node. - NodeType NodeType `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=ory.keto.acl.v1alpha1.NodeType" json:"node_type,omitempty"` - // The subject this node represents. - Subject *Subject `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` - // The children of this node. - // - // This is never set if `node_type` == `NODE_TYPE_LEAF`. 
- Children []*SubjectTree `protobuf:"bytes,3,rep,name=children,proto3" json:"children,omitempty"` -} - -func (x *SubjectTree) Reset() { - *x = SubjectTree{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SubjectTree) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubjectTree) ProtoMessage() {} - -func (x *SubjectTree) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubjectTree.ProtoReflect.Descriptor instead. -func (*SubjectTree) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescGZIP(), []int{2} -} - -func (x *SubjectTree) GetNodeType() NodeType { - if x != nil { - return x.NodeType - } - return NodeType_NODE_TYPE_UNSPECIFIED -} - -func (x *SubjectTree) GetSubject() *Subject { - if x != nil { - return x.Subject - } - return nil -} - -func (x *SubjectTree) GetChildren() []*SubjectTree { - if x != nil { - return x.Children - } - return nil -} - -var File_ory_keto_acl_v1alpha1_expand_service_proto protoreflect.FileDescriptor - -var file_ory_keto_acl_v1alpha1_expand_service_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x6f, 0x72, - 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x1a, 0x1f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 
0x2f, 0x61, 0x63, - 0x6c, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x63, 0x6c, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x0d, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, - 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0e, 0x45, - 0x78, 0x70, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, - 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x72, - 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x72, 0x65, 0x65, 0x52, - 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0xc5, 0x01, 0x0a, 0x0b, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x54, 0x72, 0x65, 0x65, 0x12, 0x3c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, - 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, - 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, - 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, - 0x72, 0x65, 0x65, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x2a, 0x83, 0x01, - 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x4f, - 0x44, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4e, 0x4f, - 0x44, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x4c, 0x55, 0x53, 0x49, 0x4f, - 0x4e, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, - 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x45, 0x41, - 0x46, 0x10, 0x04, 0x32, 0x66, 0x0a, 0x0d, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x12, 0x24, - 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, - 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 
0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, - 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x95, 0x01, 0x0a, 0x18, - 0x73, 0x68, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x12, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, - 0x65, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, - 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, - 0x61, 0x63, 0x6c, 0xaa, 0x02, 0x15, 0x4f, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x74, 0x6f, 0x2e, 0x41, - 0x63, 0x6c, 0x2e, 0x56, 0x31, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x4f, 0x72, - 0x79, 0x5c, 0x4b, 0x65, 0x74, 0x6f, 0x5c, 0x41, 0x63, 0x6c, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescOnce sync.Once - file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescData = file_ory_keto_acl_v1alpha1_expand_service_proto_rawDesc -) - -func file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescGZIP() []byte { - file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescOnce.Do(func() { - file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescData) - }) - return file_ory_keto_acl_v1alpha1_expand_service_proto_rawDescData -} - -var file_ory_keto_acl_v1alpha1_expand_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_ory_keto_acl_v1alpha1_expand_service_proto_goTypes = []interface{}{ - (NodeType)(0), // 0: 
ory.keto.acl.v1alpha1.NodeType - (*ExpandRequest)(nil), // 1: ory.keto.acl.v1alpha1.ExpandRequest - (*ExpandResponse)(nil), // 2: ory.keto.acl.v1alpha1.ExpandResponse - (*SubjectTree)(nil), // 3: ory.keto.acl.v1alpha1.SubjectTree - (*Subject)(nil), // 4: ory.keto.acl.v1alpha1.Subject -} -var file_ory_keto_acl_v1alpha1_expand_service_proto_depIdxs = []int32{ - 4, // 0: ory.keto.acl.v1alpha1.ExpandRequest.subject:type_name -> ory.keto.acl.v1alpha1.Subject - 3, // 1: ory.keto.acl.v1alpha1.ExpandResponse.tree:type_name -> ory.keto.acl.v1alpha1.SubjectTree - 0, // 2: ory.keto.acl.v1alpha1.SubjectTree.node_type:type_name -> ory.keto.acl.v1alpha1.NodeType - 4, // 3: ory.keto.acl.v1alpha1.SubjectTree.subject:type_name -> ory.keto.acl.v1alpha1.Subject - 3, // 4: ory.keto.acl.v1alpha1.SubjectTree.children:type_name -> ory.keto.acl.v1alpha1.SubjectTree - 1, // 5: ory.keto.acl.v1alpha1.ExpandService.Expand:input_type -> ory.keto.acl.v1alpha1.ExpandRequest - 2, // 6: ory.keto.acl.v1alpha1.ExpandService.Expand:output_type -> ory.keto.acl.v1alpha1.ExpandResponse - 6, // [6:7] is the sub-list for method output_type - 5, // [5:6] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_ory_keto_acl_v1alpha1_expand_service_proto_init() } -func file_ory_keto_acl_v1alpha1_expand_service_proto_init() { - if File_ory_keto_acl_v1alpha1_expand_service_proto != nil { - return - } - file_ory_keto_acl_v1alpha1_acl_proto_init() - if !protoimpl.UnsafeEnabled { - file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExpandRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*ExpandResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SubjectTree); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ory_keto_acl_v1alpha1_expand_service_proto_rawDesc, - NumEnums: 1, - NumMessages: 3, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_ory_keto_acl_v1alpha1_expand_service_proto_goTypes, - DependencyIndexes: file_ory_keto_acl_v1alpha1_expand_service_proto_depIdxs, - EnumInfos: file_ory_keto_acl_v1alpha1_expand_service_proto_enumTypes, - MessageInfos: file_ory_keto_acl_v1alpha1_expand_service_proto_msgTypes, - }.Build() - File_ory_keto_acl_v1alpha1_expand_service_proto = out.File - file_ory_keto_acl_v1alpha1_expand_service_proto_rawDesc = nil - file_ory_keto_acl_v1alpha1_expand_service_proto_goTypes = nil - file_ory_keto_acl_v1alpha1_expand_service_proto_depIdxs = nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service.proto b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service.proto deleted file mode 100644 index 08fc7c5a94..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -package ory.keto.acl.v1alpha1; - -import "ory/keto/acl/v1alpha1/acl.proto"; - -option go_package = "github.com/ory/keto/proto/ory/keto/acl/v1alpha1;acl"; -option csharp_namespace = "Ory.Keto.Acl.V1Alpha1"; -option java_multiple_files = true; -option java_outer_classname = "ExpandServiceProto"; -option java_package = 
"sh.ory.keto.acl.v1alpha1"; -option php_namespace = "Ory\\Keto\\Acl\\V1alpha1"; - -// The service that performs subject set expansion -// based on the stored Access Control Lists. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis). -service ExpandService { - // Expands the subject set into a tree of subjects. - rpc Expand(ExpandRequest) returns (ExpandResponse); -} - -// The request for an ExpandService.Expand RPC. -// Expands the given subject set. -message ExpandRequest { - // The subject to expand. - Subject subject = 1; - // The maximum depth of tree to build. - // It is important to set this parameter to a meaningful - // value. Ponder how deep you really want to display this. - int32 max_depth = 2; - // This field is not implemented yet and has no effect. - // - string snaptoken = 3; -} - -// The response for a ExpandService.Expand RPC. -message ExpandResponse { - // The tree the requested subject set expands to. - // The requested subject set is the subject of the root. - // - // This field can be nil in some circumstances. - SubjectTree tree = 1; -} - -enum NodeType { - NODE_TYPE_UNSPECIFIED = 0; - // This node expands to a union of all children. - NODE_TYPE_UNION = 1; - // Not implemented yet. - NODE_TYPE_EXCLUSION = 2; - // Not implemented yet. - NODE_TYPE_INTERSECTION = 3; - // This node is a leaf and contains no children. - // Its subject is a `SubjectID` unless `max_depth` was reached. - NODE_TYPE_LEAF = 4; -} - -message SubjectTree { - // The type of the node. - NodeType node_type = 1; - // The subject this node represents. - Subject subject = 2; - // The children of this node. - // - // This is never set if `node_type` == `NODE_TYPE_LEAF`. 
- repeated SubjectTree children = 3; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc.pb.go deleted file mode 100644 index 7b55fb971a..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc.pb.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package acl - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// ExpandServiceClient is the client API for ExpandService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ExpandServiceClient interface { - // Expands the subject set into a tree of subjects. - Expand(ctx context.Context, in *ExpandRequest, opts ...grpc.CallOption) (*ExpandResponse, error) -} - -type expandServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewExpandServiceClient(cc grpc.ClientConnInterface) ExpandServiceClient { - return &expandServiceClient{cc} -} - -func (c *expandServiceClient) Expand(ctx context.Context, in *ExpandRequest, opts ...grpc.CallOption) (*ExpandResponse, error) { - out := new(ExpandResponse) - err := c.cc.Invoke(ctx, "/ory.keto.acl.v1alpha1.ExpandService/Expand", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ExpandServiceServer is the server API for ExpandService service. 
-// All implementations should embed UnimplementedExpandServiceServer -// for forward compatibility -type ExpandServiceServer interface { - // Expands the subject set into a tree of subjects. - Expand(context.Context, *ExpandRequest) (*ExpandResponse, error) -} - -// UnimplementedExpandServiceServer should be embedded to have forward compatible implementations. -type UnimplementedExpandServiceServer struct { -} - -func (UnimplementedExpandServiceServer) Expand(context.Context, *ExpandRequest) (*ExpandResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Expand not implemented") -} - -// UnsafeExpandServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ExpandServiceServer will -// result in compilation errors. -type UnsafeExpandServiceServer interface { - mustEmbedUnimplementedExpandServiceServer() -} - -func RegisterExpandServiceServer(s grpc.ServiceRegistrar, srv ExpandServiceServer) { - s.RegisterService(&ExpandService_ServiceDesc, srv) -} - -func _ExpandService_Expand_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExpandRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExpandServiceServer).Expand(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ory.keto.acl.v1alpha1.ExpandService/Expand", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExpandServiceServer).Expand(ctx, req.(*ExpandRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// ExpandService_ServiceDesc is the grpc.ServiceDesc for ExpandService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ExpandService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ory.keto.acl.v1alpha1.ExpandService", - HandlerType: (*ExpandServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Expand", - Handler: _ExpandService_Expand_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "ory/keto/acl/v1alpha1/expand_service.proto", -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc_pb.d.ts deleted file mode 100644 index 36305fc058..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc_pb.d.ts +++ /dev/null @@ -1,42 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/expand_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as grpc from "grpc"; -import * as ory_keto_acl_v1alpha1_expand_service_pb from "../../../../ory/keto/acl/v1alpha1/expand_service_pb"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; - -interface IExpandServiceService extends grpc.ServiceDefinition { - expand: IExpandServiceService_IExpand; -} - -interface IExpandServiceService_IExpand extends grpc.MethodDefinition { - path: "/ory.keto.acl.v1alpha1.ExpandService/Expand"; - requestStream: false; - responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; - responseSerialize: grpc.serialize; - responseDeserialize: grpc.deserialize; -} - -export const ExpandServiceService: IExpandServiceService; - -export interface IExpandServiceServer { - expand: grpc.handleUnaryCall; -} - -export interface IExpandServiceClient { - expand(request: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, callback: (error: grpc.ServiceError | null, response: 
ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse) => void): grpc.ClientUnaryCall; - expand(request: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse) => void): grpc.ClientUnaryCall; - expand(request: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse) => void): grpc.ClientUnaryCall; -} - -export class ExpandServiceClient extends grpc.Client implements IExpandServiceClient { - constructor(address: string, credentials: grpc.ChannelCredentials, options?: object); - public expand(request: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse) => void): grpc.ClientUnaryCall; - public expand(request: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse) => void): grpc.ClientUnaryCall; - public expand(request: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse) => void): grpc.ClientUnaryCall; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc_pb.js deleted file mode 100644 index 6dc9e238b5..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_grpc_pb.js +++ /dev/null @@ -1,50 +0,0 @@ -// GENERATED CODE -- DO NOT EDIT! 
- -'use strict'; -var grpc = require('@grpc/grpc-js'); -var ory_keto_acl_v1alpha1_expand_service_pb = require('../../../../ory/keto/acl/v1alpha1/expand_service_pb.js'); -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); - -function serialize_ory_keto_acl_v1alpha1_ExpandRequest(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.ExpandRequest'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_ExpandRequest(buffer_arg) { - return ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest.deserializeBinary(new Uint8Array(buffer_arg)); -} - -function serialize_ory_keto_acl_v1alpha1_ExpandResponse(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.ExpandResponse'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_ExpandResponse(buffer_arg) { - return ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse.deserializeBinary(new Uint8Array(buffer_arg)); -} - - -// The service that performs subject set expansion -// based on the stored Access Control Lists. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis). -var ExpandServiceService = exports.ExpandServiceService = { - // Expands the subject set into a tree of subjects. 
-expand: { - path: '/ory.keto.acl.v1alpha1.ExpandService/Expand', - requestStream: false, - responseStream: false, - requestType: ory_keto_acl_v1alpha1_expand_service_pb.ExpandRequest, - responseType: ory_keto_acl_v1alpha1_expand_service_pb.ExpandResponse, - requestSerialize: serialize_ory_keto_acl_v1alpha1_ExpandRequest, - requestDeserialize: deserialize_ory_keto_acl_v1alpha1_ExpandRequest, - responseSerialize: serialize_ory_keto_acl_v1alpha1_ExpandResponse, - responseDeserialize: deserialize_ory_keto_acl_v1alpha1_ExpandResponse, - }, -}; - -exports.ExpandServiceClient = grpc.makeGenericClientConstructor(ExpandServiceService); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_pb.d.ts deleted file mode 100644 index 803dbeff77..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_pb.d.ts +++ /dev/null @@ -1,106 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/expand_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as jspb from "google-protobuf"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; - -export class ExpandRequest extends jspb.Message { - - hasSubject(): boolean; - clearSubject(): void; - getSubject(): ory_keto_acl_v1alpha1_acl_pb.Subject | undefined; - setSubject(value?: ory_keto_acl_v1alpha1_acl_pb.Subject): ExpandRequest; - - getMaxDepth(): number; - setMaxDepth(value: number): ExpandRequest; - - getSnaptoken(): string; - setSnaptoken(value: string): ExpandRequest; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): ExpandRequest.AsObject; - static toObject(includeInstance: boolean, msg: ExpandRequest): ExpandRequest.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static 
serializeBinaryToWriter(message: ExpandRequest, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): ExpandRequest; - static deserializeBinaryFromReader(message: ExpandRequest, reader: jspb.BinaryReader): ExpandRequest; -} - -export namespace ExpandRequest { - export type AsObject = { - subject?: ory_keto_acl_v1alpha1_acl_pb.Subject.AsObject, - maxDepth: number, - snaptoken: string, - } -} - -export class ExpandResponse extends jspb.Message { - - hasTree(): boolean; - clearTree(): void; - getTree(): SubjectTree | undefined; - setTree(value?: SubjectTree): ExpandResponse; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): ExpandResponse.AsObject; - static toObject(includeInstance: boolean, msg: ExpandResponse): ExpandResponse.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: ExpandResponse, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): ExpandResponse; - static deserializeBinaryFromReader(message: ExpandResponse, reader: jspb.BinaryReader): ExpandResponse; -} - -export namespace ExpandResponse { - export type AsObject = { - tree?: SubjectTree.AsObject, - } -} - -export class SubjectTree extends jspb.Message { - getNodeType(): NodeType; - setNodeType(value: NodeType): SubjectTree; - - - hasSubject(): boolean; - clearSubject(): void; - getSubject(): ory_keto_acl_v1alpha1_acl_pb.Subject | undefined; - setSubject(value?: ory_keto_acl_v1alpha1_acl_pb.Subject): SubjectTree; - - clearChildrenList(): void; - getChildrenList(): Array; - setChildrenList(value: Array): SubjectTree; - addChildren(value?: SubjectTree, index?: number): SubjectTree; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): SubjectTree.AsObject; - static toObject(includeInstance: boolean, msg: SubjectTree): SubjectTree.AsObject; - static extensions: {[key: 
number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: SubjectTree, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): SubjectTree; - static deserializeBinaryFromReader(message: SubjectTree, reader: jspb.BinaryReader): SubjectTree; -} - -export namespace SubjectTree { - export type AsObject = { - nodeType: NodeType, - subject?: ory_keto_acl_v1alpha1_acl_pb.Subject.AsObject, - childrenList: Array, - } -} - -export enum NodeType { - NODE_TYPE_UNSPECIFIED = 0, - NODE_TYPE_UNION = 1, - NODE_TYPE_EXCLUSION = 2, - NODE_TYPE_INTERSECTION = 3, - NODE_TYPE_LEAF = 4, -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_pb.js deleted file mode 100644 index 80c0fb9f65..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/expand_service_pb.js +++ /dev/null @@ -1,698 +0,0 @@ -// source: ory/keto/acl/v1alpha1/expand_service.proto -/** - * @fileoverview - * @enhanceable - * @suppress {messageConventions} JS Compiler reports an error if a variable or - * field starts with 'MSG_' and isn't a translatable message. - * @public - */ -// GENERATED CODE -- DO NOT EDIT! - -var jspb = require('google-protobuf'); -var goog = jspb; -var global = Function('return this')(); - -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); -goog.object.extend(proto, ory_keto_acl_v1alpha1_acl_pb); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.ExpandRequest', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.ExpandResponse', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.NodeType', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.SubjectTree', null, global); -/** - * Generated by JsPbCodeGenerator. 
- * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.ExpandRequest, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.ExpandRequest.displayName = 'proto.ory.keto.acl.v1alpha1.ExpandRequest'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.ExpandResponse, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.ExpandResponse.displayName = 'proto.ory.keto.acl.v1alpha1.ExpandResponse'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. 
- * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.SubjectTree = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, proto.ory.keto.acl.v1alpha1.SubjectTree.repeatedFields_, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.SubjectTree, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.SubjectTree.displayName = 'proto.ory.keto.acl.v1alpha1.SubjectTree'; -} - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.ExpandRequest.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.ExpandRequest} msg The msg instance to transform. 
- * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.toObject = function(includeInstance, msg) { - var f, obj = { - subject: (f = msg.getSubject()) && ory_keto_acl_v1alpha1_acl_pb.Subject.toObject(includeInstance, f), - maxDepth: jspb.Message.getFieldWithDefault(msg, 2, 0), - snaptoken: jspb.Message.getFieldWithDefault(msg, 3, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.ExpandRequest} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.ExpandRequest; - return proto.ory.keto.acl.v1alpha1.ExpandRequest.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.ExpandRequest} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.ExpandRequest} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = new ory_keto_acl_v1alpha1_acl_pb.Subject; - reader.readMessage(value,ory_keto_acl_v1alpha1_acl_pb.Subject.deserializeBinaryFromReader); - msg.setSubject(value); - break; - case 2: - var value = /** @type {number} */ (reader.readInt32()); - msg.setMaxDepth(value); - break; - case 3: - var value = /** @type {string} */ (reader.readString()); - msg.setSnaptoken(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.ExpandRequest.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.ExpandRequest} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getSubject(); - if (f != null) { - writer.writeMessage( - 1, - f, - ory_keto_acl_v1alpha1_acl_pb.Subject.serializeBinaryToWriter - ); - } - f = message.getMaxDepth(); - if (f !== 0) { - writer.writeInt32( - 2, - f - ); - } - f = message.getSnaptoken(); - if (f.length > 0) { - writer.writeString( - 3, - f - ); - } -}; - - -/** - * optional Subject subject = 1; - * @return {?proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.getSubject = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.Subject} */ ( - jspb.Message.getWrapperField(this, ory_keto_acl_v1alpha1_acl_pb.Subject, 1)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.Subject|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.ExpandRequest} returns this -*/ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.setSubject = function(value) { - return jspb.Message.setWrapperField(this, 1, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.ExpandRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.clearSubject = function() { - return this.setSubject(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.hasSubject = function() { - return jspb.Message.getField(this, 1) != null; -}; - - -/** - * optional int32 max_depth = 2; - * @return {number} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.getMaxDepth = function() { - return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 2, 0)); -}; - - -/** - * @param {number} value - * @return {!proto.ory.keto.acl.v1alpha1.ExpandRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.setMaxDepth = function(value) { - return jspb.Message.setProto3IntField(this, 2, value); -}; - - -/** - * optional string snaptoken = 3; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.getSnaptoken = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ExpandRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ExpandRequest.prototype.setSnaptoken = function(value) { - return jspb.Message.setProto3StringField(this, 3, value); -}; - - - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.ExpandResponse.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.ExpandResponse} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.toObject = function(includeInstance, msg) { - var f, obj = { - tree: (f = msg.getTree()) && proto.ory.keto.acl.v1alpha1.SubjectTree.toObject(includeInstance, f) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.ExpandResponse} - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.ExpandResponse; - return proto.ory.keto.acl.v1alpha1.ExpandResponse.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.ExpandResponse} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.ExpandResponse} - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = new proto.ory.keto.acl.v1alpha1.SubjectTree; - reader.readMessage(value,proto.ory.keto.acl.v1alpha1.SubjectTree.deserializeBinaryFromReader); - msg.setTree(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.ExpandResponse.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.ExpandResponse} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getTree(); - if (f != null) { - writer.writeMessage( - 1, - f, - proto.ory.keto.acl.v1alpha1.SubjectTree.serializeBinaryToWriter - ); - } -}; - - -/** - * optional SubjectTree tree = 1; - * @return {?proto.ory.keto.acl.v1alpha1.SubjectTree} - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.prototype.getTree = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.SubjectTree} */ ( - jspb.Message.getWrapperField(this, proto.ory.keto.acl.v1alpha1.SubjectTree, 1)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.SubjectTree|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.ExpandResponse} returns this -*/ -proto.ory.keto.acl.v1alpha1.ExpandResponse.prototype.setTree = function(value) { - return jspb.Message.setWrapperField(this, 1, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.ExpandResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.prototype.clearTree = function() { - return this.setTree(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.ExpandResponse.prototype.hasTree = function() { - return jspb.Message.getField(this, 1) != null; -}; - - - -/** - * List of repeated fields within this message type. - * @private {!Array} - * @const - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.repeatedFields_ = [3]; - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. 
- * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.SubjectTree.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.SubjectTree} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.toObject = function(includeInstance, msg) { - var f, obj = { - nodeType: jspb.Message.getFieldWithDefault(msg, 1, 0), - subject: (f = msg.getSubject()) && ory_keto_acl_v1alpha1_acl_pb.Subject.toObject(includeInstance, f), - childrenList: jspb.Message.toObjectList(msg.getChildrenList(), - proto.ory.keto.acl.v1alpha1.SubjectTree.toObject, includeInstance) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. 
- * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.SubjectTree; - return proto.ory.keto.acl.v1alpha1.SubjectTree.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.SubjectTree} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {!proto.ory.keto.acl.v1alpha1.NodeType} */ (reader.readEnum()); - msg.setNodeType(value); - break; - case 2: - var value = new ory_keto_acl_v1alpha1_acl_pb.Subject; - reader.readMessage(value,ory_keto_acl_v1alpha1_acl_pb.Subject.deserializeBinaryFromReader); - msg.setSubject(value); - break; - case 3: - var value = new proto.ory.keto.acl.v1alpha1.SubjectTree; - reader.readMessage(value,proto.ory.keto.acl.v1alpha1.SubjectTree.deserializeBinaryFromReader); - msg.addChildren(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.SubjectTree.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.SubjectTree} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getNodeType(); - if (f !== 0.0) { - writer.writeEnum( - 1, - f - ); - } - f = message.getSubject(); - if (f != null) { - writer.writeMessage( - 2, - f, - ory_keto_acl_v1alpha1_acl_pb.Subject.serializeBinaryToWriter - ); - } - f = message.getChildrenList(); - if (f.length > 0) { - writer.writeRepeatedMessage( - 3, - f, - proto.ory.keto.acl.v1alpha1.SubjectTree.serializeBinaryToWriter - ); - } -}; - - -/** - * optional NodeType node_type = 1; - * @return {!proto.ory.keto.acl.v1alpha1.NodeType} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.getNodeType = function() { - return /** @type {!proto.ory.keto.acl.v1alpha1.NodeType} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); -}; - - -/** - * @param {!proto.ory.keto.acl.v1alpha1.NodeType} value - * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} returns this - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.setNodeType = function(value) { - return jspb.Message.setProto3EnumField(this, 1, value); -}; - - -/** - * optional Subject subject = 2; - * @return {?proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.getSubject = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.Subject} */ ( - jspb.Message.getWrapperField(this, ory_keto_acl_v1alpha1_acl_pb.Subject, 2)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.Subject|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} returns this -*/ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.setSubject = function(value) { - return jspb.Message.setWrapperField(this, 2, value); -}; - - -/** - * Clears the message field making it undefined. 
- * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} returns this - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.clearSubject = function() { - return this.setSubject(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.hasSubject = function() { - return jspb.Message.getField(this, 2) != null; -}; - - -/** - * repeated SubjectTree children = 3; - * @return {!Array} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.getChildrenList = function() { - return /** @type{!Array} */ ( - jspb.Message.getRepeatedWrapperField(this, proto.ory.keto.acl.v1alpha1.SubjectTree, 3)); -}; - - -/** - * @param {!Array} value - * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} returns this -*/ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.setChildrenList = function(value) { - return jspb.Message.setRepeatedWrapperField(this, 3, value); -}; - - -/** - * @param {!proto.ory.keto.acl.v1alpha1.SubjectTree=} opt_value - * @param {number=} opt_index - * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.addChildren = function(opt_value, opt_index) { - return jspb.Message.addToRepeatedWrapperField(this, 3, opt_value, proto.ory.keto.acl.v1alpha1.SubjectTree, opt_index); -}; - - -/** - * Clears the list making it empty but non-null. 
- * @return {!proto.ory.keto.acl.v1alpha1.SubjectTree} returns this - */ -proto.ory.keto.acl.v1alpha1.SubjectTree.prototype.clearChildrenList = function() { - return this.setChildrenList([]); -}; - - -/** - * @enum {number} - */ -proto.ory.keto.acl.v1alpha1.NodeType = { - NODE_TYPE_UNSPECIFIED: 0, - NODE_TYPE_UNION: 1, - NODE_TYPE_EXCLUSION: 2, - NODE_TYPE_INTERSECTION: 3, - NODE_TYPE_LEAF: 4 -}; - -goog.object.extend(exports, proto.ory.keto.acl.v1alpha1); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/package-lock.json b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/package-lock.json deleted file mode 100644 index 9be3d183cb..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/package-lock.json +++ /dev/null @@ -1,517 +0,0 @@ -{ - "name": "@ory/keto-grpc-client", - "version": "0.6.0-alpha.1-pre", - "lockfileVersion": 2, - "requires": true, - "packages": { - "": { - "name": "@ory/keto-grpc-client", - "version": "0.6.0-alpha.1-pre", - "dependencies": { - "@grpc/grpc-js": "^1.2.6", - "google-protobuf": "^3.15.0-rc.1" - } - }, - "node_modules/@grpc/grpc-js": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.2.6.tgz", - "integrity": "sha512-wfYwFy7CvVEmBKzeDX1kQQYrv5NBpe8Z+VwXipFvqof3lCXKch7k+4T3grKtptaH5GQ5KP9iKwPr9hMDSynIUw==", - "dependencies": { - "@types/node": ">=12.12.47", - "google-auth-library": "^6.1.1", - "semver": "^6.2.0" - }, - "engines": { - "node": "^8.13.0 || >=10.10.0" - } - }, - "node_modules/@types/node": { - "version": "14.14.27", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.27.tgz", - "integrity": "sha512-Ecfmo4YDQPwuqTCl1yBxLV5ihKfRlkBmzUEDcfIRvDxOTGQEeikr317Ln7Gcv0tjA8dVgKI3rniqW2G1OyKDng==" - }, - "node_modules/abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": 
"sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "dependencies": { - "event-target-shim": "^5.0.0" - }, - "engines": { - "node": ">=6.5" - } - }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/arrify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz", - "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", - "engines": { - "node": ">=8" - } - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "node_modules/bignumber.js": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz", - "integrity": "sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA==", - "engines": { - "node": "*" - } - }, - "node_modules/buffer-equal-constant-time": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" - }, - "node_modules/debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/ecdsa-sig-formatter": { - "version": "1.0.11", - "resolved": 
"https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", - "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "dependencies": { - "safe-buffer": "^5.0.1" - } - }, - "node_modules/event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/fast-text-encoding": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/fast-text-encoding/-/fast-text-encoding-1.0.3.tgz", - "integrity": "sha512-dtm4QZH9nZtcDt8qJiOH9fcQd1NAgi+K1O2DbE6GG1PPCK/BWfOH3idCTRQ4ImXRUOyopDEgDEnVEE7Y/2Wrig==" - }, - "node_modules/gaxios": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-4.1.0.tgz", - "integrity": "sha512-vb0to8xzGnA2qcgywAjtshOKKVDf2eQhJoiL6fHhgW5tVN7wNk7egnYIO9zotfn3lQ3De1VPdf7V5/BWfCtCmg==", - "dependencies": { - "abort-controller": "^3.0.0", - "extend": "^3.0.2", - "https-proxy-agent": "^5.0.0", - "is-stream": "^2.0.0", - "node-fetch": "^2.3.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/gcp-metadata": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-4.2.1.tgz", - "integrity": "sha512-tSk+REe5iq/N+K+SK1XjZJUrFPuDqGZVzCy2vocIHIGmPlTGsa8owXMJwGkrXr73NO0AzhPW4MF2DEHz7P2AVw==", - "dependencies": { - "gaxios": "^4.0.0", - "json-bigint": "^1.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/google-auth-library": { - "version": "6.1.6", - "resolved": 
"https://registry.npmjs.org/google-auth-library/-/google-auth-library-6.1.6.tgz", - "integrity": "sha512-Q+ZjUEvLQj/lrVHF/IQwRo6p3s8Nc44Zk/DALsN+ac3T4HY/g/3rrufkgtl+nZ1TW7DNAw5cTChdVp4apUXVgQ==", - "dependencies": { - "arrify": "^2.0.0", - "base64-js": "^1.3.0", - "ecdsa-sig-formatter": "^1.0.11", - "fast-text-encoding": "^1.0.0", - "gaxios": "^4.0.0", - "gcp-metadata": "^4.2.0", - "gtoken": "^5.0.4", - "jws": "^4.0.0", - "lru-cache": "^6.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/google-p12-pem": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/google-p12-pem/-/google-p12-pem-3.0.3.tgz", - "integrity": "sha512-wS0ek4ZtFx/ACKYF3JhyGe5kzH7pgiQ7J5otlumqR9psmWMYc+U9cErKlCYVYHoUaidXHdZ2xbo34kB+S+24hA==", - "dependencies": { - "node-forge": "^0.10.0" - }, - "bin": { - "gp12-pem": "build/src/bin/gp12-pem.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/google-protobuf": { - "version": "3.15.0-rc.1", - "resolved": "https://registry.npmjs.org/google-protobuf/-/google-protobuf-3.15.0-rc.1.tgz", - "integrity": "sha512-aDpp0vaiFaM9Thv5H0UC2FJ3x4XzBABM1v+Q5V5GGyTNd1VsZXjKTPdI0ObFgio2tpBWO7nFy2cQmJ9n+6n95A==" - }, - "node_modules/gtoken": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-5.2.1.tgz", - "integrity": "sha512-OY0BfPKe3QnMsY9MzTHTSKn+Vl2l1CcLe6BwDEQj00mbbkl5nyQ/7EUREstg4fQNZ8iYE7br4JJ7TdKeDOPWmw==", - "dependencies": { - "gaxios": "^4.0.0", - "google-p12-pem": "^3.0.3", - "jws": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/https-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", - "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/is-stream": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/json-bigint": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", - "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", - "dependencies": { - "bignumber.js": "^9.0.0" - } - }, - "node_modules/jwa": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", - "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", - "dependencies": { - "buffer-equal-constant-time": "1.0.1", - "ecdsa-sig-formatter": "1.0.11", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/jws": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", - "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", - "dependencies": { - "jwa": "^2.0.0", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", - "engines": { - "node": 
"4.x || >=6.0.0" - } - }, - "node_modules/node-forge": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz", - "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==", - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - } - }, - "dependencies": { - "@grpc/grpc-js": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.2.6.tgz", - "integrity": "sha512-wfYwFy7CvVEmBKzeDX1kQQYrv5NBpe8Z+VwXipFvqof3lCXKch7k+4T3grKtptaH5GQ5KP9iKwPr9hMDSynIUw==", - "requires": { - "@types/node": ">=12.12.47", - "google-auth-library": "^6.1.1", - "semver": "^6.2.0" - } - }, - "@types/node": { - "version": "14.14.27", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.27.tgz", - "integrity": "sha512-Ecfmo4YDQPwuqTCl1yBxLV5ihKfRlkBmzUEDcfIRvDxOTGQEeikr317Ln7Gcv0tjA8dVgKI3rniqW2G1OyKDng==" - }, - "abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "requires": { - "event-target-shim": "^5.0.0" - 
} - }, - "agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "requires": { - "debug": "4" - } - }, - "arrify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz", - "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==" - }, - "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "bignumber.js": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.0.1.tgz", - "integrity": "sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA==" - }, - "buffer-equal-constant-time": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha1-+OcRMvf/5uAaXJaXpMbz5I1cyBk=" - }, - "debug": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", - "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", - "requires": { - "ms": "2.1.2" - } - }, - "ecdsa-sig-formatter": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", - "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": 
"sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==" - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "fast-text-encoding": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/fast-text-encoding/-/fast-text-encoding-1.0.3.tgz", - "integrity": "sha512-dtm4QZH9nZtcDt8qJiOH9fcQd1NAgi+K1O2DbE6GG1PPCK/BWfOH3idCTRQ4ImXRUOyopDEgDEnVEE7Y/2Wrig==" - }, - "gaxios": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-4.1.0.tgz", - "integrity": "sha512-vb0to8xzGnA2qcgywAjtshOKKVDf2eQhJoiL6fHhgW5tVN7wNk7egnYIO9zotfn3lQ3De1VPdf7V5/BWfCtCmg==", - "requires": { - "abort-controller": "^3.0.0", - "extend": "^3.0.2", - "https-proxy-agent": "^5.0.0", - "is-stream": "^2.0.0", - "node-fetch": "^2.3.0" - } - }, - "gcp-metadata": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-4.2.1.tgz", - "integrity": "sha512-tSk+REe5iq/N+K+SK1XjZJUrFPuDqGZVzCy2vocIHIGmPlTGsa8owXMJwGkrXr73NO0AzhPW4MF2DEHz7P2AVw==", - "requires": { - "gaxios": "^4.0.0", - "json-bigint": "^1.0.0" - } - }, - "google-auth-library": { - "version": "6.1.6", - "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-6.1.6.tgz", - "integrity": "sha512-Q+ZjUEvLQj/lrVHF/IQwRo6p3s8Nc44Zk/DALsN+ac3T4HY/g/3rrufkgtl+nZ1TW7DNAw5cTChdVp4apUXVgQ==", - "requires": { - "arrify": "^2.0.0", - "base64-js": "^1.3.0", - "ecdsa-sig-formatter": "^1.0.11", - "fast-text-encoding": "^1.0.0", - "gaxios": "^4.0.0", - "gcp-metadata": "^4.2.0", - "gtoken": "^5.0.4", - "jws": "^4.0.0", - "lru-cache": "^6.0.0" - } - }, - "google-p12-pem": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/google-p12-pem/-/google-p12-pem-3.0.3.tgz", - "integrity": 
"sha512-wS0ek4ZtFx/ACKYF3JhyGe5kzH7pgiQ7J5otlumqR9psmWMYc+U9cErKlCYVYHoUaidXHdZ2xbo34kB+S+24hA==", - "requires": { - "node-forge": "^0.10.0" - } - }, - "google-protobuf": { - "version": "3.15.0-rc.1", - "resolved": "https://registry.npmjs.org/google-protobuf/-/google-protobuf-3.15.0-rc.1.tgz", - "integrity": "sha512-aDpp0vaiFaM9Thv5H0UC2FJ3x4XzBABM1v+Q5V5GGyTNd1VsZXjKTPdI0ObFgio2tpBWO7nFy2cQmJ9n+6n95A==" - }, - "gtoken": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-5.2.1.tgz", - "integrity": "sha512-OY0BfPKe3QnMsY9MzTHTSKn+Vl2l1CcLe6BwDEQj00mbbkl5nyQ/7EUREstg4fQNZ8iYE7br4JJ7TdKeDOPWmw==", - "requires": { - "gaxios": "^4.0.0", - "google-p12-pem": "^3.0.3", - "jws": "^4.0.0" - } - }, - "https-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", - "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", - "requires": { - "agent-base": "6", - "debug": "4" - } - }, - "is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==" - }, - "json-bigint": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", - "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", - "requires": { - "bignumber.js": "^9.0.0" - } - }, - "jwa": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.0.tgz", - "integrity": "sha512-jrZ2Qx916EA+fq9cEAeCROWPTfCwi1IVHqT2tapuqLEVVDKFDENFw1oL+MwrTvH6msKxsd1YTDVw6uKEcsrLEA==", - "requires": { - "buffer-equal-constant-time": "1.0.1", - "ecdsa-sig-formatter": "1.0.11", - "safe-buffer": "^5.0.1" - } - }, - "jws": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", - 
"integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", - "requires": { - "jwa": "^2.0.0", - "safe-buffer": "^5.0.1" - } - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "requires": { - "yallist": "^4.0.0" - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==" - }, - "node-forge": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz", - "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==" - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==" - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - } - } -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/package.json b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/package.json deleted 
file mode 100644 index fadbe73895..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/package.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "name": "@ory/keto-grpc-client", - "version": "0.6.0-alpha.1", - "description": "gRPC client for Ory Keto", - "homepage": "https://www.ory.sh/keto", - "bugs": "https://github.com/ory/keto/issues", - "dependencies": { - "@grpc/grpc-js": "^1.2.6", - "google-protobuf": "^3.15.0-rc.1" - } -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service.pb.go deleted file mode 100644 index 5008e51266..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service.pb.go +++ /dev/null @@ -1,444 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.13.0 -// source: ory/keto/acl/v1alpha1/read_service.proto - -package acl - -import ( - field_mask "google.golang.org/genproto/protobuf/field_mask" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Request for ReadService.ListRelationTuples RPC. -// See `ListRelationTuplesRequest_Query` for how to filter the query. -type ListRelationTuplesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // All query constraints are concatenated - // with a logical AND operator. - // - // The RelationTuple list from ListRelationTuplesResponse - // is ordered from the newest RelationTuple to the oldest. 
- Query *ListRelationTuplesRequest_Query `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - // This field is not implemented yet and has no effect. - // - ExpandMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=expand_mask,json=expandMask,proto3" json:"expand_mask,omitempty"` - // This field is not implemented yet and has no effect. - // - Snaptoken string `protobuf:"bytes,3,opt,name=snaptoken,proto3" json:"snaptoken,omitempty"` - // Optional. The maximum number of - // RelationTuples to return in the response. - // - // Default: 100 - PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // Optional. An opaque pagination token returned from - // a previous call to `ListRelationTuples` that - // indicates where the page should start at. - // - // An empty token denotes the first page. All successive - // pages require the token from the previous page. - PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListRelationTuplesRequest) Reset() { - *x = ListRelationTuplesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListRelationTuplesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListRelationTuplesRequest) ProtoMessage() {} - -func (x *ListRelationTuplesRequest) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListRelationTuplesRequest.ProtoReflect.Descriptor instead. 
-func (*ListRelationTuplesRequest) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_read_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ListRelationTuplesRequest) GetQuery() *ListRelationTuplesRequest_Query { - if x != nil { - return x.Query - } - return nil -} - -func (x *ListRelationTuplesRequest) GetExpandMask() *field_mask.FieldMask { - if x != nil { - return x.ExpandMask - } - return nil -} - -func (x *ListRelationTuplesRequest) GetSnaptoken() string { - if x != nil { - return x.Snaptoken - } - return "" -} - -func (x *ListRelationTuplesRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListRelationTuplesRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The response of a ReadService.ListRelationTuples RPC. -type ListRelationTuplesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The relation tuples matching the list request. - RelationTuples []*RelationTuple `protobuf:"bytes,1,rep,name=relation_tuples,json=relationTuples,proto3" json:"relation_tuples,omitempty"` - // The token required to get the next page. - // If this is the last page, the token will be the empty string. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListRelationTuplesResponse) Reset() { - *x = ListRelationTuplesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListRelationTuplesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListRelationTuplesResponse) ProtoMessage() {} - -func (x *ListRelationTuplesResponse) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListRelationTuplesResponse.ProtoReflect.Descriptor instead. -func (*ListRelationTuplesResponse) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_read_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListRelationTuplesResponse) GetRelationTuples() []*RelationTuple { - if x != nil { - return x.RelationTuples - } - return nil -} - -func (x *ListRelationTuplesResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The query for listing relation tuples. -// Clients can specify any optional field to -// partially filter for specific relation tuples. 
-// -// Example use cases (namespace is always required): -// - object only: display a list of all permissions referring to a specific object -// - relation only: get all groups that have members; get all directories that have content -// - object & relation: display all subjects that have a specific permission relation -// - subject & relation: display all groups a subject belongs to; display all objects a subject has access to -// - object & relation & subject: check whether the relation tuple already exists -// -type ListRelationTuplesRequest_Query struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The namespace to query. - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - // Optional. The object to query for. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // Optional. The relation to query for. - Relation string `protobuf:"bytes,3,opt,name=relation,proto3" json:"relation,omitempty"` - // Optional. The subject to query for. 
- Subject *Subject `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"` -} - -func (x *ListRelationTuplesRequest_Query) Reset() { - *x = ListRelationTuplesRequest_Query{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListRelationTuplesRequest_Query) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListRelationTuplesRequest_Query) ProtoMessage() {} - -func (x *ListRelationTuplesRequest_Query) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListRelationTuplesRequest_Query.ProtoReflect.Descriptor instead. -func (*ListRelationTuplesRequest_Query) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_read_service_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *ListRelationTuplesRequest_Query) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *ListRelationTuplesRequest_Query) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *ListRelationTuplesRequest_Query) GetRelation() string { - if x != nil { - return x.Relation - } - return "" -} - -func (x *ListRelationTuplesRequest_Query) GetSubject() *Subject { - if x != nil { - return x.Subject - } - return nil -} - -var File_ory_keto_acl_v1alpha1_read_service_proto protoreflect.FileDescriptor - -var file_ory_keto_acl_v1alpha1_read_service_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 
0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x6f, 0x72, 0x79, 0x2e, - 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x1a, 0x1f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x63, 0x6c, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x03, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6c, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x4c, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x36, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 
0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x93, 0x01, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, - 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x93, 0x01, - 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, - 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0f, - 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, - 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, - 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x52, 0x0e, 0x72, 0x65, 0x6c, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, - 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 
0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x32, 0x88, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x79, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x2e, 0x6f, 0x72, 0x79, 0x2e, - 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, - 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x6f, 0x72, - 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x93, - 0x01, 0x0a, 0x18, 0x73, 0x68, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, - 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x52, 0x65, 0x61, - 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x72, 0x79, 0x2f, - 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, - 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x3b, 0x61, 0x63, 0x6c, 0xaa, 0x02, 0x15, 0x4f, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x74, 0x6f, 0x2e, - 0x41, 0x63, 0x6c, 0x2e, 0x56, 0x31, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x4f, - 0x72, 0x79, 0x5c, 0x4b, 0x65, 0x74, 0x6f, 0x5c, 0x41, 0x63, 0x6c, 0x5c, 0x56, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_ory_keto_acl_v1alpha1_read_service_proto_rawDescOnce sync.Once - file_ory_keto_acl_v1alpha1_read_service_proto_rawDescData = 
file_ory_keto_acl_v1alpha1_read_service_proto_rawDesc -) - -func file_ory_keto_acl_v1alpha1_read_service_proto_rawDescGZIP() []byte { - file_ory_keto_acl_v1alpha1_read_service_proto_rawDescOnce.Do(func() { - file_ory_keto_acl_v1alpha1_read_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_ory_keto_acl_v1alpha1_read_service_proto_rawDescData) - }) - return file_ory_keto_acl_v1alpha1_read_service_proto_rawDescData -} - -var file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_ory_keto_acl_v1alpha1_read_service_proto_goTypes = []interface{}{ - (*ListRelationTuplesRequest)(nil), // 0: ory.keto.acl.v1alpha1.ListRelationTuplesRequest - (*ListRelationTuplesResponse)(nil), // 1: ory.keto.acl.v1alpha1.ListRelationTuplesResponse - (*ListRelationTuplesRequest_Query)(nil), // 2: ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query - (*field_mask.FieldMask)(nil), // 3: google.protobuf.FieldMask - (*RelationTuple)(nil), // 4: ory.keto.acl.v1alpha1.RelationTuple - (*Subject)(nil), // 5: ory.keto.acl.v1alpha1.Subject -} -var file_ory_keto_acl_v1alpha1_read_service_proto_depIdxs = []int32{ - 2, // 0: ory.keto.acl.v1alpha1.ListRelationTuplesRequest.query:type_name -> ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query - 3, // 1: ory.keto.acl.v1alpha1.ListRelationTuplesRequest.expand_mask:type_name -> google.protobuf.FieldMask - 4, // 2: ory.keto.acl.v1alpha1.ListRelationTuplesResponse.relation_tuples:type_name -> ory.keto.acl.v1alpha1.RelationTuple - 5, // 3: ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.subject:type_name -> ory.keto.acl.v1alpha1.Subject - 0, // 4: ory.keto.acl.v1alpha1.ReadService.ListRelationTuples:input_type -> ory.keto.acl.v1alpha1.ListRelationTuplesRequest - 1, // 5: ory.keto.acl.v1alpha1.ReadService.ListRelationTuples:output_type -> ory.keto.acl.v1alpha1.ListRelationTuplesResponse - 5, // [5:6] is the sub-list for method output_type - 4, // [4:5] is the sub-list for method input_type - 4, // 
[4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_ory_keto_acl_v1alpha1_read_service_proto_init() } -func file_ory_keto_acl_v1alpha1_read_service_proto_init() { - if File_ory_keto_acl_v1alpha1_read_service_proto != nil { - return - } - file_ory_keto_acl_v1alpha1_acl_proto_init() - if !protoimpl.UnsafeEnabled { - file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListRelationTuplesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListRelationTuplesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListRelationTuplesRequest_Query); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ory_keto_acl_v1alpha1_read_service_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_ory_keto_acl_v1alpha1_read_service_proto_goTypes, - DependencyIndexes: file_ory_keto_acl_v1alpha1_read_service_proto_depIdxs, - MessageInfos: file_ory_keto_acl_v1alpha1_read_service_proto_msgTypes, - }.Build() - File_ory_keto_acl_v1alpha1_read_service_proto = out.File - file_ory_keto_acl_v1alpha1_read_service_proto_rawDesc = nil - file_ory_keto_acl_v1alpha1_read_service_proto_goTypes = 
nil - file_ory_keto_acl_v1alpha1_read_service_proto_depIdxs = nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service.proto b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service.proto deleted file mode 100644 index d4152ec528..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service.proto +++ /dev/null @@ -1,91 +0,0 @@ -syntax = "proto3"; - -package ory.keto.acl.v1alpha1; - -import "ory/keto/acl/v1alpha1/acl.proto"; -import "google/protobuf/field_mask.proto"; - -option go_package = "github.com/ory/keto/proto/ory/keto/acl/v1alpha1;acl"; -option csharp_namespace = "Ory.Keto.Acl.V1Alpha1"; -option java_multiple_files = true; -option java_outer_classname = "ReadServiceProto"; -option java_package = "sh.ory.keto.acl.v1alpha1"; -option php_namespace = "Ory\\Keto\\Acl\\V1alpha1"; - -// The service to query relation tuples. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis). -service ReadService { - // Lists ACL relation tuples. - rpc ListRelationTuples(ListRelationTuplesRequest) returns (ListRelationTuplesResponse); -} - -// Request for ReadService.ListRelationTuples RPC. -// See `ListRelationTuplesRequest_Query` for how to filter the query. -message ListRelationTuplesRequest { - // The query for listing relation tuples. - // Clients can specify any optional field to - // partially filter for specific relation tuples. 
- // - // Example use cases (namespace is always required): - // - object only: display a list of all permissions referring to a specific object - // - relation only: get all groups that have members; get all directories that have content - // - object & relation: display all subjects that have a specific permission relation - // - subject & relation: display all groups a subject belongs to; display all objects a subject has access to - // - object & relation & subject: check whether the relation tuple already exists - // - message Query { - // Required. The namespace to query. - string namespace = 1; - // Optional. The object to query for. - string object = 2; - // Optional. The relation to query for. - string relation = 3; - // Optional. The subject to query for. - Subject subject = 4; - } - // All query constraints are concatenated - // with a logical AND operator. - // - // The RelationTuple list from ListRelationTuplesResponse - // is ordered from the newest RelationTuple to the oldest. - Query query = 1; - // This field is not implemented yet and has no effect. - // - google.protobuf.FieldMask expand_mask = 2; - // This field is not implemented yet and has no effect. - // - string snaptoken = 3; - // Optional. The maximum number of - // RelationTuples to return in the response. - // - // Default: 100 - int32 page_size = 4; - // Optional. An opaque pagination token returned from - // a previous call to `ListRelationTuples` that - // indicates where the page should start at. - // - // An empty token denotes the first page. All successive - // pages require the token from the previous page. - string page_token = 5; -} - -// The response of a ReadService.ListRelationTuples RPC. -message ListRelationTuplesResponse { - // The relation tuples matching the list request. - repeated RelationTuple relation_tuples = 1; - // The token required to get the next page. - // If this is the last page, the token will be the empty string. 
- string next_page_token = 2; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc.pb.go deleted file mode 100644 index 2ddf34d72e..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc.pb.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package acl - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// ReadServiceClient is the client API for ReadService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ReadServiceClient interface { - // Lists ACL relation tuples. - ListRelationTuples(ctx context.Context, in *ListRelationTuplesRequest, opts ...grpc.CallOption) (*ListRelationTuplesResponse, error) -} - -type readServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewReadServiceClient(cc grpc.ClientConnInterface) ReadServiceClient { - return &readServiceClient{cc} -} - -func (c *readServiceClient) ListRelationTuples(ctx context.Context, in *ListRelationTuplesRequest, opts ...grpc.CallOption) (*ListRelationTuplesResponse, error) { - out := new(ListRelationTuplesResponse) - err := c.cc.Invoke(ctx, "/ory.keto.acl.v1alpha1.ReadService/ListRelationTuples", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ReadServiceServer is the server API for ReadService service. 
-// All implementations should embed UnimplementedReadServiceServer -// for forward compatibility -type ReadServiceServer interface { - // Lists ACL relation tuples. - ListRelationTuples(context.Context, *ListRelationTuplesRequest) (*ListRelationTuplesResponse, error) -} - -// UnimplementedReadServiceServer should be embedded to have forward compatible implementations. -type UnimplementedReadServiceServer struct { -} - -func (UnimplementedReadServiceServer) ListRelationTuples(context.Context, *ListRelationTuplesRequest) (*ListRelationTuplesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListRelationTuples not implemented") -} - -// UnsafeReadServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ReadServiceServer will -// result in compilation errors. -type UnsafeReadServiceServer interface { - mustEmbedUnimplementedReadServiceServer() -} - -func RegisterReadServiceServer(s grpc.ServiceRegistrar, srv ReadServiceServer) { - s.RegisterService(&ReadService_ServiceDesc, srv) -} - -func _ReadService_ListRelationTuples_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListRelationTuplesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ReadServiceServer).ListRelationTuples(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ory.keto.acl.v1alpha1.ReadService/ListRelationTuples", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ReadServiceServer).ListRelationTuples(ctx, req.(*ListRelationTuplesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// ReadService_ServiceDesc is the grpc.ServiceDesc for ReadService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ReadService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ory.keto.acl.v1alpha1.ReadService", - HandlerType: (*ReadServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListRelationTuples", - Handler: _ReadService_ListRelationTuples_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "ory/keto/acl/v1alpha1/read_service.proto", -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc_pb.d.ts deleted file mode 100644 index d86ee4aa39..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc_pb.d.ts +++ /dev/null @@ -1,43 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/read_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as grpc from "grpc"; -import * as ory_keto_acl_v1alpha1_read_service_pb from "../../../../ory/keto/acl/v1alpha1/read_service_pb"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; -import * as google_protobuf_field_mask_pb from "google-protobuf/google/protobuf/field_mask_pb"; - -interface IReadServiceService extends grpc.ServiceDefinition { - listRelationTuples: IReadServiceService_IListRelationTuples; -} - -interface IReadServiceService_IListRelationTuples extends grpc.MethodDefinition { - path: "/ory.keto.acl.v1alpha1.ReadService/ListRelationTuples"; - requestStream: false; - responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; - responseSerialize: grpc.serialize; - responseDeserialize: grpc.deserialize; -} - -export const ReadServiceService: IReadServiceService; - -export interface IReadServiceServer { - listRelationTuples: grpc.handleUnaryCall; -} - -export interface IReadServiceClient { - 
listRelationTuples(request: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse) => void): grpc.ClientUnaryCall; - listRelationTuples(request: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse) => void): grpc.ClientUnaryCall; - listRelationTuples(request: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse) => void): grpc.ClientUnaryCall; -} - -export class ReadServiceClient extends grpc.Client implements IReadServiceClient { - constructor(address: string, credentials: grpc.ChannelCredentials, options?: object); - public listRelationTuples(request: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse) => void): grpc.ClientUnaryCall; - public listRelationTuples(request: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse) => void): grpc.ClientUnaryCall; - public listRelationTuples(request: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse) => void): grpc.ClientUnaryCall; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc_pb.js deleted 
file mode 100644 index 56f1f7d015..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_grpc_pb.js +++ /dev/null @@ -1,50 +0,0 @@ -// GENERATED CODE -- DO NOT EDIT! - -'use strict'; -var grpc = require('@grpc/grpc-js'); -var ory_keto_acl_v1alpha1_read_service_pb = require('../../../../ory/keto/acl/v1alpha1/read_service_pb.js'); -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); -var google_protobuf_field_mask_pb = require('google-protobuf/google/protobuf/field_mask_pb.js'); - -function serialize_ory_keto_acl_v1alpha1_ListRelationTuplesRequest(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.ListRelationTuplesRequest'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_ListRelationTuplesRequest(buffer_arg) { - return ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest.deserializeBinary(new Uint8Array(buffer_arg)); -} - -function serialize_ory_keto_acl_v1alpha1_ListRelationTuplesResponse(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.ListRelationTuplesResponse'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_ListRelationTuplesResponse(buffer_arg) { - return ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse.deserializeBinary(new Uint8Array(buffer_arg)); -} - - -// The service to query relation tuples. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis). -var ReadServiceService = exports.ReadServiceService = { - // Lists ACL relation tuples. 
-listRelationTuples: { - path: '/ory.keto.acl.v1alpha1.ReadService/ListRelationTuples', - requestStream: false, - responseStream: false, - requestType: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesRequest, - responseType: ory_keto_acl_v1alpha1_read_service_pb.ListRelationTuplesResponse, - requestSerialize: serialize_ory_keto_acl_v1alpha1_ListRelationTuplesRequest, - requestDeserialize: deserialize_ory_keto_acl_v1alpha1_ListRelationTuplesRequest, - responseSerialize: serialize_ory_keto_acl_v1alpha1_ListRelationTuplesResponse, - responseDeserialize: deserialize_ory_keto_acl_v1alpha1_ListRelationTuplesResponse, - }, -}; - -exports.ReadServiceClient = grpc.makeGenericClientConstructor(ReadServiceService); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_pb.d.ts deleted file mode 100644 index 9c4a40d685..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_pb.d.ts +++ /dev/null @@ -1,117 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/read_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as jspb from "google-protobuf"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; -import * as google_protobuf_field_mask_pb from "google-protobuf/google/protobuf/field_mask_pb"; - -export class ListRelationTuplesRequest extends jspb.Message { - - hasQuery(): boolean; - clearQuery(): void; - getQuery(): ListRelationTuplesRequest.Query | undefined; - setQuery(value?: ListRelationTuplesRequest.Query): ListRelationTuplesRequest; - - - hasExpandMask(): boolean; - clearExpandMask(): void; - getExpandMask(): google_protobuf_field_mask_pb.FieldMask | undefined; - setExpandMask(value?: google_protobuf_field_mask_pb.FieldMask): ListRelationTuplesRequest; - - getSnaptoken(): string; - setSnaptoken(value: string): ListRelationTuplesRequest; - - 
getPageSize(): number; - setPageSize(value: number): ListRelationTuplesRequest; - - getPageToken(): string; - setPageToken(value: string): ListRelationTuplesRequest; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): ListRelationTuplesRequest.AsObject; - static toObject(includeInstance: boolean, msg: ListRelationTuplesRequest): ListRelationTuplesRequest.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: ListRelationTuplesRequest, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): ListRelationTuplesRequest; - static deserializeBinaryFromReader(message: ListRelationTuplesRequest, reader: jspb.BinaryReader): ListRelationTuplesRequest; -} - -export namespace ListRelationTuplesRequest { - export type AsObject = { - query?: ListRelationTuplesRequest.Query.AsObject, - expandMask?: google_protobuf_field_mask_pb.FieldMask.AsObject, - snaptoken: string, - pageSize: number, - pageToken: string, - } - - - export class Query extends jspb.Message { - getNamespace(): string; - setNamespace(value: string): Query; - - getObject(): string; - setObject(value: string): Query; - - getRelation(): string; - setRelation(value: string): Query; - - - hasSubject(): boolean; - clearSubject(): void; - getSubject(): ory_keto_acl_v1alpha1_acl_pb.Subject | undefined; - setSubject(value?: ory_keto_acl_v1alpha1_acl_pb.Subject): Query; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): Query.AsObject; - static toObject(includeInstance: boolean, msg: Query): Query.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: Query, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): Query; - static 
deserializeBinaryFromReader(message: Query, reader: jspb.BinaryReader): Query; - } - - export namespace Query { - export type AsObject = { - namespace: string, - object: string, - relation: string, - subject?: ory_keto_acl_v1alpha1_acl_pb.Subject.AsObject, - } - } - -} - -export class ListRelationTuplesResponse extends jspb.Message { - clearRelationTuplesList(): void; - getRelationTuplesList(): Array; - setRelationTuplesList(value: Array): ListRelationTuplesResponse; - addRelationTuples(value?: ory_keto_acl_v1alpha1_acl_pb.RelationTuple, index?: number): ory_keto_acl_v1alpha1_acl_pb.RelationTuple; - - getNextPageToken(): string; - setNextPageToken(value: string): ListRelationTuplesResponse; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): ListRelationTuplesResponse.AsObject; - static toObject(includeInstance: boolean, msg: ListRelationTuplesResponse): ListRelationTuplesResponse.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: ListRelationTuplesResponse, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): ListRelationTuplesResponse; - static deserializeBinaryFromReader(message: ListRelationTuplesResponse, reader: jspb.BinaryReader): ListRelationTuplesResponse; -} - -export namespace ListRelationTuplesResponse { - export type AsObject = { - relationTuplesList: Array, - nextPageToken: string, - } -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_pb.js deleted file mode 100644 index fde6f5da39..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/read_service_pb.js +++ /dev/null @@ -1,808 +0,0 @@ -// source: ory/keto/acl/v1alpha1/read_service.proto -/** - * @fileoverview - * @enhanceable - * @suppress {messageConventions} JS Compiler reports an 
error if a variable or - * field starts with 'MSG_' and isn't a translatable message. - * @public - */ -// GENERATED CODE -- DO NOT EDIT! - -var jspb = require('google-protobuf'); -var goog = jspb; -var global = Function('return this')(); - -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); -goog.object.extend(proto, ory_keto_acl_v1alpha1_acl_pb); -var google_protobuf_field_mask_pb = require('google-protobuf/google/protobuf/field_mask_pb.js'); -goog.object.extend(proto, google_protobuf_field_mask_pb); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse', null, global); -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.displayName = 'proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. 
- * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.displayName = 'proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.repeatedFields_, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.displayName = 'proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse'; -} - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.toObject = function(includeInstance, msg) { - var f, obj = { - query: (f = msg.getQuery()) && proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.toObject(includeInstance, f), - expandMask: (f = msg.getExpandMask()) && google_protobuf_field_mask_pb.FieldMask.toObject(includeInstance, f), - snaptoken: jspb.Message.getFieldWithDefault(msg, 3, ""), - pageSize: jspb.Message.getFieldWithDefault(msg, 4, 0), - pageToken: jspb.Message.getFieldWithDefault(msg, 5, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. 
- * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest; - return proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = new proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query; - reader.readMessage(value,proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.deserializeBinaryFromReader); - msg.setQuery(value); - break; - case 2: - var value = new google_protobuf_field_mask_pb.FieldMask; - reader.readMessage(value,google_protobuf_field_mask_pb.FieldMask.deserializeBinaryFromReader); - msg.setExpandMask(value); - break; - case 3: - var value = /** @type {string} */ (reader.readString()); - msg.setSnaptoken(value); - break; - case 4: - var value = /** @type {number} */ (reader.readInt32()); - msg.setPageSize(value); - break; - case 5: - var value = /** @type {string} */ (reader.readString()); - msg.setPageToken(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). 
- * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getQuery(); - if (f != null) { - writer.writeMessage( - 1, - f, - proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.serializeBinaryToWriter - ); - } - f = message.getExpandMask(); - if (f != null) { - writer.writeMessage( - 2, - f, - google_protobuf_field_mask_pb.FieldMask.serializeBinaryToWriter - ); - } - f = message.getSnaptoken(); - if (f.length > 0) { - writer.writeString( - 3, - f - ); - } - f = message.getPageSize(); - if (f !== 0) { - writer.writeInt32( - 4, - f - ); - } - f = message.getPageToken(); - if (f.length > 0) { - writer.writeString( - 5, - f - ); - } -}; - - - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.toObject = function(includeInstance, msg) { - var f, obj = { - namespace: jspb.Message.getFieldWithDefault(msg, 1, ""), - object: jspb.Message.getFieldWithDefault(msg, 2, ""), - relation: jspb.Message.getFieldWithDefault(msg, 3, ""), - subject: (f = msg.getSubject()) && ory_keto_acl_v1alpha1_acl_pb.Subject.toObject(includeInstance, f) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query; - return proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. 
- * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.setNamespace(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setObject(value); - break; - case 3: - var value = /** @type {string} */ (reader.readString()); - msg.setRelation(value); - break; - case 4: - var value = new ory_keto_acl_v1alpha1_acl_pb.Subject; - reader.readMessage(value,ory_keto_acl_v1alpha1_acl_pb.Subject.deserializeBinaryFromReader); - msg.setSubject(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getNamespace(); - if (f.length > 0) { - writer.writeString( - 1, - f - ); - } - f = message.getObject(); - if (f.length > 0) { - writer.writeString( - 2, - f - ); - } - f = message.getRelation(); - if (f.length > 0) { - writer.writeString( - 3, - f - ); - } - f = message.getSubject(); - if (f != null) { - writer.writeMessage( - 4, - f, - ory_keto_acl_v1alpha1_acl_pb.Subject.serializeBinaryToWriter - ); - } -}; - - -/** - * optional string namespace = 1; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.getNamespace = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.setNamespace = function(value) { - return jspb.Message.setProto3StringField(this, 1, value); -}; - - -/** - * optional string object = 2; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.getObject = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.setObject = function(value) { - return jspb.Message.setProto3StringField(this, 2, value); -}; - - -/** - * optional string relation = 3; - * @return {string} - */ 
-proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.getRelation = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.setRelation = function(value) { - return jspb.Message.setProto3StringField(this, 3, value); -}; - - -/** - * optional Subject subject = 4; - * @return {?proto.ory.keto.acl.v1alpha1.Subject} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.getSubject = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.Subject} */ ( - jspb.Message.getWrapperField(this, ory_keto_acl_v1alpha1_acl_pb.Subject, 4)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.Subject|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} returns this -*/ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.setSubject = function(value) { - return jspb.Message.setWrapperField(this, 4, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.clearSubject = function() { - return this.setSubject(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query.prototype.hasSubject = function() { - return jspb.Message.getField(this, 4) != null; -}; - - -/** - * optional Query query = 1; - * @return {?proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.getQuery = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query} */ ( - jspb.Message.getWrapperField(this, proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query, 1)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.Query|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this -*/ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.setQuery = function(value) { - return jspb.Message.setWrapperField(this, 1, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.clearQuery = function() { - return this.setQuery(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.hasQuery = function() { - return jspb.Message.getField(this, 1) != null; -}; - - -/** - * optional google.protobuf.FieldMask expand_mask = 2; - * @return {?proto.google.protobuf.FieldMask} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.getExpandMask = function() { - return /** @type{?proto.google.protobuf.FieldMask} */ ( - jspb.Message.getWrapperField(this, google_protobuf_field_mask_pb.FieldMask, 2)); -}; - - -/** - * @param {?proto.google.protobuf.FieldMask|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this -*/ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.setExpandMask = function(value) { - return jspb.Message.setWrapperField(this, 2, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.clearExpandMask = function() { - return this.setExpandMask(undefined); -}; - - -/** - * Returns whether this field is set. 
- * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.hasExpandMask = function() { - return jspb.Message.getField(this, 2) != null; -}; - - -/** - * optional string snaptoken = 3; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.getSnaptoken = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.setSnaptoken = function(value) { - return jspb.Message.setProto3StringField(this, 3, value); -}; - - -/** - * optional int32 page_size = 4; - * @return {number} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.getPageSize = function() { - return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 4, 0)); -}; - - -/** - * @param {number} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.setPageSize = function(value) { - return jspb.Message.setProto3IntField(this, 4, value); -}; - - -/** - * optional string page_token = 5; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.getPageToken = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 5, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesRequest.prototype.setPageToken = function(value) { - return jspb.Message.setProto3StringField(this, 5, value); -}; - - - -/** - * List of repeated fields within this message type. 
- * @private {!Array} - * @const - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.repeatedFields_ = [1]; - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.toObject = function(includeInstance, msg) { - var f, obj = { - relationTuplesList: jspb.Message.toObjectList(msg.getRelationTuplesList(), - ory_keto_acl_v1alpha1_acl_pb.RelationTuple.toObject, includeInstance), - nextPageToken: jspb.Message.getFieldWithDefault(msg, 2, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. 
- * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse; - return proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = new ory_keto_acl_v1alpha1_acl_pb.RelationTuple; - reader.readMessage(value,ory_keto_acl_v1alpha1_acl_pb.RelationTuple.deserializeBinaryFromReader); - msg.addRelationTuples(value); - break; - case 2: - var value = /** @type {string} */ (reader.readString()); - msg.setNextPageToken(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getRelationTuplesList(); - if (f.length > 0) { - writer.writeRepeatedMessage( - 1, - f, - ory_keto_acl_v1alpha1_acl_pb.RelationTuple.serializeBinaryToWriter - ); - } - f = message.getNextPageToken(); - if (f.length > 0) { - writer.writeString( - 2, - f - ); - } -}; - - -/** - * repeated RelationTuple relation_tuples = 1; - * @return {!Array} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.getRelationTuplesList = function() { - return /** @type{!Array} */ ( - jspb.Message.getRepeatedWrapperField(this, ory_keto_acl_v1alpha1_acl_pb.RelationTuple, 1)); -}; - - -/** - * @param {!Array} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} returns this -*/ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.setRelationTuplesList = function(value) { - return jspb.Message.setRepeatedWrapperField(this, 1, value); -}; - - -/** - * @param {!proto.ory.keto.acl.v1alpha1.RelationTuple=} opt_value - * @param {number=} opt_index - * @return {!proto.ory.keto.acl.v1alpha1.RelationTuple} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.addRelationTuples = function(opt_value, opt_index) { - return jspb.Message.addToRepeatedWrapperField(this, 1, opt_value, proto.ory.keto.acl.v1alpha1.RelationTuple, opt_index); -}; - - -/** - * Clears the list making it empty but non-null. 
- * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.clearRelationTuplesList = function() { - return this.setRelationTuplesList([]); -}; - - -/** - * optional string next_page_token = 2; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.getNextPageToken = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.ListRelationTuplesResponse.prototype.setNextPageToken = function(value) { - return jspb.Message.setProto3StringField(this, 2, value); -}; - - -goog.object.extend(exports, proto.ory.keto.acl.v1alpha1); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version.pb.go deleted file mode 100644 index 9485ccbf09..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version.pb.go +++ /dev/null @@ -1,218 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.13.0 -// source: ory/keto/acl/v1alpha1/version.proto - -package acl - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Request for the VersionService.GetVersion RPC. 
-type GetVersionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetVersionRequest) Reset() { - *x = GetVersionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_version_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetVersionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetVersionRequest) ProtoMessage() {} - -func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_version_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. -func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_version_proto_rawDescGZIP(), []int{0} -} - -// Response of the VersionService.GetVersion RPC. -type GetVersionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The version string of the Ory Keto instance. 
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *GetVersionResponse) Reset() { - *x = GetVersionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_version_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetVersionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetVersionResponse) ProtoMessage() {} - -func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_version_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. -func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_version_proto_rawDescGZIP(), []int{1} -} - -func (x *GetVersionResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -var File_ory_keto_acl_v1alpha1_version_proto protoreflect.FileDescriptor - -var file_ory_keto_acl_v1alpha1_version_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, - 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x22, 0x13, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x32, 0x73, 0x0a, 0x0e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x28, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6f, 0x72, - 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x96, 0x01, 0x0a, 0x18, 0x73, 0x68, 0x2e, 0x6f, 0x72, - 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x42, 0x13, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, - 0x63, 0x6c, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x61, 0x63, 0x6c, 0xaa, - 0x02, 0x15, 0x4f, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x6c, 0x2e, 0x56, - 0x31, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x4f, 0x72, 0x79, 0x5c, 0x4b, 0x65, - 0x74, 0x6f, 0x5c, 0x41, 0x63, 0x6c, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_ory_keto_acl_v1alpha1_version_proto_rawDescOnce sync.Once - file_ory_keto_acl_v1alpha1_version_proto_rawDescData = file_ory_keto_acl_v1alpha1_version_proto_rawDesc -) - -func 
file_ory_keto_acl_v1alpha1_version_proto_rawDescGZIP() []byte { - file_ory_keto_acl_v1alpha1_version_proto_rawDescOnce.Do(func() { - file_ory_keto_acl_v1alpha1_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_ory_keto_acl_v1alpha1_version_proto_rawDescData) - }) - return file_ory_keto_acl_v1alpha1_version_proto_rawDescData -} - -var file_ory_keto_acl_v1alpha1_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_ory_keto_acl_v1alpha1_version_proto_goTypes = []interface{}{ - (*GetVersionRequest)(nil), // 0: ory.keto.acl.v1alpha1.GetVersionRequest - (*GetVersionResponse)(nil), // 1: ory.keto.acl.v1alpha1.GetVersionResponse -} -var file_ory_keto_acl_v1alpha1_version_proto_depIdxs = []int32{ - 0, // 0: ory.keto.acl.v1alpha1.VersionService.GetVersion:input_type -> ory.keto.acl.v1alpha1.GetVersionRequest - 1, // 1: ory.keto.acl.v1alpha1.VersionService.GetVersion:output_type -> ory.keto.acl.v1alpha1.GetVersionResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_ory_keto_acl_v1alpha1_version_proto_init() } -func file_ory_keto_acl_v1alpha1_version_proto_init() { - if File_ory_keto_acl_v1alpha1_version_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_ory_keto_acl_v1alpha1_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - 
type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ory_keto_acl_v1alpha1_version_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_ory_keto_acl_v1alpha1_version_proto_goTypes, - DependencyIndexes: file_ory_keto_acl_v1alpha1_version_proto_depIdxs, - MessageInfos: file_ory_keto_acl_v1alpha1_version_proto_msgTypes, - }.Build() - File_ory_keto_acl_v1alpha1_version_proto = out.File - file_ory_keto_acl_v1alpha1_version_proto_rawDesc = nil - file_ory_keto_acl_v1alpha1_version_proto_goTypes = nil - file_ory_keto_acl_v1alpha1_version_proto_depIdxs = nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version.proto b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version.proto deleted file mode 100644 index 456df2161b..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package ory.keto.acl.v1alpha1; - -option go_package = "github.com/ory/keto/proto/ory/keto/acl/v1alpha1;acl"; -option csharp_namespace = "Ory.Keto.Acl.V1Alpha1"; -option java_multiple_files = true; -option java_outer_classname = "VersionServiceProto"; -option java_package = "sh.ory.keto.acl.v1alpha1"; -option php_namespace = "Ory\\Keto\\Acl\\V1alpha1"; - -// The service returning the specific Ory Keto instance version. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis) and [write-APIs](../concepts/api-overview.mdx#write-apis). -service VersionService { - // Returns the version of the Ory Keto instance. - rpc GetVersion(GetVersionRequest) returns (GetVersionResponse); -} - -// Request for the VersionService.GetVersion RPC. -message GetVersionRequest {} - -// Response of the VersionService.GetVersion RPC. -message GetVersionResponse { - // The version string of the Ory Keto instance. 
- string version = 1; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc.pb.go deleted file mode 100644 index abb39c39a2..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc.pb.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package acl - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// VersionServiceClient is the client API for VersionService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type VersionServiceClient interface { - // Returns the version of the Ory Keto instance. - GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) -} - -type versionServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewVersionServiceClient(cc grpc.ClientConnInterface) VersionServiceClient { - return &versionServiceClient{cc} -} - -func (c *versionServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { - out := new(GetVersionResponse) - err := c.cc.Invoke(ctx, "/ory.keto.acl.v1alpha1.VersionService/GetVersion", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// VersionServiceServer is the server API for VersionService service. 
-// All implementations should embed UnimplementedVersionServiceServer -// for forward compatibility -type VersionServiceServer interface { - // Returns the version of the Ory Keto instance. - GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) -} - -// UnimplementedVersionServiceServer should be embedded to have forward compatible implementations. -type UnimplementedVersionServiceServer struct { -} - -func (UnimplementedVersionServiceServer) GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") -} - -// UnsafeVersionServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to VersionServiceServer will -// result in compilation errors. -type UnsafeVersionServiceServer interface { - mustEmbedUnimplementedVersionServiceServer() -} - -func RegisterVersionServiceServer(s grpc.ServiceRegistrar, srv VersionServiceServer) { - s.RegisterService(&VersionService_ServiceDesc, srv) -} - -func _VersionService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VersionServiceServer).GetVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ory.keto.acl.v1alpha1.VersionService/GetVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VersionServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// VersionService_ServiceDesc is the grpc.ServiceDesc for VersionService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var VersionService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ory.keto.acl.v1alpha1.VersionService", - HandlerType: (*VersionServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetVersion", - Handler: _VersionService_GetVersion_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "ory/keto/acl/v1alpha1/version.proto", -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc_pb.d.ts deleted file mode 100644 index a7449ecc5e..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc_pb.d.ts +++ /dev/null @@ -1,41 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/version.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as grpc from "grpc"; -import * as ory_keto_acl_v1alpha1_version_pb from "../../../../ory/keto/acl/v1alpha1/version_pb"; - -interface IVersionServiceService extends grpc.ServiceDefinition { - getVersion: IVersionServiceService_IGetVersion; -} - -interface IVersionServiceService_IGetVersion extends grpc.MethodDefinition { - path: "/ory.keto.acl.v1alpha1.VersionService/GetVersion"; - requestStream: false; - responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; - responseSerialize: grpc.serialize; - responseDeserialize: grpc.deserialize; -} - -export const VersionServiceService: IVersionServiceService; - -export interface IVersionServiceServer { - getVersion: grpc.handleUnaryCall; -} - -export interface IVersionServiceClient { - getVersion(request: ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse) => void): grpc.ClientUnaryCall; - getVersion(request: 
ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse) => void): grpc.ClientUnaryCall; - getVersion(request: ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse) => void): grpc.ClientUnaryCall; -} - -export class VersionServiceClient extends grpc.Client implements IVersionServiceClient { - constructor(address: string, credentials: grpc.ChannelCredentials, options?: object); - public getVersion(request: ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse) => void): grpc.ClientUnaryCall; - public getVersion(request: ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse) => void): grpc.ClientUnaryCall; - public getVersion(request: ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse) => void): grpc.ClientUnaryCall; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc_pb.js deleted file mode 100644 index 4438eb196c..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_grpc_pb.js +++ /dev/null @@ -1,48 +0,0 @@ -// GENERATED CODE -- DO NOT EDIT! 
- -'use strict'; -var grpc = require('@grpc/grpc-js'); -var ory_keto_acl_v1alpha1_version_pb = require('../../../../ory/keto/acl/v1alpha1/version_pb.js'); - -function serialize_ory_keto_acl_v1alpha1_GetVersionRequest(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_version_pb.GetVersionRequest)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.GetVersionRequest'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_GetVersionRequest(buffer_arg) { - return ory_keto_acl_v1alpha1_version_pb.GetVersionRequest.deserializeBinary(new Uint8Array(buffer_arg)); -} - -function serialize_ory_keto_acl_v1alpha1_GetVersionResponse(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_version_pb.GetVersionResponse)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.GetVersionResponse'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_GetVersionResponse(buffer_arg) { - return ory_keto_acl_v1alpha1_version_pb.GetVersionResponse.deserializeBinary(new Uint8Array(buffer_arg)); -} - - -// The service returning the specific Ory Keto instance version. -// -// This service is part of the [read-APIs](../concepts/api-overview.mdx#read-apis) and [write-APIs](../concepts/api-overview.mdx#write-apis). -var VersionServiceService = exports.VersionServiceService = { - // Returns the version of the Ory Keto instance. 
-getVersion: { - path: '/ory.keto.acl.v1alpha1.VersionService/GetVersion', - requestStream: false, - responseStream: false, - requestType: ory_keto_acl_v1alpha1_version_pb.GetVersionRequest, - responseType: ory_keto_acl_v1alpha1_version_pb.GetVersionResponse, - requestSerialize: serialize_ory_keto_acl_v1alpha1_GetVersionRequest, - requestDeserialize: deserialize_ory_keto_acl_v1alpha1_GetVersionRequest, - responseSerialize: serialize_ory_keto_acl_v1alpha1_GetVersionResponse, - responseDeserialize: deserialize_ory_keto_acl_v1alpha1_GetVersionResponse, - }, -}; - -exports.VersionServiceClient = grpc.makeGenericClientConstructor(VersionServiceService); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_pb.d.ts deleted file mode 100644 index d37dcf17df..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_pb.d.ts +++ /dev/null @@ -1,45 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/version.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as jspb from "google-protobuf"; - -export class GetVersionRequest extends jspb.Message { - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): GetVersionRequest.AsObject; - static toObject(includeInstance: boolean, msg: GetVersionRequest): GetVersionRequest.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: GetVersionRequest, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): GetVersionRequest; - static deserializeBinaryFromReader(message: GetVersionRequest, reader: jspb.BinaryReader): GetVersionRequest; -} - -export namespace GetVersionRequest { - export type AsObject = { - } -} - -export class GetVersionResponse extends jspb.Message { - getVersion(): string; - setVersion(value: 
string): GetVersionResponse; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): GetVersionResponse.AsObject; - static toObject(includeInstance: boolean, msg: GetVersionResponse): GetVersionResponse.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: GetVersionResponse, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): GetVersionResponse; - static deserializeBinaryFromReader(message: GetVersionResponse, reader: jspb.BinaryReader): GetVersionResponse; -} - -export namespace GetVersionResponse { - export type AsObject = { - version: string, - } -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_pb.js deleted file mode 100644 index 000b78d853..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/version_pb.js +++ /dev/null @@ -1,290 +0,0 @@ -// source: ory/keto/acl/v1alpha1/version.proto -/** - * @fileoverview - * @enhanceable - * @suppress {messageConventions} JS Compiler reports an error if a variable or - * field starts with 'MSG_' and isn't a translatable message. - * @public - */ -// GENERATED CODE -- DO NOT EDIT! - -var jspb = require('google-protobuf'); -var goog = jspb; -var global = Function('return this')(); - -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.GetVersionRequest', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.GetVersionResponse', null, global); -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. 
- * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.GetVersionRequest, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.GetVersionRequest.displayName = 'proto.ory.keto.acl.v1alpha1.GetVersionRequest'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.GetVersionResponse, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.GetVersionResponse.displayName = 'proto.ory.keto.acl.v1alpha1.GetVersionResponse'; -} - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.GetVersionRequest.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.GetVersionRequest} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest.toObject = function(includeInstance, msg) { - var f, obj = { - - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.GetVersionRequest} - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.GetVersionRequest; - return proto.ory.keto.acl.v1alpha1.GetVersionRequest.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.GetVersionRequest} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.GetVersionRequest} - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.GetVersionRequest.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.ory.keto.acl.v1alpha1.GetVersionRequest} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.GetVersionRequest.serializeBinaryToWriter = function(message, writer) { - var f = undefined; -}; - - - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.GetVersionResponse.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.GetVersionResponse} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.toObject = function(includeInstance, msg) { - var f, obj = { - version: jspb.Message.getFieldWithDefault(msg, 1, "") - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.GetVersionResponse} - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.GetVersionResponse; - return proto.ory.keto.acl.v1alpha1.GetVersionResponse.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.GetVersionResponse} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
- * @return {!proto.ory.keto.acl.v1alpha1.GetVersionResponse} - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.setVersion(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.GetVersionResponse.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.ory.keto.acl.v1alpha1.GetVersionResponse} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getVersion(); - if (f.length > 0) { - writer.writeString( - 1, - f - ); - } -}; - - -/** - * optional string version = 1; - * @return {string} - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.prototype.getVersion = function() { - return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); -}; - - -/** - * @param {string} value - * @return {!proto.ory.keto.acl.v1alpha1.GetVersionResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.GetVersionResponse.prototype.setVersion = function(value) { - return jspb.Message.setProto3StringField(this, 1, value); -}; - - -goog.object.extend(exports, proto.ory.keto.acl.v1alpha1); diff --git 
a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service.pb.go deleted file mode 100644 index 5f4e267e2d..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service.pb.go +++ /dev/null @@ -1,399 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.13.0 -// source: ory/keto/acl/v1alpha1/write_service.proto - -package acl - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type RelationTupleDelta_Action int32 - -const ( - // Unspecified. - // The `TransactRelationTuples` RPC ignores this - // RelationTupleDelta if an action was unspecified. - RelationTupleDelta_ACTION_UNSPECIFIED RelationTupleDelta_Action = 0 - // Insertion of a new RelationTuple. - // It is ignored if already existing. - RelationTupleDelta_INSERT RelationTupleDelta_Action = 1 - // Deletion of the RelationTuple. - // It is ignored if it does not exist. - RelationTupleDelta_DELETE RelationTupleDelta_Action = 2 -) - -// Enum value maps for RelationTupleDelta_Action. 
-var ( - RelationTupleDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "INSERT", - 2: "DELETE", - } - RelationTupleDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "INSERT": 1, - "DELETE": 2, - } -) - -func (x RelationTupleDelta_Action) Enum() *RelationTupleDelta_Action { - p := new(RelationTupleDelta_Action) - *p = x - return p -} - -func (x RelationTupleDelta_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (RelationTupleDelta_Action) Descriptor() protoreflect.EnumDescriptor { - return file_ory_keto_acl_v1alpha1_write_service_proto_enumTypes[0].Descriptor() -} - -func (RelationTupleDelta_Action) Type() protoreflect.EnumType { - return &file_ory_keto_acl_v1alpha1_write_service_proto_enumTypes[0] -} - -func (x RelationTupleDelta_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use RelationTupleDelta_Action.Descriptor instead. -func (RelationTupleDelta_Action) EnumDescriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{1, 0} -} - -// The request of a WriteService.TransactRelationTuples RPC. -type TransactRelationTuplesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The write delta for the relation tuples operated in one single transaction. - // Either all actions succeed or no change takes effect on error. 
- RelationTupleDeltas []*RelationTupleDelta `protobuf:"bytes,1,rep,name=relation_tuple_deltas,json=relationTupleDeltas,proto3" json:"relation_tuple_deltas,omitempty"` -} - -func (x *TransactRelationTuplesRequest) Reset() { - *x = TransactRelationTuplesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TransactRelationTuplesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransactRelationTuplesRequest) ProtoMessage() {} - -func (x *TransactRelationTuplesRequest) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransactRelationTuplesRequest.ProtoReflect.Descriptor instead. -func (*TransactRelationTuplesRequest) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{0} -} - -func (x *TransactRelationTuplesRequest) GetRelationTupleDeltas() []*RelationTupleDelta { - if x != nil { - return x.RelationTupleDeltas - } - return nil -} - -// Write-delta for a TransactRelationTuplesRequest. -type RelationTupleDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action to do on the RelationTuple. - Action RelationTupleDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=ory.keto.acl.v1alpha1.RelationTupleDelta_Action" json:"action,omitempty"` - // The target RelationTuple. 
- RelationTuple *RelationTuple `protobuf:"bytes,2,opt,name=relation_tuple,json=relationTuple,proto3" json:"relation_tuple,omitempty"` -} - -func (x *RelationTupleDelta) Reset() { - *x = RelationTupleDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RelationTupleDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RelationTupleDelta) ProtoMessage() {} - -func (x *RelationTupleDelta) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RelationTupleDelta.ProtoReflect.Descriptor instead. -func (*RelationTupleDelta) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{1} -} - -func (x *RelationTupleDelta) GetAction() RelationTupleDelta_Action { - if x != nil { - return x.Action - } - return RelationTupleDelta_ACTION_UNSPECIFIED -} - -func (x *RelationTupleDelta) GetRelationTuple() *RelationTuple { - if x != nil { - return x.RelationTuple - } - return nil -} - -// The response of a WriteService.TransactRelationTuples rpc. -type TransactRelationTuplesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This field is not implemented yet and has no effect. 
- // - Snaptokens []string `protobuf:"bytes,1,rep,name=snaptokens,proto3" json:"snaptokens,omitempty"` -} - -func (x *TransactRelationTuplesResponse) Reset() { - *x = TransactRelationTuplesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TransactRelationTuplesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TransactRelationTuplesResponse) ProtoMessage() {} - -func (x *TransactRelationTuplesResponse) ProtoReflect() protoreflect.Message { - mi := &file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TransactRelationTuplesResponse.ProtoReflect.Descriptor instead. -func (*TransactRelationTuplesResponse) Descriptor() ([]byte, []int) { - return file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP(), []int{2} -} - -func (x *TransactRelationTuplesResponse) GetSnaptokens() []string { - if x != nil { - return x.Snaptokens - } - return nil -} - -var File_ory_keto_acl_v1alpha1_write_service_proto protoreflect.FileDescriptor - -var file_ory_keto_acl_v1alpha1_write_service_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x6f, 0x72, 0x79, - 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x1a, 0x1f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x63, 
0x6c, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x1d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x52, - 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x5d, 0x0a, 0x15, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x75, 0x70, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, - 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x13, - 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x44, 0x65, 0x6c, - 0x74, 0x61, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x75, 0x70, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x6f, 0x72, 0x79, - 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, - 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x75, 0x70, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, - 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, - 0x6c, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, - 0x65, 0x22, 0x38, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, - 0x43, 0x54, 
0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, - 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x1e, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, - 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x32, 0x96, 0x01, - 0x0a, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x85, - 0x01, 0x0a, 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x34, 0x2e, 0x6f, 0x72, 0x79, 0x2e, - 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x35, 0x2e, 0x6f, 0x72, 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x94, 0x01, 0x0a, 0x18, 0x73, 0x68, 0x2e, 0x6f, 0x72, - 0x79, 0x2e, 0x6b, 0x65, 0x74, 0x6f, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x42, 0x11, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x72, 0x79, 
0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x72, 0x79, 0x2f, 0x6b, 0x65, 0x74, 0x6f, 0x2f, 0x61, 0x63, 0x6c, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x61, 0x63, 0x6c, 0xaa, 0x02, 0x15, - 0x4f, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x6c, 0x2e, 0x56, 0x31, 0x41, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x4f, 0x72, 0x79, 0x5c, 0x4b, 0x65, 0x74, 0x6f, - 0x5c, 0x41, 0x63, 0x6c, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_ory_keto_acl_v1alpha1_write_service_proto_rawDescOnce sync.Once - file_ory_keto_acl_v1alpha1_write_service_proto_rawDescData = file_ory_keto_acl_v1alpha1_write_service_proto_rawDesc -) - -func file_ory_keto_acl_v1alpha1_write_service_proto_rawDescGZIP() []byte { - file_ory_keto_acl_v1alpha1_write_service_proto_rawDescOnce.Do(func() { - file_ory_keto_acl_v1alpha1_write_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_ory_keto_acl_v1alpha1_write_service_proto_rawDescData) - }) - return file_ory_keto_acl_v1alpha1_write_service_proto_rawDescData -} - -var file_ory_keto_acl_v1alpha1_write_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_ory_keto_acl_v1alpha1_write_service_proto_goTypes = []interface{}{ - (RelationTupleDelta_Action)(0), // 0: ory.keto.acl.v1alpha1.RelationTupleDelta.Action - (*TransactRelationTuplesRequest)(nil), // 1: ory.keto.acl.v1alpha1.TransactRelationTuplesRequest - (*RelationTupleDelta)(nil), // 2: ory.keto.acl.v1alpha1.RelationTupleDelta - (*TransactRelationTuplesResponse)(nil), // 3: ory.keto.acl.v1alpha1.TransactRelationTuplesResponse - (*RelationTuple)(nil), // 4: ory.keto.acl.v1alpha1.RelationTuple -} -var file_ory_keto_acl_v1alpha1_write_service_proto_depIdxs = []int32{ - 2, // 0: 
ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.relation_tuple_deltas:type_name -> ory.keto.acl.v1alpha1.RelationTupleDelta - 0, // 1: ory.keto.acl.v1alpha1.RelationTupleDelta.action:type_name -> ory.keto.acl.v1alpha1.RelationTupleDelta.Action - 4, // 2: ory.keto.acl.v1alpha1.RelationTupleDelta.relation_tuple:type_name -> ory.keto.acl.v1alpha1.RelationTuple - 1, // 3: ory.keto.acl.v1alpha1.WriteService.TransactRelationTuples:input_type -> ory.keto.acl.v1alpha1.TransactRelationTuplesRequest - 3, // 4: ory.keto.acl.v1alpha1.WriteService.TransactRelationTuples:output_type -> ory.keto.acl.v1alpha1.TransactRelationTuplesResponse - 4, // [4:5] is the sub-list for method output_type - 3, // [3:4] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_ory_keto_acl_v1alpha1_write_service_proto_init() } -func file_ory_keto_acl_v1alpha1_write_service_proto_init() { - if File_ory_keto_acl_v1alpha1_write_service_proto != nil { - return - } - file_ory_keto_acl_v1alpha1_acl_proto_init() - if !protoimpl.UnsafeEnabled { - file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransactRelationTuplesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RelationTupleDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransactRelationTuplesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - 
case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ory_keto_acl_v1alpha1_write_service_proto_rawDesc, - NumEnums: 1, - NumMessages: 3, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_ory_keto_acl_v1alpha1_write_service_proto_goTypes, - DependencyIndexes: file_ory_keto_acl_v1alpha1_write_service_proto_depIdxs, - EnumInfos: file_ory_keto_acl_v1alpha1_write_service_proto_enumTypes, - MessageInfos: file_ory_keto_acl_v1alpha1_write_service_proto_msgTypes, - }.Build() - File_ory_keto_acl_v1alpha1_write_service_proto = out.File - file_ory_keto_acl_v1alpha1_write_service_proto_rawDesc = nil - file_ory_keto_acl_v1alpha1_write_service_proto_goTypes = nil - file_ory_keto_acl_v1alpha1_write_service_proto_depIdxs = nil -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service.proto b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service.proto deleted file mode 100644 index b1e6f999bd..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service.proto +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -package ory.keto.acl.v1alpha1; - -import "ory/keto/acl/v1alpha1/acl.proto"; - -option go_package = "github.com/ory/keto/proto/ory/keto/acl/v1alpha1;acl"; -option csharp_namespace = "Ory.Keto.Acl.V1Alpha1"; -option java_multiple_files = true; -option java_outer_classname = "WatchServiceProto"; -option java_package = "sh.ory.keto.acl.v1alpha1"; -option php_namespace = "Ory\\Keto\\Acl\\V1alpha1"; - -// The write service to create and delete Access Control Lists. -// -// This service is part of the [write-APIs](../concepts/api-overview.mdx#write-apis). -service WriteService { - // Writes one or more relation tuples in a single transaction. 
- rpc TransactRelationTuples(TransactRelationTuplesRequest) returns (TransactRelationTuplesResponse); -} - -// The request of a WriteService.TransactRelationTuples RPC. -message TransactRelationTuplesRequest { - // The write delta for the relation tuples operated in one single transaction. - // Either all actions succeed or no change takes effect on error. - repeated RelationTupleDelta relation_tuple_deltas = 1; -} - -// Write-delta for a TransactRelationTuplesRequest. -message RelationTupleDelta { - enum Action { - // Unspecified. - // The `TransactRelationTuples` RPC ignores this - // RelationTupleDelta if an action was unspecified. - ACTION_UNSPECIFIED = 0; - - // Insertion of a new RelationTuple. - // It is ignored if already existing. - INSERT = 1; - - // Deletion of the RelationTuple. - // It is ignored if it does not exist. - DELETE = 2; - } - // The action to do on the RelationTuple. - Action action = 1; - // The target RelationTuple. - RelationTuple relation_tuple = 2; -} - -// The response of a WriteService.TransactRelationTuples rpc. -message TransactRelationTuplesResponse { - // This field is not implemented yet and has no effect. - // - repeated string snaptokens = 1; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc.pb.go b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc.pb.go deleted file mode 100644 index 809f065c85..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc.pb.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package acl - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. 
-const _ = grpc.SupportPackageIsVersion7 - -// WriteServiceClient is the client API for WriteService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type WriteServiceClient interface { - // Writes one or more relation tuples in a single transaction. - TransactRelationTuples(ctx context.Context, in *TransactRelationTuplesRequest, opts ...grpc.CallOption) (*TransactRelationTuplesResponse, error) -} - -type writeServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewWriteServiceClient(cc grpc.ClientConnInterface) WriteServiceClient { - return &writeServiceClient{cc} -} - -func (c *writeServiceClient) TransactRelationTuples(ctx context.Context, in *TransactRelationTuplesRequest, opts ...grpc.CallOption) (*TransactRelationTuplesResponse, error) { - out := new(TransactRelationTuplesResponse) - err := c.cc.Invoke(ctx, "/ory.keto.acl.v1alpha1.WriteService/TransactRelationTuples", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// WriteServiceServer is the server API for WriteService service. -// All implementations should embed UnimplementedWriteServiceServer -// for forward compatibility -type WriteServiceServer interface { - // Writes one or more relation tuples in a single transaction. - TransactRelationTuples(context.Context, *TransactRelationTuplesRequest) (*TransactRelationTuplesResponse, error) -} - -// UnimplementedWriteServiceServer should be embedded to have forward compatible implementations. -type UnimplementedWriteServiceServer struct { -} - -func (UnimplementedWriteServiceServer) TransactRelationTuples(context.Context, *TransactRelationTuplesRequest) (*TransactRelationTuplesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TransactRelationTuples not implemented") -} - -// UnsafeWriteServiceServer may be embedded to opt out of forward compatibility for this service. 
-// Use of this interface is not recommended, as added methods to WriteServiceServer will -// result in compilation errors. -type UnsafeWriteServiceServer interface { - mustEmbedUnimplementedWriteServiceServer() -} - -func RegisterWriteServiceServer(s grpc.ServiceRegistrar, srv WriteServiceServer) { - s.RegisterService(&WriteService_ServiceDesc, srv) -} - -func _WriteService_TransactRelationTuples_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TransactRelationTuplesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WriteServiceServer).TransactRelationTuples(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ory.keto.acl.v1alpha1.WriteService/TransactRelationTuples", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WriteServiceServer).TransactRelationTuples(ctx, req.(*TransactRelationTuplesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// WriteService_ServiceDesc is the grpc.ServiceDesc for WriteService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var WriteService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ory.keto.acl.v1alpha1.WriteService", - HandlerType: (*WriteServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "TransactRelationTuples", - Handler: _WriteService_TransactRelationTuples_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "ory/keto/acl/v1alpha1/write_service.proto", -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc_pb.d.ts deleted file mode 100644 index a302c8f3bf..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc_pb.d.ts +++ /dev/null @@ -1,42 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/write_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as grpc from "grpc"; -import * as ory_keto_acl_v1alpha1_write_service_pb from "../../../../ory/keto/acl/v1alpha1/write_service_pb"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; - -interface IWriteServiceService extends grpc.ServiceDefinition { - transactRelationTuples: IWriteServiceService_ITransactRelationTuples; -} - -interface IWriteServiceService_ITransactRelationTuples extends grpc.MethodDefinition { - path: "/ory.keto.acl.v1alpha1.WriteService/TransactRelationTuples"; - requestStream: false; - responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; - responseSerialize: grpc.serialize; - responseDeserialize: grpc.deserialize; -} - -export const WriteServiceService: IWriteServiceService; - -export interface IWriteServiceServer { - transactRelationTuples: grpc.handleUnaryCall; -} - -export interface IWriteServiceClient { - transactRelationTuples(request: 
ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse) => void): grpc.ClientUnaryCall; - transactRelationTuples(request: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse) => void): grpc.ClientUnaryCall; - transactRelationTuples(request: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse) => void): grpc.ClientUnaryCall; -} - -export class WriteServiceClient extends grpc.Client implements IWriteServiceClient { - constructor(address: string, credentials: grpc.ChannelCredentials, options?: object); - public transactRelationTuples(request: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse) => void): grpc.ClientUnaryCall; - public transactRelationTuples(request: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse) => void): grpc.ClientUnaryCall; - public transactRelationTuples(request: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse) => void): grpc.ClientUnaryCall; -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc_pb.js 
b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc_pb.js deleted file mode 100644 index 944a114ab3..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_grpc_pb.js +++ /dev/null @@ -1,49 +0,0 @@ -// GENERATED CODE -- DO NOT EDIT! - -'use strict'; -var grpc = require('@grpc/grpc-js'); -var ory_keto_acl_v1alpha1_write_service_pb = require('../../../../ory/keto/acl/v1alpha1/write_service_pb.js'); -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); - -function serialize_ory_keto_acl_v1alpha1_TransactRelationTuplesRequest(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.TransactRelationTuplesRequest'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_TransactRelationTuplesRequest(buffer_arg) { - return ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest.deserializeBinary(new Uint8Array(buffer_arg)); -} - -function serialize_ory_keto_acl_v1alpha1_TransactRelationTuplesResponse(arg) { - if (!(arg instanceof ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse)) { - throw new Error('Expected argument of type ory.keto.acl.v1alpha1.TransactRelationTuplesResponse'); - } - return Buffer.from(arg.serializeBinary()); -} - -function deserialize_ory_keto_acl_v1alpha1_TransactRelationTuplesResponse(buffer_arg) { - return ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse.deserializeBinary(new Uint8Array(buffer_arg)); -} - - -// The write service to create and delete Access Control Lists. -// -// This service is part of the [write-APIs](../concepts/api-overview.mdx#write-apis). -var WriteServiceService = exports.WriteServiceService = { - // Writes one or more relation tuples in a single transaction. 
-transactRelationTuples: { - path: '/ory.keto.acl.v1alpha1.WriteService/TransactRelationTuples', - requestStream: false, - responseStream: false, - requestType: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesRequest, - responseType: ory_keto_acl_v1alpha1_write_service_pb.TransactRelationTuplesResponse, - requestSerialize: serialize_ory_keto_acl_v1alpha1_TransactRelationTuplesRequest, - requestDeserialize: deserialize_ory_keto_acl_v1alpha1_TransactRelationTuplesRequest, - responseSerialize: serialize_ory_keto_acl_v1alpha1_TransactRelationTuplesResponse, - responseDeserialize: deserialize_ory_keto_acl_v1alpha1_TransactRelationTuplesResponse, - }, -}; - -exports.WriteServiceClient = grpc.makeGenericClientConstructor(WriteServiceService); diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_pb.d.ts b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_pb.d.ts deleted file mode 100644 index 47cdb40571..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_pb.d.ts +++ /dev/null @@ -1,89 +0,0 @@ -// package: ory.keto.acl.v1alpha1 -// file: ory/keto/acl/v1alpha1/write_service.proto - -/* tslint:disable */ -/* eslint-disable */ - -import * as jspb from "google-protobuf"; -import * as ory_keto_acl_v1alpha1_acl_pb from "../../../../ory/keto/acl/v1alpha1/acl_pb"; - -export class TransactRelationTuplesRequest extends jspb.Message { - clearRelationTupleDeltasList(): void; - getRelationTupleDeltasList(): Array; - setRelationTupleDeltasList(value: Array): TransactRelationTuplesRequest; - addRelationTupleDeltas(value?: RelationTupleDelta, index?: number): RelationTupleDelta; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): TransactRelationTuplesRequest.AsObject; - static toObject(includeInstance: boolean, msg: TransactRelationTuplesRequest): TransactRelationTuplesRequest.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static 
extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: TransactRelationTuplesRequest, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): TransactRelationTuplesRequest; - static deserializeBinaryFromReader(message: TransactRelationTuplesRequest, reader: jspb.BinaryReader): TransactRelationTuplesRequest; -} - -export namespace TransactRelationTuplesRequest { - export type AsObject = { - relationTupleDeltasList: Array, - } -} - -export class RelationTupleDelta extends jspb.Message { - getAction(): RelationTupleDelta.Action; - setAction(value: RelationTupleDelta.Action): RelationTupleDelta; - - - hasRelationTuple(): boolean; - clearRelationTuple(): void; - getRelationTuple(): ory_keto_acl_v1alpha1_acl_pb.RelationTuple | undefined; - setRelationTuple(value?: ory_keto_acl_v1alpha1_acl_pb.RelationTuple): RelationTupleDelta; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): RelationTupleDelta.AsObject; - static toObject(includeInstance: boolean, msg: RelationTupleDelta): RelationTupleDelta.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: RelationTupleDelta, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): RelationTupleDelta; - static deserializeBinaryFromReader(message: RelationTupleDelta, reader: jspb.BinaryReader): RelationTupleDelta; -} - -export namespace RelationTupleDelta { - export type AsObject = { - action: RelationTupleDelta.Action, - relationTuple?: ory_keto_acl_v1alpha1_acl_pb.RelationTuple.AsObject, - } - - export enum Action { - ACTION_UNSPECIFIED = 0, - INSERT = 1, - DELETE = 2, - } - -} - -export class TransactRelationTuplesResponse extends jspb.Message { - clearSnaptokensList(): void; - getSnaptokensList(): Array; - setSnaptokensList(value: Array): TransactRelationTuplesResponse; 
- addSnaptokens(value: string, index?: number): string; - - - serializeBinary(): Uint8Array; - toObject(includeInstance?: boolean): TransactRelationTuplesResponse.AsObject; - static toObject(includeInstance: boolean, msg: TransactRelationTuplesResponse): TransactRelationTuplesResponse.AsObject; - static extensions: {[key: number]: jspb.ExtensionFieldInfo}; - static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; - static serializeBinaryToWriter(message: TransactRelationTuplesResponse, writer: jspb.BinaryWriter): void; - static deserializeBinary(bytes: Uint8Array): TransactRelationTuplesResponse; - static deserializeBinaryFromReader(message: TransactRelationTuplesResponse, reader: jspb.BinaryReader): TransactRelationTuplesResponse; -} - -export namespace TransactRelationTuplesResponse { - export type AsObject = { - snaptokensList: Array, - } -} diff --git a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_pb.js b/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_pb.js deleted file mode 100644 index ff271e675a..0000000000 --- a/vendor/github.com/ory/keto/proto/ory/keto/acl/v1alpha1/write_service_pb.js +++ /dev/null @@ -1,590 +0,0 @@ -// source: ory/keto/acl/v1alpha1/write_service.proto -/** - * @fileoverview - * @enhanceable - * @suppress {messageConventions} JS Compiler reports an error if a variable or - * field starts with 'MSG_' and isn't a translatable message. - * @public - */ -// GENERATED CODE -- DO NOT EDIT! 
- -var jspb = require('google-protobuf'); -var goog = jspb; -var global = Function('return this')(); - -var ory_keto_acl_v1alpha1_acl_pb = require('../../../../ory/keto/acl/v1alpha1/acl_pb.js'); -goog.object.extend(proto, ory_keto_acl_v1alpha1_acl_pb); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.RelationTupleDelta', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.RelationTupleDelta.Action', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest', null, global); -goog.exportSymbol('proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse', null, global); -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.repeatedFields_, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.displayName = 'proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. 
- * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.RelationTupleDelta, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.RelationTupleDelta.displayName = 'proto.ory.keto.acl.v1alpha1.RelationTupleDelta'; -} -/** - * Generated by JsPbCodeGenerator. - * @param {Array=} opt_data Optional initial data array, typically from a - * server response, or constructed directly in Javascript. The array is used - * in place and becomes part of the constructed object. It is not cloned. - * If no data is provided, the constructed object will be empty, but still - * valid. - * @extends {jspb.Message} - * @constructor - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.repeatedFields_, null); -}; -goog.inherits(proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse, jspb.Message); -if (goog.DEBUG && !COMPILED) { - /** - * @public - * @override - */ - proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.displayName = 'proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse'; -} - -/** - * List of repeated fields within this message type. - * @private {!Array} - * @const - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.repeatedFields_ = [1]; - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. 
- * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.toObject = function(includeInstance, msg) { - var f, obj = { - relationTupleDeltasList: jspb.Message.toObjectList(msg.getRelationTupleDeltasList(), - proto.ory.keto.acl.v1alpha1.RelationTupleDelta.toObject, includeInstance) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest; - return proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} msg The message object to deserialize into. 
- * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = new proto.ory.keto.acl.v1alpha1.RelationTupleDelta; - reader.readMessage(value,proto.ory.keto.acl.v1alpha1.RelationTupleDelta.deserializeBinaryFromReader); - msg.addRelationTupleDeltas(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getRelationTupleDeltasList(); - if (f.length > 0) { - writer.writeRepeatedMessage( - 1, - f, - proto.ory.keto.acl.v1alpha1.RelationTupleDelta.serializeBinaryToWriter - ); - } -}; - - -/** - * repeated RelationTupleDelta relation_tuple_deltas = 1; - * @return {!Array} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.prototype.getRelationTupleDeltasList = function() { - return /** @type{!Array} */ ( - jspb.Message.getRepeatedWrapperField(this, proto.ory.keto.acl.v1alpha1.RelationTupleDelta, 1)); -}; - - -/** - * @param {!Array} value - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} returns this -*/ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.prototype.setRelationTupleDeltasList = function(value) { - return jspb.Message.setRepeatedWrapperField(this, 1, value); -}; - - -/** - * @param {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta=} opt_value - * @param {number=} opt_index - * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.prototype.addRelationTupleDeltas = function(opt_value, opt_index) { - return jspb.Message.addToRepeatedWrapperField(this, 1, opt_value, proto.ory.keto.acl.v1alpha1.RelationTupleDelta, opt_index); -}; - - -/** - * Clears the list making it empty but non-null. 
- * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest} returns this - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesRequest.prototype.clearRelationTupleDeltasList = function() { - return this.setRelationTupleDeltasList([]); -}; - - - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.RelationTupleDelta.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} msg The msg instance to transform. - * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.toObject = function(includeInstance, msg) { - var f, obj = { - action: jspb.Message.getFieldWithDefault(msg, 1, 0), - relationTuple: (f = msg.getRelationTuple()) && ory_keto_acl_v1alpha1_acl_pb.RelationTuple.toObject(includeInstance, f) - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. 
- * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.RelationTupleDelta; - return proto.ory.keto.acl.v1alpha1.RelationTupleDelta.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta.Action} */ (reader.readEnum()); - msg.setAction(value); - break; - case 2: - var value = new ory_keto_acl_v1alpha1_acl_pb.RelationTuple; - reader.readMessage(value,ory_keto_acl_v1alpha1_acl_pb.RelationTuple.deserializeBinaryFromReader); - msg.setRelationTuple(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). - * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.RelationTupleDelta.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. 
- * @param {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getAction(); - if (f !== 0.0) { - writer.writeEnum( - 1, - f - ); - } - f = message.getRelationTuple(); - if (f != null) { - writer.writeMessage( - 2, - f, - ory_keto_acl_v1alpha1_acl_pb.RelationTuple.serializeBinaryToWriter - ); - } -}; - - -/** - * @enum {number} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.Action = { - ACTION_UNSPECIFIED: 0, - INSERT: 1, - DELETE: 2 -}; - -/** - * optional Action action = 1; - * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta.Action} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.getAction = function() { - return /** @type {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta.Action} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); -}; - - -/** - * @param {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta.Action} value - * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} returns this - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.setAction = function(value) { - return jspb.Message.setProto3EnumField(this, 1, value); -}; - - -/** - * optional RelationTuple relation_tuple = 2; - * @return {?proto.ory.keto.acl.v1alpha1.RelationTuple} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.getRelationTuple = function() { - return /** @type{?proto.ory.keto.acl.v1alpha1.RelationTuple} */ ( - jspb.Message.getWrapperField(this, ory_keto_acl_v1alpha1_acl_pb.RelationTuple, 2)); -}; - - -/** - * @param {?proto.ory.keto.acl.v1alpha1.RelationTuple|undefined} value - * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} returns this -*/ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.setRelationTuple = function(value) { - return 
jspb.Message.setWrapperField(this, 2, value); -}; - - -/** - * Clears the message field making it undefined. - * @return {!proto.ory.keto.acl.v1alpha1.RelationTupleDelta} returns this - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.clearRelationTuple = function() { - return this.setRelationTuple(undefined); -}; - - -/** - * Returns whether this field is set. - * @return {boolean} - */ -proto.ory.keto.acl.v1alpha1.RelationTupleDelta.prototype.hasRelationTuple = function() { - return jspb.Message.getField(this, 2) != null; -}; - - - -/** - * List of repeated fields within this message type. - * @private {!Array} - * @const - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.repeatedFields_ = [1]; - - - -if (jspb.Message.GENERATE_TO_OBJECT) { -/** - * Creates an object representation of this proto. - * Field names that are reserved in JavaScript and will be renamed to pb_name. - * Optional fields that are not set will be set to undefined. - * To access a reserved field use, foo.pb_, eg, foo.pb_default. - * For the list of reserved names please see: - * net/proto2/compiler/js/internal/generator.cc#kKeyword. - * @param {boolean=} opt_includeInstance Deprecated. whether to include the - * JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @return {!Object} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.prototype.toObject = function(opt_includeInstance) { - return proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.toObject(opt_includeInstance, this); -}; - - -/** - * Static version of the {@see toObject} method. - * @param {boolean|undefined} includeInstance Deprecated. Whether to include - * the JSPB instance for transitional soy proto support: - * http://goto/soy-param-migration - * @param {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} msg The msg instance to transform. 
- * @return {!Object} - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.toObject = function(includeInstance, msg) { - var f, obj = { - snaptokensList: (f = jspb.Message.getRepeatedField(msg, 1)) == null ? undefined : f - }; - - if (includeInstance) { - obj.$jspbMessageInstance = msg; - } - return obj; -}; -} - - -/** - * Deserializes binary data (in protobuf wire format). - * @param {jspb.ByteSource} bytes The bytes to deserialize. - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.deserializeBinary = function(bytes) { - var reader = new jspb.BinaryReader(bytes); - var msg = new proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse; - return proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.deserializeBinaryFromReader(msg, reader); -}; - - -/** - * Deserializes binary data (in protobuf wire format) from the - * given reader into the given message object. - * @param {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} msg The message object to deserialize into. - * @param {!jspb.BinaryReader} reader The BinaryReader to use. - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.deserializeBinaryFromReader = function(msg, reader) { - while (reader.nextField()) { - if (reader.isEndGroup()) { - break; - } - var field = reader.getFieldNumber(); - switch (field) { - case 1: - var value = /** @type {string} */ (reader.readString()); - msg.addSnaptokens(value); - break; - default: - reader.skipField(); - break; - } - } - return msg; -}; - - -/** - * Serializes the message to binary data (in protobuf wire format). 
- * @return {!Uint8Array} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.prototype.serializeBinary = function() { - var writer = new jspb.BinaryWriter(); - proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.serializeBinaryToWriter(this, writer); - return writer.getResultBuffer(); -}; - - -/** - * Serializes the given message to binary data (in protobuf wire - * format), writing to the given BinaryWriter. - * @param {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} message - * @param {!jspb.BinaryWriter} writer - * @suppress {unusedLocalVariables} f is only used for nested messages - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.serializeBinaryToWriter = function(message, writer) { - var f = undefined; - f = message.getSnaptokensList(); - if (f.length > 0) { - writer.writeRepeatedString( - 1, - f - ); - } -}; - - -/** - * repeated string snaptokens = 1; - * @return {!Array} - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.prototype.getSnaptokensList = function() { - return /** @type {!Array} */ (jspb.Message.getRepeatedField(this, 1)); -}; - - -/** - * @param {!Array} value - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.prototype.setSnaptokensList = function(value) { - return jspb.Message.setField(this, 1, value || []); -}; - - -/** - * @param {string} value - * @param {number=} opt_index - * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.prototype.addSnaptokens = function(value, opt_index) { - return jspb.Message.addToRepeatedField(this, 1, value, opt_index); -}; - - -/** - * Clears the list making it empty but non-null. 
- * @return {!proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse} returns this - */ -proto.ory.keto.acl.v1alpha1.TransactRelationTuplesResponse.prototype.clearSnaptokensList = function() { - return this.setSnaptokensList([]); -}; - - -goog.object.extend(exports, proto.ory.keto.acl.v1alpha1); diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml new file mode 100644 index 0000000000..559fa399c1 --- /dev/null +++ b/vendor/github.com/stretchr/objx/.codeclimate.yml @@ -0,0 +1,21 @@ +engines: + gofmt: + enabled: true + golint: + enabled: true + govet: + enabled: true + +exclude_patterns: +- ".github/" +- "vendor/" +- "codegen/" +- "*.yml" +- ".*.yml" +- "*.md" +- "Gopkg.*" +- "doc.go" +- "type_specific_codegen_test.go" +- "type_specific_codegen.go" +- ".gitignore" +- "LICENSE" diff --git a/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/objx/.gitignore new file mode 100644 index 0000000000..ea58090bd2 --- /dev/null +++ b/vendor/github.com/stretchr/objx/.gitignore @@ -0,0 +1,11 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE new file mode 100644 index 0000000000..44d4d9d5a7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. 
+Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md new file mode 100644 index 0000000000..246660b21a --- /dev/null +++ b/vendor/github.com/stretchr/objx/README.md @@ -0,0 +1,80 @@ +# Objx +[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) +[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) +[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) +[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) +[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) +[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) + +Objx - Go package for dealing with maps, slices, JSON and other data. + +Get started: + +- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) +- Check out the API Documentation http://godoc.org/github.com/stretchr/objx + +## Overview +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. + +### Pattern +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. 
+ +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. + +### Reading data +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +### Ranging +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } + +## Installation +To install Objx, use go get: + + go get github.com/stretchr/objx + +### Staying up to date +To update Objx to the latest version, run: + + go get -u github.com/stretchr/objx + +### Supported go versions +We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. + +## Contributing +Please feel free to submit issues, fork the repository and send pull requests! 
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml new file mode 100644 index 0000000000..7746f516da --- /dev/null +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -0,0 +1,30 @@ +version: '2' + +env: + GOFLAGS: -mod=vendor + +tasks: + default: + deps: [test] + + lint: + desc: Checks code style + cmds: + - gofmt -d -s *.go + - go vet ./... + silent: true + + lint-fix: + desc: Fixes code style + cmds: + - gofmt -w -s *.go + + test: + desc: Runs go tests + cmds: + - go test -race ./... + + test-coverage: + desc: Runs go tests and calculates test coverage + cmds: + - go test -race -coverprofile=c.out ./... diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 0000000000..4c60455886 --- /dev/null +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,197 @@ +package objx + +import ( + "reflect" + "regexp" + "strconv" + "strings" +) + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // arrayAccesRegexString is the regex used to extract the array number + // from the access path + arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + + // mapAccessRegexString is the regex used to extract the map key + // from the access path + mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` +) + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// mapAccessRegex is the compiled mapAccessRegexString +var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. 
+// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true) + return m +} + +// getIndex returns the index, which is hold in s by two braches. +// It also returns s withour the index part, e.g. name[1] will return (1, name). +// If no index is found, -1 is returned +func getIndex(s string) (int, string) { + arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + if len(arrayMatches) > 0 { + // Get the key into the map + selector := arrayMatches[1] + // Get the index into the array at the key + // We know this cannt fail because arrayMatches[2] is an int for sure + index, _ := strconv.Atoi(arrayMatches[2]) + return index, selector + } + return -1, s +} + +// getKey returns the key which is held in s by two brackets. +// It also returns the next selector. +func getKey(s string) (string, string) { + selSegs := strings.SplitN(s, PathSeparator, 2) + thisSel := selSegs[0] + nextSel := "" + + if len(selSegs) > 1 { + nextSel = selSegs[1] + } + + mapMatches := mapAccessRegex.FindStringSubmatch(s) + if len(mapMatches) > 0 { + if _, err := strconv.Atoi(mapMatches[2]); err != nil { + thisSel = mapMatches[1] + nextSel = "[" + mapMatches[2] + "]" + mapMatches[3] + + if thisSel == "" { + thisSel = mapMatches[2] + nextSel = mapMatches[3] + } + + if nextSel == "" { + selSegs = []string{"", ""} + } else if nextSel[0] == '.' 
{ + nextSel = nextSel[1:] + } + } + } + + return thisSel, nextSel +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { + thisSel, nextSel := getKey(selector) + + indexes := []int{} + for strings.Contains(thisSel, "[") { + prevSel := thisSel + index := -1 + index, thisSel = getIndex(thisSel) + indexes = append(indexes, index) + if prevSel == thisSel { + break + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if nextSel == "" && isSet { + curMSI[thisSel] = value + return nil + } + + _, ok := curMSI[thisSel].(map[string]interface{}) + if !ok { + _, ok = curMSI[thisSel].(Map) + } + + if (curMSI[thisSel] == nil || !ok) && len(indexes) == 0 && isSet { + curMSI[thisSel] = map[string]interface{}{} + } + + current = curMSI[thisSel] + default: + current = nil + } + + // do we need to access the item of an array? 
+ if len(indexes) > 0 { + num := len(indexes) + for num > 0 { + num-- + index := indexes[num] + indexes = indexes[:num] + if array, ok := interSlice(current); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + break + } + } + } + } + + if nextSel != "" { + current = access(current, nextSel, value, isSet) + } + return current +} + +func interSlice(slice interface{}) ([]interface{}, bool) { + if array, ok := slice.([]interface{}); ok { + return array, ok + } + + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + return nil, false + } + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, true +} diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 0000000000..080aa46e47 --- /dev/null +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,280 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strconv" +) + +// SignatureSeparator is the character that is used to +// separate the Base64 string from the security signature. +const SignatureSeparator = "_" + +// URLValuesSliceKeySuffix is the character that is used to +// specify a suffic for slices parsed by URLValues. +// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +var urlValuesSliceKeySuffix = "[]" + +const ( + URLValuesSliceKeySuffixEmpty = "" + URLValuesSliceKeySuffixArray = "[]" + URLValuesSliceKeySuffixIndex = "[i]" +) + +// SetURLValuesSliceKeySuffix sets the character that is used to +// specify a suffic for slices parsed by URLValues. 
+// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +func SetURLValuesSliceKeySuffix(s string) error { + if s == URLValuesSliceKeySuffixEmpty || s == URLValuesSliceKeySuffixArray || s == URLValuesSliceKeySuffixIndex { + urlValuesSliceKeySuffix = s + return nil + } + + return errors.New("objx: Invalid URLValuesSliceKeySuffix provided.") +} + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + for k, v := range m { + m[k] = cleanUp(v) + } + + result, err := json.Marshal(m) + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + return string(result), err +} + +func cleanUpInterfaceArray(in []interface{}) []interface{} { + result := make([]interface{}, len(in)) + for i, v := range in { + result[i] = cleanUp(v) + } + return result +} + +func cleanUpInterfaceMap(in map[interface{}]interface{}) Map { + result := Map{} + for k, v := range in { + result[fmt.Sprintf("%v", k)] = cleanUp(v) + } + return result +} + +func cleanUpStringMap(in map[string]interface{}) Map { + result := Map{} + for k, v := range in { + result[k] = cleanUp(v) + } + return result +} + +func cleanUpMSIArray(in []map[string]interface{}) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUpMapArray(in []Map) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUp(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + return cleanUpInterfaceArray(v) + case []map[string]interface{}: + return cleanUpMSIArray(v) + case map[interface{}]interface{}: + return cleanUpInterfaceMap(v) + case Map: + return cleanUpStringMap(v) + case []Map: + 
return cleanUpMapArray(v) + default: + return v + } +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + _, _ = encoder.Write([]byte(jsonData)) + _ = encoder.Close() + + return buf.String(), nil +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. +func (m Map) SignedBase64(key string) (string, error) { + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + return base64 + SignatureSeparator + sig, nil +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. 
This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + vals := make(url.Values) + + m.parseURLValues(m, vals, "") + + return vals +} + +func (m Map) parseURLValues(queryMap Map, vals url.Values, key string) { + useSliceIndex := false + if urlValuesSliceKeySuffix == "[i]" { + useSliceIndex = true + } + + for k, v := range queryMap { + val := &Value{data: v} + switch { + case val.IsObjxMap(): + if key == "" { + m.parseURLValues(val.ObjxMap(), vals, k) + } else { + m.parseURLValues(val.ObjxMap(), vals, key+"["+k+"]") + } + case val.IsObjxMapSlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustObjxMapSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(sv, vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustObjxMapSlice() { + m.parseURLValues(sv, vals, sliceKey) + } + } + case val.IsMSISlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustMSISlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(New(sv), vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustMSISlice() { + m.parseURLValues(New(sv), vals, sliceKey) + } + } + case val.IsStrSlice(), val.IsBoolSlice(), + val.IsFloat32Slice(), val.IsFloat64Slice(), + val.IsIntSlice(), val.IsInt8Slice(), val.IsInt16Slice(), val.IsInt32Slice(), val.IsInt64Slice(), + val.IsUintSlice(), val.IsUint8Slice(), val.IsUint16Slice(), val.IsUint32Slice(), val.IsUint64Slice(): + + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.StringSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + vals.Set(sk, sv) + } + } else { + sliceKey = sliceKey + 
urlValuesSliceKeySuffix + vals[sliceKey] = val.StringSlice() + } + + default: + if key == "" { + vals.Set(k, val.String()) + } else { + vals.Set(key+"["+k+"]", val.String()) + } + } + } +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 0000000000..6d6af1a83a --- /dev/null +++ b/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,66 @@ +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. 
+ + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } +*/ +package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 0000000000..a64712a08b --- /dev/null +++ b/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,215 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. 
+func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// +// Returns nil if any key argument is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +func MSI(keyAndValuePairs ...interface{}) Map { + newMap := Map{} + keyAndValuePairsLen := len(keyAndValuePairs) + if keyAndValuePairsLen%2 != 0 { + return nil + } + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + return nil + } + newMap[keyString] = value + } + return newMap +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + return o +} + +// MustFromJSONSlice creates a new slice of Map containing the data specified in the +// jsonString. Works with jsons with a top level array +// +// Panics if the JSON is invalid. +func MustFromJSONSlice(jsonString string) []Map { + slice, err := FromJSONSlice(jsonString) + if err != nil { + panic("objx: MustFromJSONSlice failed with error: " + err.Error()) + } + return slice +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. 
+// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + var m Map + err := json.Unmarshal([]byte(jsonString), &m) + if err != nil { + return Nil, err + } + return m, nil +} + +// FromJSONSlice creates a new slice of Map containing the data specified in the +// jsonString. Works with jsons with a top level array +// +// Returns an error if the JSON is invalid. +func FromJSONSlice(jsonString string) ([]Map, error) { + var slice []Map + err := json.Unmarshal([]byte(jsonString), &slice) + if err != nil { + return nil, err + } + return slice, nil +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + result, err := FromBase64(base64String) + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. 
+// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match") + } + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + result, err := FromSignedBase64(base64String, key) + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + vals, err := url.ParseQuery(query) + if err != nil { + return nil, err + } + m := Map{} + for k, vals := range vals { + m[k] = vals[0] + } + return m, nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + o, err := FromURLQuery(query) + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + return o +} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 0000000000..c3400a3f70 --- /dev/null +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,77 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. 
+func (m Map) Exclude(exclude []string) Map { + excluded := make(Map) + for k, v := range m { + if !contains(exclude, k) { + excluded[k] = v + } + } + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := Map{} + for k, v := range m { + copied[k] = v + } + return copied +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// MergeHere blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + for k, v := range merge { + m[k] = v + } + return m +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := Map{} + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return newMap +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. 
+// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + if newKey, ok := mapping[key]; ok { + return newKey, value + } + return key, value + }) +} + +// Checks if a string slice contains a string +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 0000000000..692be8e2a9 --- /dev/null +++ b/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,12 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security key +func HashWithKey(data, key string) string { + d := sha1.Sum([]byte(data + ":" + key)) + return hex.EncodeToString(d[:]) +} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 0000000000..d9e0b479a4 --- /dev/null +++ b/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. 
+func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/objx/type_specific.go b/vendor/github.com/stretchr/objx/type_specific.go new file mode 100644 index 0000000000..80f88d9fa2 --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific.go @@ -0,0 +1,346 @@ +package objx + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + + s := v.ObjxMapSlice() + if s == nil { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]map[string]interface{}, len(s)) + for i := range s { + result[i] = s[i].Value().MSI() + } + return result +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. 
+func (v *Value) MustMSISlice() []map[string]interface{} { + if s := v.MSISlice(); s != nil { + return s + } + + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + if !ok { + _, ok = v.data.(Map) + } + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + if !ok { + _, ok = v.data.([]Map) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. 
+func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). 
+// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([]Map); ok { + return s + } + + if s, ok := v.data.([]map[string]interface{}); ok { + result := make([]Map, len(s)) + for i := range s { + result[i] = s[i] + } + return result + } + + s, ok := v.data.([]interface{}) + if !ok { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]Map, len(s)) + for i := range s { + switch s[i].(type) { + case Map: + result[i] = s[i].(Map) + case map[string]interface{}: + result[i] = New(s[i]) + default: + return nil + } + } + return result +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + if s := v.ObjxMapSlice(); s != nil { + return s + } + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + if !ok { + _, ok = v.data.(map[string]interface{}) + } + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + if !ok { + _, ok = v.data.([]map[string]interface{}) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. 
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 0000000000..45850456e1 --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2261 @@ +package objx + +/* + Inter (interface{} and []interface{}) +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. 
+func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + var selected []interface{} + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + groups := make(map[string][]interface{}) + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. 
+func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Bool (bool and []bool) +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. 
+func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + var selected []bool + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + groups := make(map[string][]bool) + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. 
The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Str (string and []string) +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachStr(callback func(int, string) bool) *Value { + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + var selected []string + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + groups := make(map[string][]string) + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int (int and []int) +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt(callback func(int, int) bool) *Value { + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + var selected []int + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + groups := make(map[string][]int) + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + var selected []int8 + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + groups := make(map[string][]int8) + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + var selected []int16 + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + groups := make(map[string][]int16) + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + var selected []int32 + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + groups := make(map[string][]int32) + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + var selected []int64 + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + groups := make(map[string][]int64) + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint (uint and []uint) +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + var selected []uint + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + groups := make(map[string][]uint) + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. 
+func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + var selected []uint8 + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. 
+func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + groups := make(map[string][]uint8) + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. 
+func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + var selected []uint16 + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. 
+func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + groups := make(map[string][]uint16) + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. 
+func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + var selected []uint32 + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. 
+func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + groups := make(map[string][]uint32) + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. 
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + var selected []uint64 + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. 
+func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + groups := make(map[string][]uint64) + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. 
+func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + var selected []uintptr + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. 
+func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + groups := make(map[string][]uintptr) + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. 
+func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + var selected []float32 + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. 
+func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + groups := make(map[string][]float32) + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. 
+func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + var selected []float64 + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. 
+func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + groups := make(map[string][]float64) + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. 
+func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + var selected []complex64 + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. 
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + groups := make(map[string][]complex64) + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. 
+func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + var selected []complex128 + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + groups := make(map[string][]complex128) + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 0000000000..4e5f9b77e6 --- /dev/null +++ b/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,159 @@ +package objx + +import ( + "fmt" + "strconv" +) + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsNil(): + return "" + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + return fmt.Sprintf("%#v", v.Data()) +} + +// StringSlice returns the value always as a []string +func (v *Value) StringSlice(optionalDefault ...[]string) []string { + switch { + case v.IsStrSlice(): + return v.MustStrSlice() + case v.IsBoolSlice(): + slice := v.MustBoolSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatBool(iv) + } + return vals + case v.IsFloat32Slice(): + slice := v.MustFloat32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(float64(iv), 'f', -1, 32) + } + return vals + case v.IsFloat64Slice(): + slice := v.MustFloat64Slice() + vals := make([]string, len(slice)) + for i, iv := 
range slice { + vals[i] = strconv.FormatFloat(iv, 'f', -1, 64) + } + return vals + case v.IsIntSlice(): + slice := v.MustIntSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt8Slice(): + slice := v.MustInt8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt16Slice(): + slice := v.MustInt16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt32Slice(): + slice := v.MustInt32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt64Slice(): + slice := v.MustInt64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(iv, 10) + } + return vals + case v.IsUintSlice(): + slice := v.MustUintSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint8Slice(): + slice := v.MustUint8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint16Slice(): + slice := v.MustUint16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint32Slice(): + slice := v.MustUint32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint64Slice(): + slice := v.MustUint64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(iv, 10) + } + return vals + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + + return 
[]string{} +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b189..2924cf3a14 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -141,12 +140,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -530,7 +528,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) 
} - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) } } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) } } @@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) 
} - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) 
diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 0000000000..7324128ef1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. 
+package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 0000000000..e6ff8dfeb2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,1104 @@ +package mock + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // Holds the caller info for the On() call + callerInfo []string + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. 
+ RunFn func(Arguments) + + // PanicMsg holds msg to be used to mock panic on the function call + // if the PanicMsg is set to a non nil string the function call will panic + // irrespective of other settings + PanicMsg *string + + // Calls which must be satisfied before this call can be + requires []*Call +} + +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + callerInfo: callerInfo, + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + PanicMsg: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Panic specifies if the functon call should fail and the panic message +// +// Mock.On("DoSomething").Panic("test panic") +func (c *Call) Panic(msg string) *Call { + c.lock() + defer c.unlock() + + c.PanicMsg = &msg + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. 
+// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method (such as an unmarshaler) that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +//go:noinline +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Unset removes a mock handler from being called. 
+// test.On("func", mock.Anything).Unset() +func (c *Call) Unset() *Call { + var unlockOnce sync.Once + + for _, arg := range c.Arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + c.lock() + defer unlockOnce.Do(c.unlock) + + foundMatchingCall := false + + // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones + var index int // write index + for _, call := range c.Parent.ExpectedCalls { + if call.Method == c.Method { + _, diffCount := call.Arguments.Diff(c.Arguments) + if diffCount == 0 { + foundMatchingCall = true + // Remove from ExpectedCalls - just skip it + continue + } + } + c.Parent.ExpectedCalls[index] = call + index++ + } + // trim slice up to last copied index + c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index] + + if !foundMatchingCall { + unlockOnce.Do(c.unlock) + c.Parent.fail("\n\nmock: Could not find expected call\n-----------------------------\n\n%s\n\n", + callString(c.Method, c.Arguments, true), + ) + } + + return c +} + +// NotBefore indicates that the mock should only be called after the referenced +// calls have been called as expected. The referenced calls may be from the +// same mock instance and/or other mock instances. +// +// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Init").Return(nil) +// ) +func (c *Call) NotBefore(calls ...*Call) *Call { + c.lock() + defer c.unlock() + + for _, call := range calls { + if call.Parent == nil { + panic("not before calls must be created with Mock.On()") + } + } + + c.requires = append(c.requires, calls...) + return c +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. 
+ ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // test is An optional variable that holds the test struct, to be used when an + // invalid mock call was made. + test TestingT + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// String provides a %v format string for Mock. +// Note: this is used implicitly by Arguments.Diff if a Mock is passed. +// It exists because go's default %v formatting traverses the struct +// without acquiring the mutex, which is detected by go test -race. +func (m *Mock) String() string { + return fmt.Sprintf("%[1]T<%[1]p>", m) +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// Test sets the test struct variable of the mock object +func (m *Mock) Test(t TestingT) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.test = t +} + +// fail fails the current test with the given formatted format and args. +// In case that a test was defined, it uses the test APIs for failing a test, +// otherwise it uses panic. +func (m *Mock) fail(format string, args ...interface{}) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.test == nil { + panic(fmt.Sprintf(format, args...)) + } + m.test.Errorf(format, args...) + m.test.FailNow() +} + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. 
Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, assert.CallerInfo(), arguments...) + m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + var expectedCall *Call + + for i, call := range m.ExpectedCalls { + if call.Method == method { + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + expectedCall = call + if call.Repeatability > -1 { + return i, call + } + } + } + } + + return -1, expectedCall +} + +type matchCandidate struct { + call *Call + mismatch string + diffCount int +} + +func (c matchCandidate) isBetterMatchThan(other matchCandidate) bool { + if c.call == nil { + return false + } + if other.call == nil { + return true + } + + if c.diffCount > other.diffCount { + return false + } + if c.diffCount < other.diffCount { + return true + } + + if c.call.Repeatability > 0 && other.call.Repeatability <= 0 { + return true + } + return false +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { + var bestMatch matchCandidate + + for _, call := range m.expectedCalls() { + if call.Method == method { + + errInfo, tempDiffCount := call.Arguments.Diff(arguments) + tempCandidate := matchCandidate{ + call: call, + mismatch: errInfo, + diffCount: tempDiffCount, + } + if tempCandidate.isBetterMatchThan(bestMatch) { + bestMatch = tempCandidate + } + } + } + + return bestMatch.call, bestMatch.mismatch +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return 
fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + // Next four lines are required to use GCCGO function naming conventions. + // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + // With GCCGO we need to remove interface information starting from pN
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + // TODO: could combine expected and closes in single loop + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // expected call found but it has already been called with repeatable times + if call != nil { + m.mutex.Unlock() + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + closestCall, mismatch := m.findClosestCall(methodName, arguments...) 
+ m.mutex.Unlock() + + if closestCall != nil { + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + callString(methodName, arguments, true), + callString(methodName, closestCall.Arguments, true), + diffArguments(closestCall.Arguments, arguments), + strings.TrimSpace(mismatch), + ) + } else { + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + } + + for _, requirement := range call.requires { + if satisfied, _ := requirement.Parent.checkExpectation(requirement); !satisfied { + m.mutex.Unlock() + m.fail("mock: Unexpected Method Call\n-----------------------------\n\n%s\n\nMust not be called before%s:\n\n%s", + callString(call.Method, call.Arguments, true), + func() (s string) { + if requirement.totalCalls > 0 { + s = " another call of" + } + if call.Parent != requirement.Parent { + s += " method from another mock instance" + } + return + }(), + callString(requirement.Method, requirement.Arguments, true), + ) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + panicMsg := call.PanicMsg + m.mutex.Unlock() + if panicMsg != nil { + panic(*panicMsg) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + 
+type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + for _, obj := range testObjects { + if m, ok := obj.(*Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. +func (m *Mock) AssertExpectations(t TestingT) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + m.mutex.Lock() + defer m.mutex.Unlock() + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + satisfied, reason := m.checkExpectation(expectedCall) + if !satisfied { + failedExpectations++ + } + t.Logf(reason) + } + + if failedExpectations != 0 { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return failedExpectations == 0 +} + +func (m *Mock) checkExpectation(call *Call) (bool, string) { + if !call.optional && !m.methodWasCalled(call.Method, call.Arguments) && call.totalCalls == 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) + } + if call.Repeatability > 0 { + return false, 
fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) + } + return true, fmt.Sprintf("PASS:\t%s(%s)", call.Method, call.Arguments.String()) +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if !m.methodWasCalled(methodName, arguments) { + var calledWithArgs []string + for _, call := range m.calls() { + calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) + } + if len(calledWithArgs) == 0 { + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) + } + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if m.methodWasCalled(methodName, arguments) { + return assert.Fail(t, "Should not have called with given arguments", + fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) + } + return true +} + +// IsMethodCallable checking that the method can be called +// If the method was called more than `Repeatability` return false +func (m *Mock) IsMethodCallable(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + + for _, v := range m.ExpectedCalls { + if v.Method != methodName { + continue + } + if len(arguments) != len(v.Arguments) { + continue + } + if v.Repeatability < v.totalCalls { + continue + } + if isArgsEqual(v.Arguments, arguments) { + return true + } + } + return false +} + +// isArgsEqual compares arguments +func isArgsEqual(expected Arguments, args []interface{}) bool { + if len(expected) != len(args) { + return false + } + for i, v := range args { + if !reflect.DeepEqual(expected[i], v) { + return false + } + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. 
+type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// IsTypeArgument is a struct that contains the type of an argument +// for use when type checking. This is an alternative to AnythingOfType. +// Used in Diff and Assert. +type IsTypeArgument struct { + t interface{} +} + +// IsType returns an IsTypeArgument object containing the type to check for. +// You can provide a zero-value of the type to check. This is an +// alternative to AnythingOfType. Used in Diff and Assert. +// +// For example: +// Assert(t, IsType(""), IsType(0)) +func IsType(t interface{}) *IsTypeArgument { + return &IsTypeArgument{t: t} +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. 
+ fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).String()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. 
+func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. 
+func (args Arguments) Diff(objects []interface{}) (string, int) { + // TODO: could return string as error and nil for No difference + + output := "\n" + var differences int + + maxArgCount := len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + var actualFmt, expectedFmt string + + if len(objects) <= i { + actual = "(Missing)" + actualFmt = "(Missing)" + } else { + actual = objects[i] + actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) + } + + if len(args) <= i { + expected = "(Missing)" + expectedFmt = "(Missing)" + } else { + expected = args[i] + expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) + } + + if matcher, ok := expected.(argumentMatcher); ok { + var matches bool + func() { + defer func() { + if r := recover(); r != nil { + actualFmt = fmt.Sprintf("panic in argument matcher: %v", r) + } + }() + matches = matcher.Matches(actual) + }() + if matches { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { + t := expected.(*IsTypeArgument).t + if reflect.TypeOf(t) != reflect.TypeOf(actual) { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) + } + } else { + // normal checking + + 
if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%T", arg)) // handles nil nicely + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + index := indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. 
Must be 0 or 1, not %d", len(indexOrNil))) +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. 
Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore deleted file mode 100644 index 2734907909..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -*.out -*.test -*.xml -*.swp -.idea/ -.tmp/ -*.iml -*.cov -*.html -*.log -gen/thrift/js -gen/thrift/py -vendor/ -crossdock-main -crossdock/jaeger-docker-compose.yml diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules deleted file mode 100644 index 295ebcf622..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "idl"] - path = idl - url = https://github.com/uber/jaeger-idl.git diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml deleted file mode 100644 index 435aea1d5b..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/.travis.yml +++ /dev/null @@ -1,56 +0,0 @@ -sudo: required - -language: go -go_import_path: github.com/uber/jaeger-client-go - -dist: trusty - -matrix: - include: - # - 
go: 1.15.x - # env: - # - TESTS=true - # - USE_DEP=true - # - COVERAGE=true - - go: 1.15.x - env: - - USE_DEP=true - - CROSSDOCK=true - # - go: 1.15.x - # env: - # - TESTS=true - # - USE_DEP=false - # - USE_GLIDE=true - # test with previous version of Go - # - go: 1.14.x - # env: - # - TESTS=true - # - USE_DEP=true - # - CI_SKIP_LINT=true - -services: - - docker - -env: - global: - - DOCKER_COMPOSE_VERSION=1.8.0 - - COMMIT=${TRAVIS_COMMIT::8} - # DOCKER_PASS - - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk=" - # DOCKER_USER - - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0=" - -install: - - make install-ci USE_DEP=$USE_DEP - - if [ "$CROSSDOCK" == true ]; then bash 
./travis/install-crossdock-deps.sh ; fi - -script: - - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi - - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi - -after_success: - - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi - - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi - -after_failure: - - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md deleted file mode 100644 index 964a4049c0..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ /dev/null @@ -1,396 +0,0 @@ -Changes by Version -================== - -2.30.0 (2021-12-07) -------------------- -- Add deprecation notice -- Yuri Shkuro -- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro -- Remove redundant newline in NewReporter init message (#603) -- wwade -- [zipkin] Encode span IDs as full 16-hex strings #601 -- Nathan -- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng -- Remove outdated reference to Zipkin model. -- Yuri Shkuro -- Move thrift compilation to a script (#590) -- Aaron Jheng -- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro - -2.29.1 (2021-05-24) -------------------- -- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro - - -2.29.0 (2021-05-20) -------------------- -- Update vendored thrift to 0.14.1 (#584) -- @nhatthm - - -2.28.0 (2021-04-30) -------------------- -- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott - - -2.27.0 (2021-04-19) -------------------- -- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. 
Aidan Campbell - - -2.26.0 (2021-04-16) -------------------- -- Delete a baggage item when value is blank (#562) -- evan.kim -- Trim baggage key when parsing (#566) -- sicong.huang -- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o -- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro -- Additional context protections (#544) -- Joe Elliott -- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima -- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro -- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro -- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel -- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro - - -2.25.0 (2020-07-13) -------------------- -## Breaking changes -- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster - - The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments. - The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct, - or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable. 
- -## Bug fixes -- Do not add invalid context to references (#521) -- Yuri Shkuro - - -2.24.0 (2020-06-14) -------------------- -- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher -- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima -- Override reporter config only when agent host/port is set in env (#513) -- ilylia -- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song - - -2.23.1 (2020-04-28) -------------------- -- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj - - -2.23.0 (2020-04-22) -------------------- - -- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj -- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou -- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura - -2.22.1 (2020-01-16) -------------------- - -- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro - - -2.22.0 (2020-01-15) -------------------- - -- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro -- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura - - -2.21.1 (2019-12-20) -------------------- - -- Update version correctly. 
- - -2.21.0 (2019-12-20) -------------------- - -- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro -- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro -- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud -- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh - - -2.20.1 (2019-11-08) -------------------- - -Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468 - -- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467 -- Create `OperationNameLateBinding` sampler option and config option -- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc - - -2.20.0 (2019-11-06) -------------------- - -## New Features - -- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj - - Sampling state is shared between all spans of the trace that are still in memory. - This allows implementation of delayed sampling decisions (see below). - -- Support delayed sampling decisions (#449) -- Yuri Shkuro - - This is a large structural change to how the samplers work. - It allows some samplers to be executed multiple times on different - span events (like setting a tag) and make a positive sampling decision - later in the span life cycle, or even based on children spans. - See [README](./README.md#delayed-sampling) for more details. - - There is a related minor change in behavior of the adaptive (per-operation) sampler, - which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the - operation used to make the sampling decision is always the one provided at span creation. 
- -- Add experimental tag matching sampler (#452) -- Yuri Shkuro - - A sampler that can sample a trace based on a certain tag added to the root - span or one of its local (in-process) children. The sampler can be used with - another experimental `PrioritySampler` that allows multiple samplers to try - to make a sampling decision, in a certain priority order. - -- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta -- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy - -## Minor patches - -- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro -- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro -- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi -- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro - -2.19.0 (2019-09-23) -------------------- - -- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro - - -2.18.1 (2019-09-16) -------------------- - -- Remove go.mod / go.sum that interfere with `go get` (#432) - - -2.18.0 (2019-09-09) -------------------- - -- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) - - -2.17.0 (2019-08-30) -------------------- - -- Add a flag for firehose mode (#419) -- Default sampling server URL to agent (#414) -- Update default sampling rate when sampling strategy is refreshed (#413) -- Support "Self" Span Reference (#411) -- Don't complain about blank service name if tracing is Disabled (#410) Yuri -- Use IP address from tag if exist (#402) -- Expose span data to custom reporters [fixes #394] (#399) -- Fix the span allocation in the pool (#381) - - -2.16.0 (2019-03-24) -------------------- - -- Add baggage to B3 codec (#319) -- Add support for 128bit trace ids to zipkin thrift spans. 
(#378) -- Update zipkin propagation logic to support 128bit traceIDs (#373) -- Accept "true" for the x-b3-sampled header (#356) - -- Allow setting of PoolSpans from Config object (#322) -- Make propagators public to allow wrapping (#379) -- Change default metric namespace to use relevant separator for the metric backend (#364) -- Change metrics prefix to jaeger_tracer and add descriptions (#346) -- Bump OpenTracing to ^1.1.x (#383) -- Upgrade jaeger-lib to v2.0.0 (#359) -- Avoid defer when generating random number (#358) -- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) -- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) - - -2.15.0 (2018-10-10) -------------------- - -- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) -- Make maximum annotation length configurable in tracer options (#318) -- Support more environment variables in configuration (#323) -- Print error on Sampler Query failure (#328) -- Add an HTTPOption to support custom http.RoundTripper (#333) -- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) - - -2.14.0 (2018-04-30) -------------------- - -- Support throttling for debug traces (#274) -- Remove dependency on Apache Thrift (#303) -- Remove dependency on tchannel (#295) (#294) -- Test with Go 1.9 (#298) - - -2.13.0 (2018-04-15) -------------------- - -- Use value receiver for config.NewTracer() (#283) -- Lock span during jaeger thrift conversion (#273) -- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) -- Added support for client configuration via env vars (#275) -- Allow overriding sampler in the Config (#270) - - -2.12.0 (2018-03-14) -------------------- - -- Use lock when retrieving span.Context() (#268) -- Add Configuration support for custom Injector and Extractor (#263) - - -2.11.2 (2018-01-12) -------------------- - -- Add Gopkg.toml to allow using the lib with `dep` - - -2.11.1 (2018-01-03) 
-------------------- - -- Do not enqueue spans after Reporter is closed (#235, #245) -- Change default flush interval to 1sec (#243) - - -2.11.0 (2017-11-27) -------------------- - -- Normalize metric names and tags to be compatible with Prometheus (#222) - - -2.10.0 (2017-11-14) -------------------- - -- Support custom tracing headers (#176) -- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182) -- Do not coerce baggage keys to lower case (#196) -- Log span name when span cannot be reported (#198) -- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219) - - -2.9.0 (2017-07-29) ------------------- - -- Pin thrift <= 0.10 (#179) -- Introduce a parallel interface ContribObserver (#159) - - -2.8.0 (2017-07-05) ------------------- - -- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag -- Add options to set tracer tags - - -2.7.0 (2017-06-21) ------------------- - -- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140) -- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147) -- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153) -- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158) -- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161) - - -2.6.0 (2017-03-28) ------------------- - -- Add config option to initialize RPC Metrics feature - - -2.5.0 (2017-03-23) ------------------- - -- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123) -- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124) -- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125) - - 
-2.4.0 (2017-03-21) ------------------- - -- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121) -- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121) - - -2.3.0 (2017-03-20) ------------------- - -- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117) -- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118) - - -2.2.1 (2017-03-14) ------------------- - -- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111) - - -2.2.0 (2017-03-10) ------------------- - -- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94) -- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103) - - -2.1.2 (2017-02-27) -------------------- - -- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99) -- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100) -- Add tracer initialization godoc examples - - -2.1.1 (2017-02-21) -------------------- - -- Fix inefficient usage of zap.Logger - - -2.1.0 (2017-02-17) -------------------- - -- Add adapter for zap.Logger (https://github.com/uber-go/zap) -- Move logging API to ./log/ package - - -2.0.0 (2017-02-08) -------------------- - -- Support Adaptive Sampling -- Support 128bit Trace IDs -- Change trace/span IDs from uint64 to strong types TraceID and SpanID -- Add Zipkin HTTP B3 Propagation format support #72 -- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics -- Change API for tracer, reporter, sampler initialization - - -1.6.0 (2016-10-14) -------------------- - -- Add Zipkin HTTP transport -- Support external baggage via jaeger-baggage 
header -- Unpin Thrift version, keep to master - - -1.5.1 (2016-09-27) -------------------- - -- Relax dependency on opentracing to ^1 - - -1.5.0 (2016-09-27) -------------------- - -- Upgrade to opentracing-go 1.0 -- Support KV logging for Spans - - -1.4.0 (2016-09-14) -------------------- - -- Support debug traces via HTTP header "jaeger-debug-id" diff --git a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS deleted file mode 100644 index 0572efcd42..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ - -* @jaegertracing/jaeger-maintainers diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md deleted file mode 100644 index 41e2154cf6..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md +++ /dev/null @@ -1,170 +0,0 @@ -# How to Contribute to Jaeger - -We'd love your help! - -Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub -pull requests. This document outlines some of the conventions on development -workflow, commit message formatting, contact points and other resources to make -it easier to get your contribution accepted. - -We gratefully welcome improvements to documentation as well as to code. - -# Certificate of Origin - -By contributing to this project you agree to the [Developer Certificate of -Origin](https://developercertificate.org/) (DCO). This document was created -by the Linux Kernel community and is a simple statement that you, as a -contributor, have the legal right to make the contribution. See the [DCO](DCO) -file for details. - -## Getting Started - -This library uses [dep](https://golang.github.io/dep/) to manage dependencies. 
- -To get started, make sure you clone the Git repository into the correct location -`github.com/uber/jaeger-client-go` relative to `$GOPATH`: - -``` -mkdir -p $GOPATH/src/github.com/uber -cd $GOPATH/src/github.com/uber -git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go -cd jaeger-client-go -git submodule update --init --recursive -``` - -Then install dependencies and run the tests: - -``` -make install -make test -``` - -## Imports grouping - -This projects follows the following pattern for grouping imports in Go files: - * imports from standard library - * imports from other projects - * imports from `jaeger-client-go` project - -For example: - -```go -import ( - "fmt" - - "github.com/uber/jaeger-lib/metrics" - "go.uber.org/zap" - - "github.com/uber/jaeger-client-go/config" -) -``` - -## Making A Change - -*Before making any significant changes, please [open an -issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed -changes ahead of time will make the contribution process smooth for everyone. - -Once we've discussed your changes and you've got your code ready, make sure -that tests are passing (`make test` or `make cover`) and open your PR. Your -pull request is most likely to be accepted if it: - -* Includes tests for new functionality. -* Follows the guidelines in [Effective - Go](https://golang.org/doc/effective_go.html) and the [Go team's common code - review comments](https://github.com/golang/go/wiki/CodeReviewComments). -* Has a [good commit message](https://chris.beams.io/posts/git-commit/): - * Separate subject from body with a blank line - * Limit the subject line to 50 characters - * Capitalize the subject line - * Do not end the subject line with a period - * Use the imperative mood in the subject line - * Wrap the body at 72 characters - * Use the body to explain _what_ and _why_ instead of _how_ -* Each commit must be signed by the author ([see below](#sign-your-work)). 
- -## License - -By contributing your code, you agree to license your contribution under the terms -of the [Apache License](LICENSE). - -If you are adding a new file it should have a header like below. The easiest -way to add such header is to run `make fmt`. - -``` -// Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -``` - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the -patch, which certifies that you wrote it or otherwise have the right to -pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below (from -[developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -using your real name (sorry, no pseudonyms or anonymous contributions.) - -You can add the sign off when creating the git commit via `git commit -s`. - -If you want this to be automatic you can set up some aliases: - -``` -git config --add alias.amend "commit -s --amend" -git config --add alias.c "commit -s" -``` diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO deleted file mode 100644 index 068953d4bd..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/DCO +++ /dev/null @@ -1,37 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. - diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock deleted file mode 100644 index 268289bb41..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock +++ /dev/null @@ -1,301 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113" - name = "github.com/HdrHistogram/hdrhistogram-go" - packages = ["."] - pruneopts = "UT" - revision = "3a0bb77429bd3a61596f5e8a3172445844342120" - version = "0.9.0" - -[[projects]] - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "37c8de3658fcb183f997c4e13e8337516ab753e6" - version = "v1.0.1" - -[[projects]] - branch = "master" - digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277" - name = "github.com/crossdock/crossdock-go" - packages = [ - ".", - "assert", - "require", - ] - pruneopts = "UT" - revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b" - name = "github.com/golang/mock" - packages = ["gomock"] - pruneopts = "UT" - revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d" - version = "v1.4.4" - -[[projects]] - digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17" - version = "v1.4.2" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = 
"1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01" - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "harness", - "log", - ] - pruneopts = "UT" - revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12" - version = "v1.2.0" - -[[projects]] - digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "614d223910a179a466c1767a985424175c39b465" - version = "v0.9.1" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - ] - pruneopts = "UT" - revision = "170205fb58decfd011f1550d4cfb737230d7ae4f" - version = "v1.1.0" - -[[projects]] - digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9" - version = "v0.2.0" - -[[projects]] - digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc" - version = "v0.14.0" - -[[projects]] - digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/fs", - "internal/util", - ] - pruneopts = "UT" - revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486" - version = "v0.2.0" - -[[projects]] - digest = 
"1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e" - name = "github.com/stretchr/objx" - packages = ["."] - pruneopts = "UT" - revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6" - version = "v0.3.0" - -[[projects]] - digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require", - "suite", - ] - pruneopts = "UT" - revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8" - version = "v1.6.1" - -[[projects]] - digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61" - name = "github.com/uber/jaeger-lib" - packages = [ - "metrics", - "metrics/metricstest", - "metrics/prometheus", - ] - pruneopts = "UT" - revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4" - version = "v2.3.0" - -[[projects]] - digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182" - version = "v1.7.0" - -[[projects]] - digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3114a8b704d2d28dbacda34a872690aaef66aeed" - version = "v1.6.0" - -[[projects]] - digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - "zaptest/observer", - ] - pruneopts = "UT" - revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510" - version = "v1.16.0" - -[[projects]] - branch = "master" - digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - ] - pruneopts = "UT" - revision = "328152dc79b1547da63f950cd4cdd9afd50b2774" - -[[projects]] - branch = "master" - digest = 
"1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86" - name = "golang.org/x/sys" - packages = [ - "internal/unsafeheader", - "unix", - "windows", - ] - pruneopts = "UT" - revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9" - -[[projects]] - digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1" - name = "google.golang.org/protobuf" - packages = [ - "encoding/prototext", - "encoding/protowire", - "internal/descfmt", - "internal/descopts", - "internal/detrand", - "internal/encoding/defval", - "internal/encoding/messageset", - "internal/encoding/tag", - "internal/encoding/text", - "internal/errors", - "internal/fieldsort", - "internal/filedesc", - "internal/filetype", - "internal/flags", - "internal/genid", - "internal/impl", - "internal/mapsort", - "internal/pragma", - "internal/set", - "internal/strs", - "internal/version", - "proto", - "reflect/protoreflect", - "reflect/protoregistry", - "runtime/protoiface", - "runtime/protoimpl", - "types/known/anypb", - "types/known/durationpb", - "types/known/timestamppb", - ] - pruneopts = "UT" - revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1" - version = "v1.25.0" - -[[projects]] - branch = "v3" - digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b" - name = "gopkg.in/yaml.v3" - packages = ["."] - pruneopts = "UT" - revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/crossdock/crossdock-go", - "github.com/golang/mock/gomock", - "github.com/opentracing/opentracing-go", - "github.com/opentracing/opentracing-go/ext", - "github.com/opentracing/opentracing-go/harness", - "github.com/opentracing/opentracing-go/log", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/mock", - "github.com/stretchr/testify/require", - "github.com/stretchr/testify/suite", - 
"github.com/uber/jaeger-lib/metrics", - "github.com/uber/jaeger-lib/metrics/metricstest", - "github.com/uber/jaeger-lib/metrics/prometheus", - "go.uber.org/atomic", - "go.uber.org/zap", - "go.uber.org/zap/zapcore", - "go.uber.org/zap/zaptest/observer", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml deleted file mode 100644 index 3aa307a904..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml +++ /dev/null @@ -1,31 +0,0 @@ -[[constraint]] - name = "github.com/crossdock/crossdock-go" - branch = "master" - -[[constraint]] - name = "github.com/opentracing/opentracing-go" - version = "^1.2" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "^1" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "^1.1.3" - -[[constraint]] - name = "go.uber.org/atomic" - version = "^1" - -[[constraint]] - name = "github.com/uber/jaeger-lib" - version = "^2.3" - -[[constraint]] - name = "go.uber.org/zap" - version = "^1" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile deleted file mode 100644 index ee7b21268a..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/Makefile +++ /dev/null @@ -1,135 +0,0 @@ -PROJECT_ROOT=github.com/uber/jaeger-client-go -export GO111MODULE=off -PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u) -# all .go files that don't exist in hidden directories -ALL_SRC := $(shell find . 
-name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ - -e ".*/\..*" \ - -e ".*/_.*" \ - -e ".*/mocks.*") - -USE_DEP := true - --include crossdock/rules.mk - -RACE=-race -GOTEST=go test -v $(RACE) -GOLINT=golint -GOVET=go vet -GOFMT=gofmt -FMT_LOG=fmt.log -LINT_LOG=lint.log - -THRIFT_VER=0.14 -THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER) -THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift - -PASS=$(shell printf "\033[32mPASS\033[0m") -FAIL=$(shell printf "\033[31mFAIL\033[0m") -COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/'' - -.DEFAULT_GOAL := test-and-lint - -.PHONY: test-and-lint -test-and-lint: test fmt lint - -.PHONY: test -test: -ifeq ($(USE_DEP),true) - dep check -endif - bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)" - -.PHONY: fmt -fmt: - $(GOFMT) -e -s -l -w $(ALL_SRC) - ./scripts/updateLicenses.sh - -.PHONY: lint -lint: vet golint lint-fmt lint-thrift-testing - -.PHONY: vet -vet: - $(GOVET) $(PACKAGES) - -.PHONY: golint -golint: - @cat /dev/null > $(LINT_LOG) - @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) - @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) - -.PHONY: lint-fmt -lint-fmt: - @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) - ./scripts/updateLicenses.sh >> $(FMT_LOG) - @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) - -# make sure thrift/ module does not import "testing" -.PHONY: lint-thrift-testing -lint-thrift-testing: - @cat /dev/null > $(LINT_LOG) - @(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true - @[ ! 
-s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false) - -.PHONY: install -install: - @echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE) -ifeq ($(USE_DEP),true) - dep version || make install-dep - dep ensure -vendor-only -v -endif -ifeq ($(USE_GLIDE),true) - glide --version || go get github.com/Masterminds/glide - glide install -endif - - -.PHONY: cover -cover: - $(GOTEST) -cover -coverprofile cover.out $(PACKAGES) - -.PHONY: cover-html -cover-html: cover - go tool cover -html=cover.out -o cover.html - -# This is not part of the regular test target because we don't want to slow it -# down. -.PHONY: test-examples -test-examples: - make -C examples - -.PHONY: thrift -thrift: idl-submodule thrift-compile - -# TODO at the moment we're not generating tchan_*.go files -.PHONY: thrift-compile -thrift-compile: thrift-image - docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh - -.PHONY: idl-submodule -idl-submodule: - git submodule init - git submodule update - -.PHONY: thrift-image -thrift-image: - $(THRIFT) -version - -.PHONY: install-dep -install-dep: - - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep - - chmod +x $$GOPATH/bin/dep - -.PHONY: install-ci -install-ci: install - go get github.com/wadey/gocovmerge - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover - go get golang.org/x/lint/golint - -.PHONY: test-ci -test-ci: cover -ifeq ($(CI_SKIP_LINT),true) - echo 'skipping lint' -else - make lint -endif diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md deleted file mode 100644 index e23912b35a..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ /dev/null @@ -1,339 +0,0 @@ -[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 
Enabled][ot-img]][ot-url] - -# 🛑 This library is being deprecated! - -We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details. - -# Jaeger Bindings for Go OpenTracing API - -Instrumentation library that implements an -[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io). - -**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`, it will not compile. We might revisit this in the next major release. - * :white_check_mark: `import "github.com/uber/jaeger-client-go"` - * :x: `import "github.com/jaegertracing/jaeger-client-go"` - -## How to Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md). - -## Installation - -### Preferred - -Add `github.com/uber/jaeger-client-go` to `go.mod`. - -### Old way - -We recommended using a dependency manager like [dep](https://golang.github.io/dep/) -and [semantic versioning](http://semver.org/) when including this library into an application. -For example, Jaeger backend imports this library like this: - -```toml -[[constraint]] - name = "github.com/uber/jaeger-client-go" - version = "2.17" -``` - -If you instead want to use the latest version in `master`, you can pull it via `go get`. -Note that during `go get` you may see build errors due to incompatible dependencies, which is why -we recommend using semantic versions for dependencies. 
The error may be fixed by running -`make install` (it will install `dep` if you don't have it): - -```shell -go get -u github.com/uber/jaeger-client-go/ -cd $GOPATH/src/github.com/uber/jaeger-client-go/ -git submodule update --init --recursive -make install -``` - -## Initialization - -See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples) -and [config/example_test.go](./config/example_test.go). - -There are two ways to create a tracer: - * Using [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct that allows declarative configuration. For example, you can populate that struct from a YAML/JSON config, or ask it to initialize itself using environment variables (see next section). - * Using [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function that allows for full programmatic control of configuring the tracer using TracerOptions. - -### Environment variables - -The tracer can be initialized with values coming from environment variables, if it is -[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer) -that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv). -None of the env vars are required and all of them can be overridden via direct setting -of the property on the configuration object. - -Property| Description ---- | --- -JAEGER_SERVICE_NAME | The service name. -JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`). -JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`). -JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored. -JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint. 
-JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint. -JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans" `true` or `false` (default `false`). -JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`). -JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`). -JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables udp connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`). -JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`). -JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/. -JAEGER_SAMPLER_PARAM | The sampler parameter (number). -JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler. -JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`). -JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`). -JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`). -JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`. -JAEGER_TRACEID_128BIT | Whether to enable 128bit trace-id generation, `true` or `false`. If not enabled, the SDK defaults to 64bit trace-ids. 
-JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`). -JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`). - -By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and -`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces -to the endpoint via `HTTP`, making the `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` unused. If `JAEGER_ENDPOINT` is -secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment -variables. - -### Closing the tracer via `io.Closer` - -The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance. -It is recommended to structure your `main()` so that it calls the `Close()` function on the closer -before exiting, e.g. - -```go -tracer, closer, err := cfg.NewTracer(...) -defer closer.Close() -``` - -This is especially useful for command-line tools that enable tracing, as well as -for the long-running apps that support graceful shutdown. For example, if your deployment -system sends SIGTERM instead of killing the process and you trap that signal to do a graceful -exit, then having `defer closer.Close()` ensures that all buffered spans are flushed. - -### Metrics & Monitoring - -The tracer emits a number of different metrics, defined in -[metrics.go](metrics.go). The monitoring backend is expected to support -tag-based metric names, e.g. instead of `statsd`-style string names -like `counters.my-service.jaeger.spans.started.sampled`, the metrics -are defined by a short name and a collection of key/value tags, for -example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go) -file for the full list and descriptions of emitted metrics. 
- -The monitoring backend is represented by the `metrics.Factory` interface from package -[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation -of that interface can be passed as an option to either the Configuration object or the Tracer -constructor, for example: - -```go -import ( - "github.com/uber/jaeger-client-go/config" - "github.com/uber/jaeger-lib/metrics/prometheus" -) - - metricsFactory := prometheus.New() - tracer, closer, err := config.Configuration{ - ServiceName: "your-service-name", - }.NewTracer( - config.Metrics(metricsFactory), - ) -``` - -By default, a no-op `metrics.NullFactory` is used. - -### Logging - -The tracer can be configured with an optional logger, which will be -used to log communication errors, or log spans if a logging reporter -option is specified in the configuration. The logging API is abstracted -by the [Logger](logger.go) interface. A logger instance implementing -this interface can be set on the `Config` object before calling the -`New` method. - -Besides the [zap](https://github.com/uber-go/zap) implementation -bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) -one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository. - -## Instrumentation for Tracing - -Since this tracer is fully compliant with OpenTracing API 1.0, -all code instrumentation should only use the API itself, as described -in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation. - -## Features - -### Reporters - -A "reporter" is a component that receives the finished spans and reports -them to somewhere. Under normal circumstances, the Tracer -should use the default `RemoteReporter`, which sends the spans out of -process via configurable "transport". For testing purposes, one can -use an `InMemoryReporter` that accumulates spans in a buffer and -allows to retrieve them for later verification. 
Also available are -`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter` -which logs all finished spans using their `String()` method, and a -`CompositeReporter` that can be used to combine more than one reporter -into one, e.g. to attach a logging reporter to the main remote reporter. - -### Span Reporting Transports - -The remote reporter uses "transports" to actually send the spans out -of process. Currently the supported transports include: - * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP, - * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP. - -### Sampling - -The tracer does not record all spans, but only those that have the -sampling bit set in the `flags`. When a new trace is started and a new -unique ID is generated, a sampling decision is made whether this trace -should be sampled. The sampling decision is propagated to all downstream -calls via the `flags` field of the trace context. The following samplers -are available: - 1. `RemotelyControlledSampler` uses one of the other simpler samplers - and periodically updates it by polling an external server. This - allows dynamic control of the sampling strategies. - 1. `ConstSampler` always makes the same sampling decision for all - trace IDs. it can be configured to either sample all traces, or - to sample none. - 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability - for a given trace to be sampled. The actual decision is made by - comparing the trace ID with a random number multiplied by the - sampling rate. - 1. `RateLimitingSampler` can be used to allow only a certain fixed - number of traces to be sampled per second. - -#### Delayed sampling - -Version 2.20 introduced the ability to delay sampling decisions in the life cycle -of the root span. 
It involves several features and architectural changes: - * **Shared sampling state**: the sampling state is shared across all local - (i.e. in-process) spans for a given trace. - * **New `SamplerV2` API** allows the sampler to be called at multiple points - in the life cycle of a span: - * on span creation - * on overwriting span operation name - * on setting span tags - * on finishing the span - * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler - to indicate if the negative sampling decision is final or not (positive sampling - decisions are always final). If the decision is not final, the sampler will be - called again on further span life cycle events, like setting tags. - -These new features are used in the experimental `x.TagMatchingSampler`, which -can sample a trace based on a certain tag added to the root -span or one of its local (in-process) children. The sampler can be used with -another experimental `x.PrioritySampler` that allows multiple samplers to try -to make a sampling decision, in a certain priority order. - -### Baggage Injection - -The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added -to the span context and propagated throughout the trace. 
An external process can inject baggage -by setting the special HTTP Header `jaeger-baggage` on a request: - -```sh -curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com -``` - -Baggage can also be programatically set inside your service: - -```go -if span := opentracing.SpanFromContext(ctx); span != nil { - span.SetBaggageItem("key", "value") -} -``` - -Another service downstream of that can retrieve the baggage in a similar way: - -```go -if span := opentracing.SpanFromContext(ctx); span != nil { - val := span.BaggageItem("key") - println(val) -} -``` - -### Debug Traces (Forced Sampling) - -#### Programmatically - -The OpenTracing API defines a `sampling.priority` standard tag that -can be used to affect the sampling of a span and its children: - -```go -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" -) - -span := opentracing.SpanFromContext(ctx) -ext.SamplingPriority.Set(span, 1) -``` - -#### Via HTTP Headers - -Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`, -which can be set in the incoming request, e.g. - -```sh -curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com -``` - -When Jaeger sees this header in the request that otherwise has no -tracing context, it ensures that the new trace started for this -request will be sampled in the "debug" mode (meaning it should survive -all downsampling that might happen in the collection pipeline), and the -root span will have a tag as if this statement was executed: - -```go -span.SetTag("jaeger-debug-id", "some-correlation-id") -``` - -This allows using Jaeger UI to find the trace by this tag. - -### Zipkin HTTP B3 compatible header propagation - -Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used -by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin). 
- -However it is not the default propagation format, see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) how to set it up. - -## SelfRef - -Jaeger Tracer supports an additional [span reference][] type call `Self`, which was proposed -to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81) -but not yet accepted. This allows the caller to provide an already created `SpanContext` -when starting a new span. The `Self` reference bypasses trace and span id generation, -as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be -set appropriately by the caller). - -The `Self` reference supports the following use cases: - * the ability to provide externally generated trace and span IDs - * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (ie log-based) storage - -Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type: -``` -span := tracer.StartSpan( - "continued_span", - jaeger.SelfRef(yourSpanContext), -) -... -defer span.Finish() -``` - -## License - -[Apache 2.0 License](LICENSE). 
- - -[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg -[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go -[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master -[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go -[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go -[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg -[ot-url]: http://opentracing.io -[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item -[timeunits]: https://golang.org/pkg/time/#ParseDuration -[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md deleted file mode 100644 index 12438d8416..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/RELEASE.md +++ /dev/null @@ -1,12 +0,0 @@ -# Release Process - -1. Create a PR "Preparing for release X.Y.Z" against master branch - * Alter CHANGELOG.md from ` (unreleased)` to ` (YYYY-MM-DD)` - * Use `git log --pretty=format:'- %s -- %an'` as the basis for for changelog entries - * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z` -2. Create a release "Release X.Y.Z" on Github - * Create Tag `vX.Y.Z` - * Copy CHANGELOG.md into the release notes -3. Create a PR "Back to development" against master branch - * Add ` (unreleased)` to CHANGELOG.md - * Update `JaegerClientVersion` in constants.go to `Go-dev` diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go deleted file mode 100644 index 1037ca0e86..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/opentracing/opentracing-go/log" - - "github.com/uber/jaeger-client-go/internal/baggage" -) - -// baggageSetter is an actor that can set a baggage value on a Span given certain -// restrictions (eg. maxValueLength). -type baggageSetter struct { - restrictionManager baggage.RestrictionManager - metrics *Metrics -} - -func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter { - return &baggageSetter{ - restrictionManager: restrictionManager, - metrics: metrics, - } -} - -// (NB) span should hold the lock before making this call -func (s *baggageSetter) setBaggage(span *Span, key, value string) { - var truncated bool - var prevItem string - restriction := s.restrictionManager.GetRestriction(span.serviceName(), key) - if !restriction.KeyAllowed() { - s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) - s.metrics.BaggageUpdateFailure.Inc(1) - return - } - if len(value) > restriction.MaxValueLength() { - truncated = true - value = value[:restriction.MaxValueLength()] - s.metrics.BaggageTruncate.Inc(1) - } - prevItem = span.context.baggage[key] - s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) - span.context = span.context.WithBaggageItem(key, value) - s.metrics.BaggageUpdateSuccess.Inc(1) -} - -func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, 
truncated, valid bool) { - if !span.context.IsSampled() { - return - } - fields := []log.Field{ - log.String("event", "baggage"), - log.String("key", key), - log.String("value", value), - } - if prevItem != "" { - fields = append(fields, log.String("override", "true")) - } - if truncated { - fields = append(fields, log.String("truncated", "true")) - } - if !valid { - fields = append(fields, log.String("invalid", "true")) - } - span.logFieldsNoLocking(fields...) -} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go deleted file mode 100644 index 06676350b7..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "errors" - "fmt" - "io" - "strings" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go/utils" - - "github.com/uber/jaeger-client-go" - "github.com/uber/jaeger-client-go/internal/baggage/remote" - throttler "github.com/uber/jaeger-client-go/internal/throttler/remote" - "github.com/uber/jaeger-client-go/rpcmetrics" - "github.com/uber/jaeger-client-go/transport" - "github.com/uber/jaeger-lib/metrics" -) - -const defaultSamplingProbability = 0.001 - -// Configuration configures and creates Jaeger Tracer -type Configuration struct { - // ServiceName specifies the service name to use on the tracer. - // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME - ServiceName string `yaml:"serviceName"` - - // Disabled makes the config return opentracing.NoopTracer. - // Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED. - Disabled bool `yaml:"disabled"` - - // RPCMetrics enables generations of RPC metrics (requires metrics factory to be provided). - // Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS - RPCMetrics bool `yaml:"rpc_metrics"` - - // Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context. - // Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT. - Gen128Bit bool `yaml:"traceid_128bit"` - - // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS - Tags []opentracing.Tag `yaml:"tags"` - - Sampler *SamplerConfig `yaml:"sampler"` - Reporter *ReporterConfig `yaml:"reporter"` - Headers *jaeger.HeadersConfig `yaml:"headers"` - BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"` - Throttler *ThrottlerConfig `yaml:"throttler"` -} - -// SamplerConfig allows initializing a non-default sampler. All fields are optional. 
-type SamplerConfig struct { - // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote. - // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE - Type string `yaml:"type"` - - // Param is a value passed to the sampler. - // Valid values for Param field are: - // - for "const" sampler, 0 or 1 for always false/true respectively - // - for "probabilistic" sampler, a probability between 0 and 1 - // - for "rateLimiting" sampler, the number of spans per second - // - for "remote" sampler, param is the same as for "probabilistic" - // and indicates the initial sampling rate before the actual one - // is received from the mothership. - // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM - Param float64 `yaml:"param"` - - // SamplingServerURL is the URL of sampling manager that can provide - // sampling strategy to this service. - // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT - SamplingServerURL string `yaml:"samplingServerURL"` - - // SamplingRefreshInterval controls how often the remotely controlled sampler will poll - // sampling manager for the appropriate sampling strategy. - // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL - SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` - - // MaxOperations is the maximum number of operations that the PerOperationSampler - // will keep track of. If an operation is not tracked, a default probabilistic - // sampler will be used rather than the per operation specific sampler. - // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS. - MaxOperations int `yaml:"maxOperations"` - - // Opt-in feature for applications that require late binding of span name via explicit - // call to SetOperationName when using PerOperationSampler. 
When this feature is enabled, - // the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling - // decision as non-final (and the span as writeable). This may lead to degraded performance - // in applications that always provide the correct span name on trace creation. - // - // For backwards compatibility this option is off by default. - OperationNameLateBinding bool `yaml:"operationNameLateBinding"` - - // Options can be used to programmatically pass additional options to the Remote sampler. - Options []jaeger.SamplerOption -} - -// ReporterConfig configures the reporter. All fields are optional. -type ReporterConfig struct { - // QueueSize controls how many spans the reporter can keep in memory before it starts dropping - // new spans. The queue is continuously drained by a background go-routine, as fast as spans - // can be sent out of process. - // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE - QueueSize int `yaml:"queueSize"` - - // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. - // It is generally not useful, as it only matters for very low traffic services. - // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL - BufferFlushInterval time.Duration - - // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter - // and logs all submitted spans. Main Configuration.Logger must be initialized in the code - // for this option to have any effect. - // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS - LogSpans bool `yaml:"logSpans"` - - // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address. 
- // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT - LocalAgentHostPort string `yaml:"localAgentHostPort"` - - // DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves - // the agent's hostname and reconnects if there was a change. This option only - // applies if LocalAgentHostPort is specified. - // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED - DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"` - - // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname - // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false. - // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL - AttemptReconnectInterval time.Duration - - // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL. - // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT - CollectorEndpoint string `yaml:"collectorEndpoint"` - - // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector. - // Can be provided by FromEnv() via the environment variable named JAEGER_USER - User string `yaml:"user"` - - // Password instructs reporter to include a password for basic http authentication when sending spans to - // jaeger-collector. - // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD - Password string `yaml:"password"` - - // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans. - // This field takes effect only when using HTTPTransport by setting the CollectorEndpoint. 
- HTTPHeaders map[string]string `yaml:"http_headers"` -} - -// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist -// certain baggage keys. All fields are optional. -type BaggageRestrictionsConfig struct { - // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction - // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have - // been retrieved from jaeger-agent. If false, the manager wil allow any baggage to be written until baggage - // restrictions have been retrieved from jaeger-agent. - DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"` - - // HostPort is the hostPort of jaeger-agent's baggage restrictions server - HostPort string `yaml:"hostPort"` - - // RefreshInterval controls how often the baggage restriction manager will poll - // jaeger-agent for the most recent baggage restrictions. - RefreshInterval time.Duration `yaml:"refreshInterval"` -} - -// ThrottlerConfig configures the throttler which can be used to throttle the -// rate at which the client may send debug requests. -type ThrottlerConfig struct { - // HostPort of jaeger-agent's credit server. - HostPort string `yaml:"hostPort"` - - // RefreshInterval controls how often the throttler will poll jaeger-agent - // for more throttling credits. - RefreshInterval time.Duration `yaml:"refreshInterval"` - - // SynchronousInitialization determines whether or not the throttler should - // synchronously fetch credits from the agent when an operation is seen for - // the first time. This should be set to true if the client will be used by - // a short lived service that needs to ensure that credits are fetched - // upfront such that sampling or throttling occurs. 
- SynchronousInitialization bool `yaml:"synchronousInitialization"` -} - -type nullCloser struct{} - -func (*nullCloser) Close() error { return nil } - -// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers -// before shutdown. -// -// Deprecated: use NewTracer() function -func (c Configuration) New( - serviceName string, - options ...Option, -) (opentracing.Tracer, io.Closer, error) { - if serviceName != "" { - c.ServiceName = serviceName - } - - return c.NewTracer(options...) -} - -// NewTracer returns a new tracer based on the current configuration, using the given options, -// and a closer func that can be used to flush buffers before shutdown. -func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) { - if c.Disabled { - return &opentracing.NoopTracer{}, &nullCloser{}, nil - } - - if c.ServiceName == "" { - return nil, nil, errors.New("no service name provided") - } - - opts := applyOptions(options...) - tracerMetrics := jaeger.NewMetrics(opts.metrics, nil) - if c.RPCMetrics { - Observer( - rpcmetrics.NewObserver( - opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}), - rpcmetrics.DefaultNameNormalizer, - ), - )(&opts) // adds to c.observers - } - if c.Sampler == nil { - c.Sampler = &SamplerConfig{ - Type: jaeger.SamplerTypeRemote, - Param: defaultSamplingProbability, - } - } - if c.Reporter == nil { - c.Reporter = &ReporterConfig{} - } - - sampler := opts.sampler - if sampler == nil { - s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics) - if err != nil { - return nil, nil, err - } - sampler = s - } - - reporter := opts.reporter - if reporter == nil { - r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger) - if err != nil { - return nil, nil, err - } - reporter = r - } - - tracerOptions := []jaeger.TracerOption{ - jaeger.TracerOptions.Metrics(tracerMetrics), - jaeger.TracerOptions.Logger(opts.logger), - 
jaeger.TracerOptions.CustomHeaderKeys(c.Headers), - jaeger.TracerOptions.PoolSpans(opts.poolSpans), - jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan), - jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength), - jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling), - } - - if c.Gen128Bit || opts.gen128Bit { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true)) - } - - if opts.randomNumber != nil { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber)) - } - - for _, tag := range opts.tags { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) - } - - for _, tag := range c.Tags { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) - } - - for _, obs := range opts.observers { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs)) - } - - for _, cobs := range opts.contribObservers { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs)) - } - - for format, injector := range opts.injectors { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector)) - } - - for format, extractor := range opts.extractors { - tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor)) - } - - if c.BaggageRestrictions != nil { - mgr := remote.NewRestrictionManager( - c.ServiceName, - remote.Options.Metrics(tracerMetrics), - remote.Options.Logger(opts.logger), - remote.Options.HostPort(c.BaggageRestrictions.HostPort), - remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval), - remote.Options.DenyBaggageOnInitializationFailure( - c.BaggageRestrictions.DenyBaggageOnInitializationFailure, - ), - ) - tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr)) - } - - if c.Throttler != nil { - debugThrottler := throttler.NewThrottler( - c.ServiceName, - 
throttler.Options.Metrics(tracerMetrics), - throttler.Options.Logger(opts.logger), - throttler.Options.HostPort(c.Throttler.HostPort), - throttler.Options.RefreshInterval(c.Throttler.RefreshInterval), - throttler.Options.SynchronousInitialization( - c.Throttler.SynchronousInitialization, - ), - ) - - tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler)) - } - - tracer, closer := jaeger.NewTracer( - c.ServiceName, - sampler, - reporter, - tracerOptions..., - ) - - return tracer, closer, nil -} - -// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer. -// It returns a closer func that can be used to flush buffers before shutdown. -func (c Configuration) InitGlobalTracer( - serviceName string, - options ...Option, -) (io.Closer, error) { - if c.Disabled { - return &nullCloser{}, nil - } - tracer, closer, err := c.New(serviceName, options...) - if err != nil { - return nil, err - } - opentracing.SetGlobalTracer(tracer) - return closer, nil -} - -// NewSampler creates a new sampler based on the configuration -func (sc *SamplerConfig) NewSampler( - serviceName string, - metrics *jaeger.Metrics, -) (jaeger.Sampler, error) { - samplerType := strings.ToLower(sc.Type) - if samplerType == jaeger.SamplerTypeConst { - return jaeger.NewConstSampler(sc.Param != 0), nil - } - if samplerType == jaeger.SamplerTypeProbabilistic { - if sc.Param >= 0 && sc.Param <= 1.0 { - return jaeger.NewProbabilisticSampler(sc.Param) - } - return nil, fmt.Errorf( - "invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v", - sc.Param, - ) - } - if samplerType == jaeger.SamplerTypeRateLimiting { - return jaeger.NewRateLimitingSampler(sc.Param), nil - } - if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" { - sc2 := *sc - sc2.Type = jaeger.SamplerTypeProbabilistic - initSampler, err := sc2.NewSampler(serviceName, nil) - if err != nil { - return nil, err - } - options := 
[]jaeger.SamplerOption{ - jaeger.SamplerOptions.Metrics(metrics), - jaeger.SamplerOptions.InitialSampler(initSampler), - jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL), - jaeger.SamplerOptions.MaxOperations(sc.MaxOperations), - jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding), - jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval), - } - options = append(options, sc.Options...) - return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil - } - return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type) -} - -// NewReporter instantiates a new reporter that submits spans to the collector -func (rc *ReporterConfig) NewReporter( - serviceName string, - metrics *jaeger.Metrics, - logger jaeger.Logger, -) (jaeger.Reporter, error) { - sender, err := rc.newTransport(logger) - if err != nil { - return nil, err - } - reporter := jaeger.NewRemoteReporter( - sender, - jaeger.ReporterOptions.QueueSize(rc.QueueSize), - jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval), - jaeger.ReporterOptions.Logger(logger), - jaeger.ReporterOptions.Metrics(metrics)) - if rc.LogSpans && logger != nil { - logger.Infof("Initializing logging reporter") - reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter) - } - return reporter, err -} - -func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) { - switch { - case rc.CollectorEndpoint != "": - httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)} - if rc.User != "" && rc.Password != "" { - httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password)) - } - return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil - default: - return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{ - AgentClientUDPParams: utils.AgentClientUDPParams{ - HostPort: rc.LocalAgentHostPort, - Logger: logger, - DisableAttemptReconnecting: 
rc.DisableAttemptReconnecting, - AttemptReconnectInterval: rc.AttemptReconnectInterval, - }, - }) - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go deleted file mode 100644 index 0fc3c53fd3..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/config/config_env.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package config - -import ( - "fmt" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/uber/jaeger-client-go" -) - -const ( - // environment variable names - envServiceName = "JAEGER_SERVICE_NAME" - envDisabled = "JAEGER_DISABLED" - envRPCMetrics = "JAEGER_RPC_METRICS" - envTags = "JAEGER_TAGS" - envSamplerType = "JAEGER_SAMPLER_TYPE" - envSamplerParam = "JAEGER_SAMPLER_PARAM" - envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint - envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT" - envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS" - envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" - envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE" - envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL" - envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS" - envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED" - envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL" - envEndpoint = "JAEGER_ENDPOINT" - envUser = "JAEGER_USER" - envPassword = "JAEGER_PASSWORD" - envAgentHost = "JAEGER_AGENT_HOST" - envAgentPort = "JAEGER_AGENT_PORT" - env128bit = "JAEGER_TRACEID_128BIT" -) - -// FromEnv uses environment variables to set the tracer's Configuration -func FromEnv() (*Configuration, error) { - c := &Configuration{} - return c.FromEnv() -} - -// FromEnv uses environment variables and overrides existing tracer's Configuration -func (c *Configuration) FromEnv() (*Configuration, error) { - if e := os.Getenv(envServiceName); e != "" { - c.ServiceName = e - } - - if e := os.Getenv(envRPCMetrics); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - c.RPCMetrics = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e) - } - } - - if e := os.Getenv(envDisabled); e != "" { - if value, err := strconv.ParseBool(e); 
err == nil { - c.Disabled = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e) - } - } - - if e := os.Getenv(envTags); e != "" { - c.Tags = parseTags(e) - } - - if e := os.Getenv(env128bit); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - c.Gen128Bit = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e) - } - } - - if c.Sampler == nil { - c.Sampler = &SamplerConfig{} - } - - if s, err := c.Sampler.samplerConfigFromEnv(); err == nil { - c.Sampler = s - } else { - return nil, errors.Wrap(err, "cannot obtain sampler config from env") - } - - if c.Reporter == nil { - c.Reporter = &ReporterConfig{} - } - - if r, err := c.Reporter.reporterConfigFromEnv(); err == nil { - c.Reporter = r - } else { - return nil, errors.Wrap(err, "cannot obtain reporter config from env") - } - - return c, nil -} - -// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables -func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) { - if e := os.Getenv(envSamplerType); e != "" { - sc.Type = e - } - - if e := os.Getenv(envSamplerParam); e != "" { - if value, err := strconv.ParseFloat(e, 64); err == nil { - sc.Param = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e) - } - } - - if e := os.Getenv(envSamplingEndpoint); e != "" { - sc.SamplingServerURL = e - } else if e := os.Getenv(envSamplerManagerHostPort); e != "" { - sc.SamplingServerURL = e - } else if e := os.Getenv(envAgentHost); e != "" { - // Fallback if we know the agent host - try the sampling endpoint there - sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort) - } - - if e := os.Getenv(envSamplerMaxOperations); e != "" { - if value, err := strconv.ParseInt(e, 10, 0); err == nil { - sc.MaxOperations = int(value) - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", 
envSamplerMaxOperations, e) - } - } - - if e := os.Getenv(envSamplerRefreshInterval); e != "" { - if value, err := time.ParseDuration(e); err == nil { - sc.SamplingRefreshInterval = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e) - } - } - - return sc, nil -} - -// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables -func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) { - if e := os.Getenv(envReporterMaxQueueSize); e != "" { - if value, err := strconv.ParseInt(e, 10, 0); err == nil { - rc.QueueSize = int(value) - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e) - } - } - - if e := os.Getenv(envReporterFlushInterval); e != "" { - if value, err := time.ParseDuration(e); err == nil { - rc.BufferFlushInterval = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e) - } - } - - if e := os.Getenv(envReporterLogSpans); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - rc.LogSpans = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e) - } - } - - if e := os.Getenv(envEndpoint); e != "" { - u, err := url.ParseRequestURI(e) - if err != nil { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e) - } - rc.CollectorEndpoint = u.String() - user := os.Getenv(envUser) - pswd := os.Getenv(envPassword) - if user != "" && pswd == "" || user == "" && pswd != "" { - return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword) - } - rc.User = user - rc.Password = pswd - } else { - useEnv := false - host := jaeger.DefaultUDPSpanServerHost - if e := os.Getenv(envAgentHost); e != "" { - host = e - useEnv = true - } - - port := jaeger.DefaultUDPSpanServerPort - if e := os.Getenv(envAgentPort); e != "" { - if value, err := strconv.ParseInt(e, 10, 0); err 
== nil { - port = int(value) - useEnv = true - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e) - } - } - if useEnv || rc.LocalAgentHostPort == "" { - rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) - } - - if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" { - if value, err := strconv.ParseBool(e); err == nil { - rc.DisableAttemptReconnecting = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e) - } - } - - if !rc.DisableAttemptReconnecting { - if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" { - if value, err := time.ParseDuration(e); err == nil { - rc.AttemptReconnectInterval = value - } else { - return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e) - } - } - } - } - - return rc, nil -} - -// parseTags parses the given string into a collection of Tags. -// Spec for this value: -// - comma separated list of key=value -// - value can be specified using the notation ${envVar:defaultValue}, where `envVar` -// is an environment variable and `defaultValue` is the value to use in case the env var is not set -func parseTags(sTags string) []opentracing.Tag { - pairs := strings.Split(sTags, ",") - tags := make([]opentracing.Tag, 0) - for _, p := range pairs { - kv := strings.SplitN(p, "=", 2) - k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) - - if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { - ed := strings.SplitN(v[2:len(v)-1], ":", 2) - e, d := ed[0], ed[1] - v = os.Getenv(e) - if v == "" && d != "" { - v = d - } - } - - tag := opentracing.Tag{Key: k, Value: v} - tags = append(tags, tag) - } - - return tags -} diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go deleted file mode 100644 index a2b9cbc28b..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/config/options.go 
+++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - opentracing "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-lib/metrics" - - "github.com/uber/jaeger-client-go" -) - -// Option is a function that sets some option on the client. -type Option func(c *Options) - -// Options control behavior of the client. -type Options struct { - metrics metrics.Factory - logger jaeger.Logger - reporter jaeger.Reporter - sampler jaeger.Sampler - contribObservers []jaeger.ContribObserver - observers []jaeger.Observer - gen128Bit bool - poolSpans bool - zipkinSharedRPCSpan bool - maxTagValueLength int - noDebugFlagOnForcedSampling bool - tags []opentracing.Tag - injectors map[interface{}]jaeger.Injector - extractors map[interface{}]jaeger.Extractor - randomNumber func() uint64 -} - -// Metrics creates an Option that initializes Metrics in the tracer, -// which is used to emit statistics about spans. -func Metrics(factory metrics.Factory) Option { - return func(c *Options) { - c.metrics = factory - } -} - -// Logger can be provided to log Reporter errors, as well as to log spans -// if Reporter.LogSpans is set to true. -func Logger(logger jaeger.Logger) Option { - return func(c *Options) { - c.logger = logger - } -} - -// Reporter can be provided explicitly to override the configuration. -// Useful for testing, e.g. by passing InMemoryReporter. 
-func Reporter(reporter jaeger.Reporter) Option { - return func(c *Options) { - c.reporter = reporter - } -} - -// Sampler can be provided explicitly to override the configuration. -func Sampler(sampler jaeger.Sampler) Option { - return func(c *Options) { - c.sampler = sampler - } -} - -// Observer can be registered with the Tracer to receive notifications about new Spans. -func Observer(observer jaeger.Observer) Option { - return func(c *Options) { - c.observers = append(c.observers, observer) - } -} - -// ContribObserver can be registered with the Tracer to receive notifications -// about new spans. -func ContribObserver(observer jaeger.ContribObserver) Option { - return func(c *Options) { - c.contribObservers = append(c.contribObservers, observer) - } -} - -// Gen128Bit specifies whether to generate 128bit trace IDs. -func Gen128Bit(gen128Bit bool) Option { - return func(c *Options) { - c.gen128Bit = gen128Bit - } -} - -// PoolSpans specifies whether to pool spans -func PoolSpans(poolSpans bool) Option { - return func(c *Options) { - c.poolSpans = poolSpans - } -} - -// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client -// and server spans a la zipkin. If false, client and server spans will be assigned -// different IDs. -func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option { - return func(c *Options) { - c.zipkinSharedRPCSpan = zipkinSharedRPCSpan - } -} - -// MaxTagValueLength can be provided to override the default max tag value length. -func MaxTagValueLength(maxTagValueLength int) Option { - return func(c *Options) { - c.maxTagValueLength = maxTagValueLength - } -} - -// NoDebugFlagOnForcedSampling can be used to decide whether debug flag will be set or not -// when calling span.setSamplingPriority to force sample a span. 
-func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option { - return func(c *Options) { - c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling - } -} - -// Tag creates an option that adds a tracer-level tag. -func Tag(key string, value interface{}) Option { - return func(c *Options) { - c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value}) - } -} - -// Injector registers an Injector with the given format. -func Injector(format interface{}, injector jaeger.Injector) Option { - return func(c *Options) { - c.injectors[format] = injector - } -} - -// Extractor registers an Extractor with the given format. -func Extractor(format interface{}, extractor jaeger.Extractor) Option { - return func(c *Options) { - c.extractors[format] = extractor - } -} - -// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs. -func WithRandomNumber(f func() uint64) Option { - return func(c *Options) { - c.randomNumber = f - } -} - -func applyOptions(options ...Option) Options { - opts := Options{ - injectors: make(map[interface{}]jaeger.Injector), - extractors: make(map[interface{}]jaeger.Extractor), - } - for _, option := range options { - option(&opts) - } - if opts.metrics == nil { - opts.metrics = metrics.NullFactory - } - if opts.logger == nil { - opts.logger = jaeger.NullLogger - } - return opts -} diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go deleted file mode 100644 index 35710cfef6..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "fmt" - - "github.com/opentracing/opentracing-go" -) - -const ( - // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.30.0" - - // JaegerClientVersionTagKey is the name of the tag used to report client version. - JaegerClientVersionTagKey = "jaeger.version" - - // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, - // if found in the carrier, forces the trace to be sampled as "debug" trace. - // The value of the header is recorded as the tag on the root span, so that the - // trace can be found in the UI using this value as a correlation ID. - JaegerDebugHeader = "jaeger-debug-id" - - // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. - // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where - // a root span does not exist. - JaegerBaggageHeader = "jaeger-baggage" - - // TracerHostnameTagKey used to report host name of the process. - TracerHostnameTagKey = "hostname" - - // TracerIPTagKey used to report ip of the process. - TracerIPTagKey = "ip" - - // TracerUUIDTagKey used to report UUID of the client process. - TracerUUIDTagKey = "client-uuid" - - // SamplerTypeTagKey reports which sampler was used on the root span. - SamplerTypeTagKey = "sampler.type" - - // SamplerParamTagKey reports the parameter of the sampler, like sampling probability. 
- SamplerParamTagKey = "sampler.param" - - // TraceContextHeaderName is the http header name used to propagate tracing context. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceContextHeaderName = "uber-trace-id" - - // TracerStateHeaderName is deprecated. - // Deprecated: use TraceContextHeaderName - TracerStateHeaderName = TraceContextHeaderName - - // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceBaggageHeaderPrefix = "uberctx-" - - // SamplerTypeConst is the type of sampler that always makes the same decision. - SamplerTypeConst = "const" - - // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy. - SamplerTypeRemote = "remote" - - // SamplerTypeProbabilistic is the type of sampler that samples traces - // with a certain fixed probability. - SamplerTypeProbabilistic = "probabilistic" - - // SamplerTypeRateLimiting is the type of sampler that samples - // only up to a fixed number of traces per second. - SamplerTypeRateLimiting = "ratelimiting" - - // SamplerTypeLowerBound is the type of sampler that samples - // at least a fixed number of traces per second. - SamplerTypeLowerBound = "lowerbound" - - // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP - DefaultUDPSpanServerHost = "localhost" - - // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP - DefaultUDPSpanServerPort = 6831 - - // DefaultSamplingServerPort is the default port to fetch sampling config from, via http - DefaultSamplingServerPort = 5778 - - // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value. - DefaultMaxTagValueLength = 256 - - // SelfRefType is a jaeger specific reference type that supports creating a span - // with an already defined context. 
- selfRefType opentracing.SpanReferenceType = 99 -) - -var ( - // DefaultSamplingServerURL is the default url to fetch sampling config from, via http - DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort) -) diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go deleted file mode 100644 index 4ce1881f3b..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - opentracing "github.com/opentracing/opentracing-go" -) - -// ContribObserver can be registered with the Tracer to receive notifications -// about new Spans. Modelled after github.com/opentracing-contrib/go-observer. -type ContribObserver interface { - // Create and return a span observer. Called when a span starts. - // If the Observer is not interested in the given span, it must return (nil, false). - // E.g : - // func StartSpan(opName string, opts ...opentracing.StartSpanOption) { - // var sp opentracing.Span - // sso := opentracing.StartSpanOptions{} - // if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok { - // // we have a valid SpanObserver - // } - // ... 
- // } - OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) -} - -// ContribSpanObserver is created by the Observer and receives notifications -// about other Span events. This interface is meant to match -// github.com/opentracing-contrib/go-observer, via duck typing, without -// directly importing the go-observer package. -type ContribSpanObserver interface { - OnSetOperationName(operationName string) - OnSetTag(key string, value interface{}) - OnFinish(options opentracing.FinishOptions) -} - -// wrapper observer for the old observers (see observer.go) -type oldObserver struct { - obs Observer -} - -func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) { - spanObserver := o.obs.OnStartSpan(operationName, options) - return spanObserver, spanObserver != nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go deleted file mode 100644 index fac3c09f9f..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package jaeger implements an OpenTracing (http://opentracing.io) Tracer. 
- -For integration instructions please refer to the README: - -https://github.com/uber/jaeger-client-go/blob/master/README.md -*/ -package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock deleted file mode 100644 index c1ec339258..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/glide.lock +++ /dev/null @@ -1,105 +0,0 @@ -hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed -updated: 2020-07-31T13:30:37.242608-04:00 -imports: -- name: github.com/beorn7/perks - version: 3a771d992973f24aa725d07868b467d1ddfceafb - subpackages: - - quantile -- name: github.com/HdrHistogram/hdrhistogram-go - version: 3a0bb77429bd3a61596f5e8a3172445844342120 -- name: github.com/crossdock/crossdock-go - version: 049aabb0122b03bc9bd30cab8f3f91fb60166361 - subpackages: - - assert - - require -- name: github.com/davecgh/go-spew - version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 - subpackages: - - spew -- name: github.com/golang/mock - version: 51421b967af1f557f93a59e0057aaf15ca02e29c - subpackages: - - gomock -- name: github.com/golang/protobuf - version: b5d812f8a3706043e23a9cd5babf2e5423744d30 - subpackages: - - proto -- name: github.com/matttproud/golang_protobuf_extensions - version: c182affec369e30f25d3eb8cd8a478dee585ae7d - subpackages: - - pbutil -- name: github.com/opentracing/opentracing-go - version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12 - subpackages: - - ext - - harness - - log -- name: github.com/pkg/errors - version: ba968bfe8b2f7e042a574c888954fccecfa385b4 -- name: github.com/pmezard/go-difflib - version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc - subpackages: - - difflib -- name: github.com/prometheus/client_golang - version: 170205fb58decfd011f1550d4cfb737230d7ae4f - subpackages: - - prometheus - - prometheus/internal -- name: github.com/prometheus/client_model - version: fd36f4220a901265f90734c3183c5f0c91daa0b8 - subpackages: - - go -- name: github.com/prometheus/common 
- version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119 - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/prometheus/procfs - version: 8a055596020d692cf491851e47ba3e302d9f90ce - subpackages: - - internal/fs - - internal/util -- name: github.com/stretchr/testify - version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8 - subpackages: - - assert - - mock - - require - - suite -- name: github.com/uber-go/atomic - version: 845920076a298bdb984fb0f1b86052e4ca0a281c -- name: github.com/uber/jaeger-lib - version: 48cc1df63e6be0d63b95677f0d22beb880bce1e4 - subpackages: - - metrics - - metrics/metricstest - - metrics/prometheus -- name: go.uber.org/atomic - version: 845920076a298bdb984fb0f1b86052e4ca0a281c -- name: go.uber.org/multierr - version: b587143a48b62b01d337824eab43700af6ffe222 -- name: go.uber.org/zap - version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd - subpackages: - - buffer - - internal/bufferpool - - internal/color - - internal/exit - - zapcore - - zaptest/observer -- name: golang.org/x/net - version: addf6b3196f61cd44ce5a76657913698c73479d0 - subpackages: - - context - - context/ctxhttp -- name: golang.org/x/sys - version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64 - subpackages: - - internal/unsafeheader - - windows -- name: gopkg.in/yaml.v3 - version: eeeca48fe7764f320e4870d231902bf9c1be2c08 -testImports: -- name: github.com/stretchr/objx - version: 35313a95ee26395aa17d366c71a2ccf788fa69b6 diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml deleted file mode 100644 index 295678c910..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/glide.yaml +++ /dev/null @@ -1,30 +0,0 @@ -package: github.com/uber/jaeger-client-go -import: -- package: github.com/opentracing/opentracing-go - version: ^1.2 - subpackages: - - ext - - log -- package: github.com/crossdock/crossdock-go -- package: github.com/uber/jaeger-lib - version: ^2.3.0 - subpackages: - - metrics -- 
package: github.com/pkg/errors - version: ~0.8.0 -- package: go.uber.org/zap - source: https://github.com/uber-go/zap.git - version: ^1 -- package: github.com/uber-go/atomic - version: ^1 -- package: github.com/prometheus/client_golang - version: 1.1 -- package: github.com/prometheus/procfs - version: 0.0.6 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert - - require - - suite -- package: github.com/golang/mock diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go deleted file mode 100644 index 5da70351d9..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/header.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -// HeadersConfig contains the values for the header keys that Jaeger will use. -// These values may be either custom or default depending on whether custom -// values were provided via a configuration. -type HeadersConfig struct { - // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, - // if found in the carrier, forces the trace to be sampled as "debug" trace. - // The value of the header is recorded as the tag on the root span, so that the - // trace can be found in the UI using this value as a correlation ID. 
- JaegerDebugHeader string `yaml:"jaegerDebugHeader"` - - // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. - // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where - // a root span does not exist. - JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"` - - // TraceContextHeaderName is the http header name used to propagate tracing context. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceContextHeaderName string `yaml:"TraceContextHeaderName"` - - // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. - // This must be in lower-case to avoid mismatches when decoding incoming headers. - TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"` -} - -// ApplyDefaults sets missing configuration keys to default values -func (c *HeadersConfig) ApplyDefaults() *HeadersConfig { - if c.JaegerBaggageHeader == "" { - c.JaegerBaggageHeader = JaegerBaggageHeader - } - if c.JaegerDebugHeader == "" { - c.JaegerDebugHeader = JaegerDebugHeader - } - if c.TraceBaggageHeaderPrefix == "" { - c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix - } - if c.TraceContextHeaderName == "" { - c.TraceContextHeaderName = TraceContextHeaderName - } - return c -} - -func getDefaultHeadersConfig() *HeadersConfig { - return &HeadersConfig{ - JaegerDebugHeader: JaegerDebugHeader, - JaegerBaggageHeader: JaegerBaggageHeader, - TraceContextHeaderName: TraceContextHeaderName, - TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix, - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go deleted file mode 100644 index 745729319f..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "time" - - "github.com/uber/jaeger-client-go" -) - -const ( - defaultMaxValueLength = 2048 - defaultRefreshInterval = time.Minute - defaultHostPort = "localhost:5778" -) - -// Option is a function that sets some option on the RestrictionManager -type Option func(options *options) - -// Options is a factory for all available options -var Options options - -type options struct { - denyBaggageOnInitializationFailure bool - metrics *jaeger.Metrics - logger jaeger.Logger - hostPort string - refreshInterval time.Duration -} - -// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager. -// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage -// restrictions have been retrieved from agent. -// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage -// restrictions have been retrieved from agent. -func (options) DenyBaggageOnInitializationFailure(b bool) Option { - return func(o *options) { - o.denyBaggageOnInitializationFailure = b - } -} - -// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics. 
-func (options) Metrics(m *jaeger.Metrics) Option { - return func(o *options) { - o.metrics = m - } -} - -// Logger creates an Option that sets the logger used by the RestrictionManager. -func (options) Logger(logger jaeger.Logger) Option { - return func(o *options) { - o.logger = logger - } -} - -// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions. -func (options) HostPort(hostPort string) Option { - return func(o *options) { - o.hostPort = hostPort - } -} - -// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for -// the baggage restrictions. -func (options) RefreshInterval(refreshInterval time.Duration) Option { - return func(o *options) { - o.refreshInterval = refreshInterval - } -} - -func applyOptions(o ...Option) options { - opts := options{} - for _, option := range o { - option(&opts) - } - if opts.metrics == nil { - opts.metrics = jaeger.NewNullMetrics() - } - if opts.logger == nil { - opts.logger = jaeger.NullLogger - } - if opts.hostPort == "" { - opts.hostPort = defaultHostPort - } - if opts.refreshInterval == 0 { - opts.refreshInterval = defaultRefreshInterval - } - return opts -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go deleted file mode 100644 index 2f58bb541a..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "context" - "fmt" - "net/url" - "sync" - "time" - - "github.com/uber/jaeger-client-go/internal/baggage" - thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage" - "github.com/uber/jaeger-client-go/utils" -) - -type httpBaggageRestrictionManagerProxy struct { - url string -} - -func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy { - v := url.Values{} - v.Set("service", serviceName) - return &httpBaggageRestrictionManagerProxy{ - url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()), - } -} - -func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) { - var out []*thrift.BaggageRestriction - if err := utils.GetJSON(s.url, &out); err != nil { - return nil, err - } - return out, nil -} - -// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent -type RestrictionManager struct { - options - - mux sync.RWMutex - serviceName string - restrictions map[string]*baggage.Restriction - thriftProxy thrift.BaggageRestrictionManager - pollStopped sync.WaitGroup - stopPoll chan struct{} - invalidRestriction *baggage.Restriction - validRestriction *baggage.Restriction - - // Determines if the manager has successfully retrieved baggage restrictions from agent - initialized bool -} - -// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest -// baggage restrictions. 
-func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager { - // TODO there is a developing use case where a single tracer can generate traces on behalf of many services. - // restrictionsMap will need to exist per service - opts := applyOptions(options...) - m := &RestrictionManager{ - serviceName: serviceName, - options: opts, - restrictions: make(map[string]*baggage.Restriction), - thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName), - stopPoll: make(chan struct{}), - invalidRestriction: baggage.NewRestriction(false, 0), - validRestriction: baggage.NewRestriction(true, defaultMaxValueLength), - } - m.pollStopped.Add(1) - go m.pollManager() - return m -} - -// isReady returns true if the manager has retrieved baggage restrictions from the remote source. -func (m *RestrictionManager) isReady() bool { - m.mux.RLock() - defer m.mux.RUnlock() - return m.initialized -} - -// GetRestriction implements RestrictionManager#GetRestriction. -func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction { - m.mux.RLock() - defer m.mux.RUnlock() - if !m.initialized { - if m.denyBaggageOnInitializationFailure { - return m.invalidRestriction - } - return m.validRestriction - } - if restriction, ok := m.restrictions[key]; ok { - return restriction - } - return m.invalidRestriction -} - -// Close stops remote polling and closes the RemoteRestrictionManager. 
-func (m *RestrictionManager) Close() error { - close(m.stopPoll) - m.pollStopped.Wait() - return nil -} - -func (m *RestrictionManager) pollManager() { - defer m.pollStopped.Done() - // attempt to initialize baggage restrictions - if err := m.updateRestrictions(); err != nil { - m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error())) - } - ticker := time.NewTicker(m.refreshInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := m.updateRestrictions(); err != nil { - m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error())) - } - case <-m.stopPoll: - return - } - } -} - -func (m *RestrictionManager) updateRestrictions() error { - restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName) - if err != nil { - m.metrics.BaggageRestrictionsUpdateFailure.Inc(1) - return err - } - newRestrictions := m.parseRestrictions(restrictions) - m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1) - m.mux.Lock() - defer m.mux.Unlock() - m.initialized = true - m.restrictions = newRestrictions - return nil -} - -func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction { - setters := make(map[string]*baggage.Restriction, len(restrictions)) - for _, restriction := range restrictions { - setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength)) - } - return setters -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go deleted file mode 100644 index c16a5c5662..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage - -const ( - defaultMaxValueLength = 2048 -) - -// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value. -type Restriction struct { - keyAllowed bool - maxValueLength int -} - -// NewRestriction returns a new Restriction. -func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction { - return &Restriction{ - keyAllowed: keyAllowed, - maxValueLength: maxValueLength, - } -} - -// KeyAllowed returns whether the baggage key for this restriction is allowed. -func (r *Restriction) KeyAllowed() bool { - return r.keyAllowed -} - -// MaxValueLength returns the max length for the baggage value. -func (r *Restriction) MaxValueLength() int { - return r.maxValueLength -} - -// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager -// will return a Restriction for a specific baggage key which will determine whether the baggage -// key is allowed for the current service and any other applicable restrictions on the baggage -// value. -type RestrictionManager interface { - GetRestriction(service, key string) *Restriction -} - -// DefaultRestrictionManager allows any baggage key. -type DefaultRestrictionManager struct { - defaultRestriction *Restriction -} - -// NewDefaultRestrictionManager returns a DefaultRestrictionManager. 
-func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager { - if maxValueLength == 0 { - maxValueLength = defaultMaxValueLength - } - return &DefaultRestrictionManager{ - defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength}, - } -} - -// GetRestriction implements RestrictionManager#GetRestriction. -func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction { - return m.defaultRestriction -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go deleted file mode 100644 index fe0bef268a..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reporterstats - -// ReporterStats exposes some metrics from the RemoteReporter. -type ReporterStats interface { - SpansDroppedFromQueue() int64 -} - -// Receiver can be implemented by a Transport to be given ReporterStats. 
-type Receiver interface { - SetReporterStats(ReporterStats) -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go deleted file mode 100644 index 0e10b8a5aa..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spanlog - -import ( - "encoding/json" - "fmt" - - "github.com/opentracing/opentracing-go/log" -) - -type fieldsAsMap map[string]string - -// MaterializeWithJSON converts log Fields into JSON string -// TODO refactor into pluggable materializer -func MaterializeWithJSON(logFields []log.Field) ([]byte, error) { - fields := fieldsAsMap(make(map[string]string, len(logFields))) - for _, field := range logFields { - field.Marshal(fields) - } - if event, ok := fields["event"]; ok && len(fields) == 1 { - return []byte(event), nil - } - return json.Marshal(fields) -} - -func (ml fieldsAsMap) EmitString(key, value string) { - ml[key] = value -} - -func (ml fieldsAsMap) EmitBool(key string, value bool) { - ml[key] = fmt.Sprintf("%t", value) -} - -func (ml fieldsAsMap) EmitInt(key string, value int) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitInt32(key string, value int32) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitInt64(key string, value int64) { - 
ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitUint32(key string, value uint32) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitUint64(key string, value uint64) { - ml[key] = fmt.Sprintf("%d", value) -} - -func (ml fieldsAsMap) EmitFloat32(key string, value float32) { - ml[key] = fmt.Sprintf("%f", value) -} - -func (ml fieldsAsMap) EmitFloat64(key string, value float64) { - ml[key] = fmt.Sprintf("%f", value) -} - -func (ml fieldsAsMap) EmitObject(key string, value interface{}) { - ml[key] = fmt.Sprintf("%+v", value) -} - -func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) { - value(ml) -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go deleted file mode 100644 index f52c322fb6..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package remote - -import ( - "time" - - "github.com/uber/jaeger-client-go" -) - -const ( - defaultHostPort = "localhost:5778" - defaultRefreshInterval = time.Second * 5 -) - -// Option is a function that sets some option on the Throttler -type Option func(options *options) - -// Options is a factory for all available options -var Options options - -type options struct { - metrics *jaeger.Metrics - logger jaeger.Logger - hostPort string - refreshInterval time.Duration - synchronousInitialization bool -} - -// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics. -func (options) Metrics(m *jaeger.Metrics) Option { - return func(o *options) { - o.metrics = m - } -} - -// Logger creates an Option that sets the logger used by the Throttler. -func (options) Logger(logger jaeger.Logger) Option { - return func(o *options) { - o.logger = logger - } -} - -// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits. -func (options) HostPort(hostPort string) Option { - return func(o *options) { - o.hostPort = hostPort - } -} - -// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for -// credits. -func (options) RefreshInterval(refreshInterval time.Duration) Option { - return func(o *options) { - o.refreshInterval = refreshInterval - } -} - -// SynchronousInitialization creates an Option that determines whether the throttler should synchronously -// fetch credits from the agent when an operation is seen for the first time. This should be set to true -// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront -// such that sampling or throttling occurs. 
-func (options) SynchronousInitialization(b bool) Option { - return func(o *options) { - o.synchronousInitialization = b - } -} - -func applyOptions(o ...Option) options { - opts := options{} - for _, option := range o { - option(&opts) - } - if opts.metrics == nil { - opts.metrics = jaeger.NewNullMetrics() - } - if opts.logger == nil { - opts.logger = jaeger.NullLogger - } - if opts.hostPort == "" { - opts.hostPort = defaultHostPort - } - if opts.refreshInterval == 0 { - opts.refreshInterval = defaultRefreshInterval - } - return opts -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go deleted file mode 100644 index 20f434fe49..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package remote - -import ( - "fmt" - "net/url" - "sync" - "sync/atomic" - "time" - - "github.com/pkg/errors" - - "github.com/uber/jaeger-client-go" - "github.com/uber/jaeger-client-go/utils" -) - -const ( - // minimumCredits is the minimum amount of credits necessary to not be throttled. - // i.e. if currentCredits > minimumCredits, then the operation will not be throttled. 
- minimumCredits = 1.0 -) - -var ( - errorUUIDNotSet = errors.New("Throttler UUID must be set") -) - -type operationBalance struct { - Operation string `json:"operation"` - Balance float64 `json:"balance"` -} - -type creditResponse struct { - Balances []operationBalance `json:"balances"` -} - -type httpCreditManagerProxy struct { - hostPort string -} - -func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy { - return &httpCreditManagerProxy{ - hostPort: hostPort, - } -} - -// N.B. Operations list must not be empty. -func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) { - params := url.Values{} - params.Set("service", serviceName) - params.Set("uuid", uuid) - for _, op := range operations { - params.Add("operations", op) - } - var resp creditResponse - if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil { - return nil, errors.Wrap(err, "Failed to receive credits from agent") - } - return &resp, nil -} - -// Throttler retrieves credits from agent and uses it to throttle operations. -type Throttler struct { - options - - mux sync.RWMutex - service string - uuid atomic.Value - creditManager *httpCreditManagerProxy - credits map[string]float64 // map of operation->credits - close chan struct{} - stopped sync.WaitGroup -} - -// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle -// the service. -func NewThrottler(service string, options ...Option) *Throttler { - opts := applyOptions(options...) - creditManager := newHTTPCreditManagerProxy(opts.hostPort) - t := &Throttler{ - options: opts, - creditManager: creditManager, - service: service, - credits: make(map[string]float64), - close: make(chan struct{}), - } - t.stopped.Add(1) - go t.pollManager() - return t -} - -// IsAllowed implements Throttler#IsAllowed. 
-func (t *Throttler) IsAllowed(operation string) bool { - t.mux.Lock() - defer t.mux.Unlock() - value, ok := t.credits[operation] - if !ok || value == 0 { - if !ok { - // NOTE: This appears to be a no-op at first glance, but it stores - // the operation key in the map. Necessary for functionality of - // Throttler#operations method. - t.credits[operation] = 0 - } - if !t.synchronousInitialization { - t.metrics.ThrottledDebugSpans.Inc(1) - return false - } - // If it is the first time this operation is being checked, synchronously fetch - // the credits. - credits, err := t.fetchCredits([]string{operation}) - if err != nil { - // Failed to receive credits from agent, try again next time - t.logger.Error("Failed to fetch credits: " + err.Error()) - return false - } - if len(credits.Balances) == 0 { - // This shouldn't happen but just in case - return false - } - for _, opBalance := range credits.Balances { - t.credits[opBalance.Operation] += opBalance.Balance - } - } - return t.isAllowed(operation) -} - -// Close stops the throttler from fetching credits from remote. -func (t *Throttler) Close() error { - close(t.close) - t.stopped.Wait() - return nil -} - -// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote -// requests are made. -func (t *Throttler) SetProcess(process jaeger.Process) { - if process.UUID != "" { - t.uuid.Store(process.UUID) - } -} - -// N.B. 
This function must be called with the Write Lock -func (t *Throttler) isAllowed(operation string) bool { - credits := t.credits[operation] - if credits < minimumCredits { - t.metrics.ThrottledDebugSpans.Inc(1) - return false - } - t.credits[operation] = credits - minimumCredits - return true -} - -func (t *Throttler) pollManager() { - defer t.stopped.Done() - ticker := time.NewTicker(t.refreshInterval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - t.refreshCredits() - case <-t.close: - return - } - } -} - -func (t *Throttler) operations() []string { - t.mux.RLock() - defer t.mux.RUnlock() - operations := make([]string, 0, len(t.credits)) - for op := range t.credits { - operations = append(operations, op) - } - return operations -} - -func (t *Throttler) refreshCredits() { - operations := t.operations() - if len(operations) == 0 { - return - } - newCredits, err := t.fetchCredits(operations) - if err != nil { - t.metrics.ThrottlerUpdateFailure.Inc(1) - t.logger.Error("Failed to fetch credits: " + err.Error()) - return - } - t.metrics.ThrottlerUpdateSuccess.Inc(1) - - t.mux.Lock() - defer t.mux.Unlock() - for _, opBalance := range newCredits.Balances { - t.credits[opBalance.Operation] += opBalance.Balance - } -} - -func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) { - uuid := t.uuid.Load() - uuidStr, _ := uuid.(string) - if uuid == nil || uuidStr == "" { - return nil, errorUUIDNotSet - } - return t.creditManager.FetchCredits(uuidStr, t.service, operations) -} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go deleted file mode 100644 index 196ed69cac..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package throttler - -// Throttler is used to rate limits operations. For example, given how debug spans -// are always sampled, a throttler can be enabled per client to rate limit the amount -// of debug spans a client can start. -type Throttler interface { - // IsAllowed determines whether the operation should be allowed and not be - // throttled. - IsAllowed(operation string) bool -} - -// DefaultThrottler doesn't throttle at all. -type DefaultThrottler struct{} - -// IsAllowed implements Throttler#IsAllowed. -func (t DefaultThrottler) IsAllowed(operation string) bool { - return true -} diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go deleted file mode 100644 index 8402d087c2..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/interop.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/opentracing/opentracing-go" -) - -// TODO this file should not be needed after TChannel PR. - -type formatKey int - -// SpanContextFormat is a constant used as OpenTracing Format. -// Requires *SpanContext as carrier. -// This format is intended for interop with TChannel or other Zipkin-like tracers. -const SpanContextFormat formatKey = iota - -type jaegerTraceContextPropagator struct { - tracer *Tracer -} - -func (p *jaegerTraceContextPropagator) Inject( - ctx SpanContext, - abstractCarrier interface{}, -) error { - carrier, ok := abstractCarrier.(*SpanContext) - if !ok { - return opentracing.ErrInvalidCarrier - } - - carrier.CopyFrom(&ctx) - return nil -} - -func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - carrier, ok := abstractCarrier.(*SpanContext) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - ctx := new(SpanContext) - ctx.CopyFrom(carrier) - return *ctx, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go deleted file mode 100644 index 868b2a5b54..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "fmt" - - "github.com/opentracing/opentracing-go/log" - - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" -) - -type tags []*j.Tag - -// ConvertLogsToJaegerTags converts log Fields into jaeger tags. -func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag { - fields := tags(make([]*j.Tag, 0, len(logFields))) - for _, field := range logFields { - field.Marshal(&fields) - } - return fields -} - -func (t *tags) EmitString(key, value string) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value}) -} - -func (t *tags) EmitBool(key string, value bool) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value}) -} - -func (t *tags) EmitInt(key string, value int) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitInt32(key string, value int32) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitInt64(key string, value int64) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value}) -} - -func (t *tags) EmitUint32(key string, value uint32) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitUint64(key string, value uint64) { - vLong := int64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) -} - -func (t *tags) EmitFloat32(key string, value float32) { - vDouble := float64(value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble}) -} - -func (t *tags) EmitFloat64(key string, value float64) { - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value}) -} - -func (t *tags) EmitObject(key string, value interface{}) { - vStr := fmt.Sprintf("%+v", value) - *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr}) -} - -func (t *tags) EmitLazyLogger(value log.LazyLogger) { - 
value(t) -} diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go deleted file mode 100644 index 3ac2f8f949..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" - - "github.com/opentracing/opentracing-go" - - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/utils" -) - -// BuildJaegerThrift builds jaeger span based on internal span. -// TODO: (breaking change) move to internal package. 
-func BuildJaegerThrift(span *Span) *j.Span { - span.Lock() - defer span.Unlock() - startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) - duration := span.duration.Nanoseconds() / int64(time.Microsecond) - jaegerSpan := &j.Span{ - TraceIdLow: int64(span.context.traceID.Low), - TraceIdHigh: int64(span.context.traceID.High), - SpanId: int64(span.context.spanID), - ParentSpanId: int64(span.context.parentID), - OperationName: span.operationName, - Flags: int32(span.context.samplingState.flags()), - StartTime: startTime, - Duration: duration, - Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength), - Logs: buildLogs(span.logs), - References: buildReferences(span.references), - } - return jaegerSpan -} - -// BuildJaegerProcessThrift creates a thrift Process type. -// TODO: (breaking change) move to internal package. -func BuildJaegerProcessThrift(span *Span) *j.Process { - span.Lock() - defer span.Unlock() - return buildJaegerProcessThrift(span.tracer) -} - -func buildJaegerProcessThrift(tracer *Tracer) *j.Process { - process := &j.Process{ - ServiceName: tracer.serviceName, - Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength), - } - if tracer.process.UUID != "" { - process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING}) - } - return process -} - -func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag { - jTags := make([]*j.Tag, 0, len(tags)) - for _, tag := range tags { - jTag := buildTag(&tag, maxTagValueLength) - jTags = append(jTags, jTag) - } - return jTags -} - -func buildLogs(logs []opentracing.LogRecord) []*j.Log { - jLogs := make([]*j.Log, 0, len(logs)) - for _, log := range logs { - jLog := &j.Log{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), - Fields: ConvertLogsToJaegerTags(log.Fields), - } - jLogs = append(jLogs, jLog) - } - return jLogs -} - -func buildTag(tag *Tag, maxTagValueLength int) *j.Tag { - jTag := &j.Tag{Key: 
tag.key} - switch value := tag.value.(type) { - case string: - vStr := truncateString(value, maxTagValueLength) - jTag.VStr = &vStr - jTag.VType = j.TagType_STRING - case []byte: - if len(value) > maxTagValueLength { - value = value[:maxTagValueLength] - } - jTag.VBinary = value - jTag.VType = j.TagType_BINARY - case int: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int8: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint8: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int16: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint16: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int32: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint32: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int64: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint64: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case float32: - vDouble := float64(value) - jTag.VDouble = &vDouble - jTag.VType = j.TagType_DOUBLE - case float64: - vDouble := float64(value) - jTag.VDouble = &vDouble - jTag.VType = j.TagType_DOUBLE - case bool: - vBool := value - jTag.VBool = &vBool - jTag.VType = j.TagType_BOOL - default: - vStr := truncateString(stringify(value), maxTagValueLength) - jTag.VStr = &vStr - jTag.VType = j.TagType_STRING - } - return jTag -} - -func buildReferences(references []Reference) []*j.SpanRef { - retMe := make([]*j.SpanRef, 0, len(references)) - for _, ref := range references { - if ref.Type == opentracing.ChildOfRef { - retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF)) - } else if ref.Type == opentracing.FollowsFromRef { - retMe = append(retMe, 
spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM)) - } - } - return retMe -} - -func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef { - return &j.SpanRef{ - RefType: refType, - TraceIdLow: int64(ctx.traceID.Low), - TraceIdHigh: int64(ctx.traceID.High), - SpanId: int64(ctx.spanID), - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go deleted file mode 100644 index ced6e0ce93..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/log/logger.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import ( - "bytes" - "fmt" - "log" - "sync" -) - -// Logger provides an abstract interface for logging from Reporters. -// Applications can provide their own implementation of this interface to adapt -// reporters logging to whatever logging library they prefer (stdlib log, -// logrus, go-logging, etc). 
-type Logger interface { - // Error logs a message at error priority - Error(msg string) - - // Infof logs a message at info priority - Infof(msg string, args ...interface{}) -} - -// StdLogger is implementation of the Logger interface that delegates to default `log` package -var StdLogger = &stdLogger{} - -type stdLogger struct{} - -func (l *stdLogger) Error(msg string) { - log.Printf("ERROR: %s", msg) -} - -// Infof logs a message at info priority -func (l *stdLogger) Infof(msg string, args ...interface{}) { - log.Printf(msg, args...) -} - -// Debugf logs a message at debug priority -func (l *stdLogger) Debugf(msg string, args ...interface{}) { - log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...) -} - -// NullLogger is implementation of the Logger interface that is no-op -var NullLogger = &nullLogger{} - -type nullLogger struct{} - -func (l *nullLogger) Error(msg string) {} -func (l *nullLogger) Infof(msg string, args ...interface{}) {} -func (l *nullLogger) Debugf(msg string, args ...interface{}) {} - -// BytesBufferLogger implements Logger backed by a bytes.Buffer. -type BytesBufferLogger struct { - mux sync.Mutex - buf bytes.Buffer -} - -// Error implements Logger. -func (l *BytesBufferLogger) Error(msg string) { - l.mux.Lock() - l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg)) - l.mux.Unlock() -} - -// Infof implements Logger. -func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) { - l.mux.Lock() - l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n") - l.mux.Unlock() -} - -// Debugf implements Logger. -func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) { - l.mux.Lock() - l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n") - l.mux.Unlock() -} - -// String returns string representation of the underlying buffer. -func (l *BytesBufferLogger) String() string { - l.mux.Lock() - defer l.mux.Unlock() - return l.buf.String() -} - -// Flush empties the underlying buffer. 
-func (l *BytesBufferLogger) Flush() { - l.mux.Lock() - defer l.mux.Unlock() - l.buf.Reset() -} - -// DebugLogger is an interface which adds a debug logging level -type DebugLogger interface { - Logger - - // Debugf logs a message at debug priority - Debugf(msg string, args ...interface{}) -} - -// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger -// If the provided Logger doesn't satisfy the interface, a logger with debug -// disabled is returned -func DebugLogAdapter(logger Logger) DebugLogger { - if logger == nil { - return nil - } - if debugLogger, ok := logger.(DebugLogger); ok { - return debugLogger - } - logger.Infof("debug logging disabled") - return debugDisabledLogAdapter{logger: logger} -} - -type debugDisabledLogAdapter struct { - logger Logger -} - -func (d debugDisabledLogAdapter) Error(msg string) { - d.logger.Error(msg) -} - -func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) { - d.logger.Infof(msg, args...) -} - -// Debugf is a nop -func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) { -} diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go deleted file mode 100644 index d4f0b50192..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/logger.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import "log" - -// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead. - -// Logger provides an abstract interface for logging from Reporters. -// Applications can provide their own implementation of this interface to adapt -// reporters logging to whatever logging library they prefer (stdlib log, -// logrus, go-logging, etc). -type Logger interface { - // Error logs a message at error priority - Error(msg string) - - // Infof logs a message at info priority - Infof(msg string, args ...interface{}) -} - -// StdLogger is implementation of the Logger interface that delegates to default `log` package -var StdLogger = &stdLogger{} - -type stdLogger struct{} - -func (l *stdLogger) Error(msg string) { - log.Printf("ERROR: %s", msg) -} - -// Infof logs a message at info priority -func (l *stdLogger) Infof(msg string, args ...interface{}) { - log.Printf(msg, args...) -} - -// NullLogger is implementation of the Logger interface that delegates to default `log` package -var NullLogger = &nullLogger{} - -type nullLogger struct{} - -func (l *nullLogger) Error(msg string) {} -func (l *nullLogger) Infof(msg string, args ...interface{}) {} diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go deleted file mode 100644 index 50e4e22d6c..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/metrics.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/uber/jaeger-lib/metrics" -) - -// Metrics is a container of all stats emitted by Jaeger tracer. -type Metrics struct { - // Number of traces started by this tracer as sampled - TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"` - - // Number of traces started by this tracer as not sampled - TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"` - - // Number of traces started by this tracer with delayed sampling - TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"` - - // Number of externally started sampled traces this tracer joined - TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"` - - // Number of externally started not-sampled traces this tracer joined - TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"` - - // Number of sampled spans started by this tracer - SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"` - - // Number of not sampled spans started by this tracer - SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"` - - // Number of spans with delayed sampling started by this tracer - SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed 
sampling"` - - // Number of spans finished by this tracer - SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"` - - // Number of spans finished by this tracer - SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"` - - // Number of spans finished by this tracer - SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"` - - // Number of errors decoding tracing context - DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"` - - // Number of spans successfully reported - ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"` - - // Number of spans not reported due to a Sender failure - ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"` - - // Number of spans dropped due to internal queue overflow - ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"` - - // Current number of spans in the reporter queue - ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"` - - // Number of times the Sampler succeeded to retrieve sampling strategy - SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"` - - // Number of times the Sampler failed to retrieve sampling strategy - SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"` - - // Number of times the 
Sampler succeeded to retrieve and update sampling strategy - SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"` - - // Number of times the Sampler failed to update sampling strategy - SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"` - - // Number of times baggage was successfully written or updated on spans. - BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"` - - // Number of times baggage failed to write or update on spans. - BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"` - - // Number of times baggage was truncated as per baggage restrictions. - BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"` - - // Number of times baggage restrictions were successfully updated. - BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"` - - // Number of times baggage restrictions failed to update. - BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"` - - // Number of times debug spans were throttled. - ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"` - - // Number of times throttler successfully updated. 
- ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"` - - // Number of times throttler failed to update. - ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"` -} - -// NewMetrics creates a new Metrics struct and initializes it. -func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics { - m := &Metrics{} - // TODO the namespace "jaeger" should be configurable - metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags) - return m -} - -// NewNullMetrics creates a new Metrics struct that won't report any metrics. -func NewNullMetrics() *Metrics { - return NewMetrics(metrics.NullFactory, nil) -} diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go deleted file mode 100644 index 7bbd028897..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/observer.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import opentracing "github.com/opentracing/opentracing-go" - -// Observer can be registered with the Tracer to receive notifications about -// new Spans. -// -// Deprecated: use jaeger.ContribObserver instead. 
-type Observer interface { - OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver -} - -// SpanObserver is created by the Observer and receives notifications about -// other Span events. -// -// Deprecated: use jaeger.ContribSpanObserver instead. -type SpanObserver interface { - OnSetOperationName(operationName string) - OnSetTag(key string, value interface{}) - OnFinish(options opentracing.FinishOptions) -} - -// compositeObserver is a dispatcher to other observers -type compositeObserver struct { - observers []ContribObserver -} - -// compositeSpanObserver is a dispatcher to other span observers -type compositeSpanObserver struct { - observers []ContribSpanObserver -} - -// noopSpanObserver is used when there are no observers registered -// on the Tracer or none of them returns span observers from OnStartSpan. -var noopSpanObserver = &compositeSpanObserver{} - -func (o *compositeObserver) append(contribObserver ContribObserver) { - o.observers = append(o.observers, contribObserver) -} - -func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver { - var spanObservers []ContribSpanObserver - for _, obs := range o.observers { - spanObs, ok := obs.OnStartSpan(sp, operationName, options) - if ok { - if spanObservers == nil { - spanObservers = make([]ContribSpanObserver, 0, len(o.observers)) - } - spanObservers = append(spanObservers, spanObs) - } - } - if len(spanObservers) == 0 { - return noopSpanObserver - } - return &compositeSpanObserver{observers: spanObservers} -} - -func (o *compositeSpanObserver) OnSetOperationName(operationName string) { - for _, obs := range o.observers { - obs.OnSetOperationName(operationName) - } -} - -func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) { - for _, obs := range o.observers { - obs.OnSetTag(key, value) - } -} - -func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) { - 
for _, obs := range o.observers { - obs.OnFinish(options) - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go deleted file mode 100644 index 30cbf99624..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/process.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -// Process holds process specific metadata that's relevant to this client. -type Process struct { - Service string - UUID string - Tags []Tag -} - -// ProcessSetter sets a process. This can be used by any class that requires -// the process to be set as part of initialization. -// See internal/throttler/remote/throttler.go for an example. -type ProcessSetter interface { - SetProcess(process Process) -} diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go deleted file mode 100644 index e06459b98f..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/propagation.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "log" - "net/url" - "strings" - "sync" - - opentracing "github.com/opentracing/opentracing-go" -) - -// Injector is responsible for injecting SpanContext instances in a manner suitable -// for propagation via a format-specific "carrier" object. Typically the -// injection will take place across an RPC boundary, but message queues and -// other IPC mechanisms are also reasonable places to use an Injector. -type Injector interface { - // Inject takes `SpanContext` and injects it into `carrier`. The actual type - // of `carrier` depends on the `format` passed to `Tracer.Inject()`. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if injection fails. - Inject(ctx SpanContext, carrier interface{}) error -} - -// Extractor is responsible for extracting SpanContext instances from a -// format-specific "carrier" object. Typically the extraction will take place -// on the server side of an RPC boundary, but message queues and other IPC -// mechanisms are also reasonable places to use an Extractor. -type Extractor interface { - // Extract decodes a SpanContext instance from the given `carrier`, - // or (nil, opentracing.ErrSpanContextNotFound) if no context could - // be found in the `carrier`. 
- Extract(carrier interface{}) (SpanContext, error) -} - -// TextMapPropagator is a combined Injector and Extractor for TextMap format -type TextMapPropagator struct { - headerKeys *HeadersConfig - metrics Metrics - encodeValue func(string) string - decodeValue func(string) string -} - -// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format -func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator { - return &TextMapPropagator{ - headerKeys: headerKeys, - metrics: metrics, - encodeValue: func(val string) string { - return val - }, - decodeValue: func(val string) string { - return val - }, - } -} - -// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format -func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator { - return &TextMapPropagator{ - headerKeys: headerKeys, - metrics: metrics, - encodeValue: func(val string) string { - return url.QueryEscape(val) - }, - decodeValue: func(val string) string { - // ignore decoding errors, cannot do anything about them - if v, err := url.QueryUnescape(val); err == nil { - return v - } - return val - }, - } -} - -// BinaryPropagator is a combined Injector and Extractor for Binary format -type BinaryPropagator struct { - tracer *Tracer - buffers sync.Pool -} - -// NewBinaryPropagator creates a combined Injector and Extractor for Binary format -func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator { - return &BinaryPropagator{ - tracer: tracer, - buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}, - } -} - -// Inject implements Injector of TextMapPropagator -func (p *TextMapPropagator) Inject( - sc SpanContext, - abstractCarrier interface{}, -) error { - textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter) - if !ok { - return opentracing.ErrInvalidCarrier - } - - // Do not encode the string with trace context to avoid accidental double-encoding - // if people 
are using opentracing < 0.10.0. Our colon-separated representation - // of the trace context is already safe for HTTP headers. - textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String()) - for k, v := range sc.baggage { - safeKey := p.addBaggageKeyPrefix(k) - safeVal := p.encodeValue(v) - textMapWriter.Set(safeKey, safeVal) - } - return nil -} - -// Extract implements Extractor of TextMapPropagator -func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - textMapReader, ok := abstractCarrier.(opentracing.TextMapReader) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - var ctx SpanContext - var baggage map[string]string - err := textMapReader.ForeachKey(func(rawKey, value string) error { - key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap - if key == p.headerKeys.TraceContextHeaderName { - var err error - safeVal := p.decodeValue(value) - if ctx, err = ContextFromString(safeVal); err != nil { - return err - } - } else if key == p.headerKeys.JaegerDebugHeader { - ctx.debugID = p.decodeValue(value) - } else if key == p.headerKeys.JaegerBaggageHeader { - if baggage == nil { - baggage = make(map[string]string) - } - for k, v := range p.parseCommaSeparatedMap(value) { - baggage[k] = v - } - } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) { - if baggage == nil { - baggage = make(map[string]string) - } - safeKey := p.removeBaggageKeyPrefix(key) - safeVal := p.decodeValue(value) - baggage[safeKey] = safeVal - } - return nil - }) - if err != nil { - p.metrics.DecodingErrors.Inc(1) - return emptyContext, err - } - if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 { - return emptyContext, opentracing.ErrSpanContextNotFound - } - ctx.baggage = baggage - return ctx, nil -} - -// Inject implements Injector of BinaryPropagator -func (p *BinaryPropagator) Inject( - sc SpanContext, - abstractCarrier interface{}, -) error { - carrier, ok := 
abstractCarrier.(io.Writer) - if !ok { - return opentracing.ErrInvalidCarrier - } - - // Handle the tracer context - if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil { - return err - } - if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil { - return err - } - if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil { - return err - } - if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil { - return err - } - - // Handle the baggage items - if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil { - return err - } - for k, v := range sc.baggage { - if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil { - return err - } - io.WriteString(carrier, k) - if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil { - return err - } - io.WriteString(carrier, v) - } - - return nil -} - -// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits -const ( - maxBinaryBaggage = 180 - maxBinaryNameValueLen = 4096 -) - -// Extract implements Extractor of BinaryPropagator -func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - carrier, ok := abstractCarrier.(io.Reader) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - var ctx SpanContext - ctx.samplingState = &samplingState{} - - if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - - var flags byte - if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil { - return emptyContext, 
opentracing.ErrSpanContextCorrupted - } - ctx.samplingState.setFlags(flags) - - // Handle the baggage items - var numBaggage int32 - if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if numBaggage > maxBinaryBaggage { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if iNumBaggage := int(numBaggage); iNumBaggage > 0 { - ctx.baggage = make(map[string]string, iNumBaggage) - buf := p.buffers.Get().(*bytes.Buffer) - defer p.buffers.Put(buf) - - var keyLen, valLen int32 - for i := 0; i < iNumBaggage; i++ { - if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - buf.Reset() - buf.Grow(int(keyLen)) - if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - key := buf.String() - - if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - if keyLen+valLen > maxBinaryNameValueLen { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - buf.Reset() - buf.Grow(int(valLen)) - if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen { - return emptyContext, opentracing.ErrSpanContextCorrupted - } - ctx.baggage[key] = buf.String() - } - } - - return ctx, nil -} - -// Converts a comma separated key value pair list into a map -// e.g. 
key1=value1, key2=value2, key3 = value3 -// is converted to map[string]string { "key1" : "value1", -// "key2" : "value2", -// "key3" : "value3" } -func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string { - baggage := make(map[string]string) - value, err := url.QueryUnescape(value) - if err != nil { - log.Printf("Unable to unescape %s, %v", value, err) - return baggage - } - for _, kvpair := range strings.Split(value, ",") { - kv := strings.Split(strings.TrimSpace(kvpair), "=") - if len(kv) == 2 { - baggage[strings.TrimSpace(kv[0])] = kv[1] - } else { - log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader) - } - } - return baggage -} - -// Converts a baggage item key into an http header format, -// by prepending TraceBaggageHeaderPrefix and encoding the key string -func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string { - // TODO encodeBaggageKeyAsHeader add caching and escaping - return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key) -} - -func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string { - // TODO decodeBaggageHeaderKey add caching and escaping - return key[len(p.headerKeys.TraceBaggageHeaderPrefix):] -} diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go deleted file mode 100644 index 5646e78bb2..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/reference.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import "github.com/opentracing/opentracing-go" - -// Reference represents a causal reference to other Spans (via their SpanContext). -type Reference struct { - Type opentracing.SpanReferenceType - Context SpanContext -} diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go deleted file mode 100644 index a71a92c3e8..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/reporter.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/opentracing/opentracing-go" - - "github.com/uber/jaeger-client-go/internal/reporterstats" - "github.com/uber/jaeger-client-go/log" -) - -// Reporter is called by the tracer when a span is completed to report the span to the tracing collector. -type Reporter interface { - // Report submits a new span to collectors, possibly asynchronously and/or with buffering. - // If the reporter is processing Span asynchronously then it needs to Retain() the span, - // and then Release() it when no longer needed, to avoid span data corruption. - Report(span *Span) - - // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory. 
- Close() -} - -// ------------------------------ - -type nullReporter struct{} - -// NewNullReporter creates a no-op reporter that ignores all reported spans. -func NewNullReporter() Reporter { - return &nullReporter{} -} - -// Report implements Report() method of Reporter by doing nothing. -func (r *nullReporter) Report(span *Span) { - // no-op -} - -// Close implements Close() method of Reporter by doing nothing. -func (r *nullReporter) Close() { - // no-op -} - -// ------------------------------ - -type loggingReporter struct { - logger Logger -} - -// NewLoggingReporter creates a reporter that logs all reported spans to provided logger. -func NewLoggingReporter(logger Logger) Reporter { - return &loggingReporter{logger} -} - -// Report implements Report() method of Reporter by logging the span to the logger. -func (r *loggingReporter) Report(span *Span) { - r.logger.Infof("Reporting span %+v", span) -} - -// Close implements Close() method of Reporter by doing nothing. -func (r *loggingReporter) Close() { - // no-op -} - -// ------------------------------ - -// InMemoryReporter is used for testing, and simply collects spans in memory. -type InMemoryReporter struct { - spans []opentracing.Span - lock sync.Mutex -} - -// NewInMemoryReporter creates a reporter that stores spans in memory. -// NOTE: the Tracer should be created with options.PoolSpans = false. -func NewInMemoryReporter() *InMemoryReporter { - return &InMemoryReporter{ - spans: make([]opentracing.Span, 0, 10), - } -} - -// Report implements Report() method of Reporter by storing the span in the buffer. -func (r *InMemoryReporter) Report(span *Span) { - r.lock.Lock() - // Need to retain the span otherwise it will be released - r.spans = append(r.spans, span.Retain()) - r.lock.Unlock() -} - -// Close implements Close() method of Reporter -func (r *InMemoryReporter) Close() { - r.Reset() -} - -// SpansSubmitted returns the number of spans accumulated in the buffer. 
-func (r *InMemoryReporter) SpansSubmitted() int { - r.lock.Lock() - defer r.lock.Unlock() - return len(r.spans) -} - -// GetSpans returns accumulated spans as a copy of the buffer. -func (r *InMemoryReporter) GetSpans() []opentracing.Span { - r.lock.Lock() - defer r.lock.Unlock() - copied := make([]opentracing.Span, len(r.spans)) - copy(copied, r.spans) - return copied -} - -// Reset clears all accumulated spans. -func (r *InMemoryReporter) Reset() { - r.lock.Lock() - defer r.lock.Unlock() - - // Before reset the collection need to release Span memory - for _, span := range r.spans { - span.(*Span).Release() - } - r.spans = r.spans[:0] -} - -// ------------------------------ - -type compositeReporter struct { - reporters []Reporter -} - -// NewCompositeReporter creates a reporter that ignores all reported spans. -func NewCompositeReporter(reporters ...Reporter) Reporter { - return &compositeReporter{reporters: reporters} -} - -// Report implements Report() method of Reporter by delegating to each underlying reporter. -func (r *compositeReporter) Report(span *Span) { - for _, reporter := range r.reporters { - reporter.Report(span) - } -} - -// Close implements Close() method of Reporter by closing each underlying reporter. -func (r *compositeReporter) Close() { - for _, reporter := range r.reporters { - reporter.Close() - } -} - -// ------------- REMOTE REPORTER ----------------- - -type reporterQueueItemType int - -const ( - defaultQueueSize = 100 - defaultBufferFlushInterval = 1 * time.Second - - reporterQueueItemSpan reporterQueueItemType = iota - reporterQueueItemClose -) - -type reporterQueueItem struct { - itemType reporterQueueItemType - span *Span - close *sync.WaitGroup -} - -// reporterStats implements reporterstats.ReporterStats. -type reporterStats struct { - droppedCount int64 // provided to Transports to report data loss to the backend -} - -// SpansDroppedFromQueue implements reporterstats.ReporterStats. 
-func (r *reporterStats) SpansDroppedFromQueue() int64 { - return atomic.LoadInt64(&r.droppedCount) -} - -func (r *reporterStats) incDroppedCount() { - atomic.AddInt64(&r.droppedCount, 1) -} - -type remoteReporter struct { - // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - queueLength int64 // used to update metrics.Gauge - closed int64 // 0 - not closed, 1 - closed - - reporterOptions - - sender Transport - queue chan reporterQueueItem - reporterStats *reporterStats -} - -// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender. -// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped). -// Periodically the transport buffer is flushed even if it hasn't reached max packet size. -// Calls to Close() block until all spans reported prior to the call to Close are flushed. -func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter { - options := reporterOptions{} - for _, option := range opts { - option(&options) - } - if options.bufferFlushInterval <= 0 { - options.bufferFlushInterval = defaultBufferFlushInterval - } - if options.logger == nil { - options.logger = log.NullLogger - } - if options.metrics == nil { - options.metrics = NewNullMetrics() - } - if options.queueSize <= 0 { - options.queueSize = defaultQueueSize - } - reporter := &remoteReporter{ - reporterOptions: options, - sender: sender, - queue: make(chan reporterQueueItem, options.queueSize), - reporterStats: new(reporterStats), - } - if receiver, ok := sender.(reporterstats.Receiver); ok { - receiver.SetReporterStats(reporter.reporterStats) - } - go reporter.processQueue() - return reporter -} - -// Report implements Report() method of Reporter. -// It passes the span to a background go-routine for submission to Jaeger backend. 
-// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented. -// If Report() is called after the reporter has been Close()-ed, the additional spans will not be -// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly, -// because some of them may still be successfully added to the queue. -func (r *remoteReporter) Report(span *Span) { - select { - // Need to retain the span otherwise it will be released - case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}: - atomic.AddInt64(&r.queueLength, 1) - default: - r.metrics.ReporterDropped.Inc(1) - r.reporterStats.incDroppedCount() - } -} - -// Close implements Close() method of Reporter by waiting for the queue to be drained. -func (r *remoteReporter) Close() { - r.logger.Debugf("closing reporter") - if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped { - r.logger.Error("Repeated attempt to close the reporter is ignored") - return - } - r.sendCloseEvent() - _ = r.sender.Close() -} - -func (r *remoteReporter) sendCloseEvent() { - wg := &sync.WaitGroup{} - wg.Add(1) - item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg} - - r.queue <- item // if the queue is full we will block until there is space - atomic.AddInt64(&r.queueLength, 1) - wg.Wait() -} - -// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer. -// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger. -// Buffer also gets flushed automatically every batchFlushInterval seconds, just in case the tracer stopped -// reporting new spans. 
-func (r *remoteReporter) processQueue() { - // flush causes the Sender to flush its accumulated spans and clear the buffer - flush := func() { - if flushed, err := r.sender.Flush(); err != nil { - r.metrics.ReporterFailure.Inc(int64(flushed)) - r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error())) - } else if flushed > 0 { - r.metrics.ReporterSuccess.Inc(int64(flushed)) - } - } - - timer := time.NewTicker(r.bufferFlushInterval) - for { - select { - case <-timer.C: - flush() - case item := <-r.queue: - atomic.AddInt64(&r.queueLength, -1) - switch item.itemType { - case reporterQueueItemSpan: - span := item.span - if flushed, err := r.sender.Append(span); err != nil { - r.metrics.ReporterFailure.Inc(int64(flushed)) - r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error())) - } else if flushed > 0 { - r.metrics.ReporterSuccess.Inc(int64(flushed)) - // to reduce the number of gauge stats, we only emit queue length on flush - r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength)) - r.logger.Debugf("flushed %d spans", flushed) - } - span.Release() - case reporterQueueItemClose: - timer.Stop() - flush() - item.close.Done() - return - } - } - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go deleted file mode 100644 index 2fc030547e..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/reporter_options.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" - - "github.com/uber/jaeger-client-go/log" -) - -// ReporterOption is a function that sets some option on the reporter. -type ReporterOption func(c *reporterOptions) - -// ReporterOptions is a factory for all available ReporterOption's -var ReporterOptions reporterOptions - -// reporterOptions control behavior of the reporter. -type reporterOptions struct { - // queueSize is the size of internal queue where reported spans are stored before they are processed in the background - queueSize int - // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full - bufferFlushInterval time.Duration - // logger is used to log errors of span submissions - logger log.DebugLogger - // metrics is used to record runtime stats - metrics *Metrics -} - -// QueueSize creates a ReporterOption that sets the size of the internal queue where -// spans are stored before they are processed. -func (reporterOptions) QueueSize(queueSize int) ReporterOption { - return func(r *reporterOptions) { - r.queueSize = queueSize - } -} - -// Metrics creates a ReporterOption that initializes Metrics in the reporter, -// which is used to record runtime statistics. -func (reporterOptions) Metrics(metrics *Metrics) ReporterOption { - return func(r *reporterOptions) { - r.metrics = metrics - } -} - -// BufferFlushInterval creates a ReporterOption that sets how often the queue -// is force-flushed. 
-func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption { - return func(r *reporterOptions) { - r.bufferFlushInterval = bufferFlushInterval - } -} - -// Logger creates a ReporterOption that initializes the logger used to log -// errors of span submissions. -func (reporterOptions) Logger(logger Logger) ReporterOption { - return func(r *reporterOptions) { - r.logger = log.DebugLogAdapter(logger) - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md deleted file mode 100644 index 879948e9c9..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md +++ /dev/null @@ -1,5 +0,0 @@ -An Observer that can be used to emit RPC metrics -================================================ - -It can be attached to the tracer during tracer construction. -See `ExampleObserver` function in [observer_test.go](./observer_test.go). diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go deleted file mode 100644 index 51aa11b350..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rpcmetrics implements an Observer that can be used to emit RPC metrics. 
-package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go deleted file mode 100644 index 30555243d0..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -import "sync" - -// normalizedEndpoints is a cache for endpointName -> safeName mappings. -type normalizedEndpoints struct { - names map[string]string - maxSize int - defaultName string - normalizer NameNormalizer - mux sync.RWMutex -} - -func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints { - return &normalizedEndpoints{ - maxSize: maxSize, - normalizer: normalizer, - names: make(map[string]string, maxSize), - } -} - -// normalize looks up the name in the cache, if not found it uses normalizer -// to convert the name to a safe name. If called with more than maxSize unique -// names it returns "" for all other names beyond those already cached. 
-func (n *normalizedEndpoints) normalize(name string) string { - n.mux.RLock() - norm, ok := n.names[name] - l := len(n.names) - n.mux.RUnlock() - if ok { - return norm - } - if l >= n.maxSize { - return "" - } - return n.normalizeWithLock(name) -} - -func (n *normalizedEndpoints) normalizeWithLock(name string) string { - norm := n.normalizer.Normalize(name) - n.mux.Lock() - defer n.mux.Unlock() - // cache may have grown while we were not holding the lock - if len(n.names) >= n.maxSize { - return "" - } - n.names[name] = norm - return norm -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go deleted file mode 100644 index a8cec2fa68..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -import ( - "sync" - - "github.com/uber/jaeger-lib/metrics" -) - -const ( - otherEndpointsPlaceholder = "other" - endpointNameMetricTag = "endpoint" -) - -// Metrics is a collection of metrics for an endpoint describing -// throughput, success, errors, and performance. -type Metrics struct { - // RequestCountSuccess is a counter of the total number of successes. 
- RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"` - - // RequestCountFailures is a counter of the number of times any failure has been observed. - RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"` - - // RequestLatencySuccess is a latency histogram of successful requests. - RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"` - - // RequestLatencyFailures is a latency histogram of failed requests. - RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"` - - // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299 - HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"` - - // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399 - HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"` - - // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499 - HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"` - - // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599 - HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"` -} - -func (m *Metrics) recordHTTPStatusCode(statusCode uint16) { - if statusCode >= 200 && statusCode < 300 { - m.HTTPStatusCode2xx.Inc(1) - } else if statusCode >= 300 && statusCode < 400 { - m.HTTPStatusCode3xx.Inc(1) - } else if statusCode >= 400 && statusCode < 500 { - m.HTTPStatusCode4xx.Inc(1) - } else if statusCode >= 500 && statusCode < 600 { - m.HTTPStatusCode5xx.Inc(1) - } -} - -// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name. -// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped -// to a generic endpoint name "other". 
-type MetricsByEndpoint struct { - metricsFactory metrics.Factory - endpoints *normalizedEndpoints - metricsByEndpoint map[string]*Metrics - mux sync.RWMutex -} - -func newMetricsByEndpoint( - metricsFactory metrics.Factory, - normalizer NameNormalizer, - maxNumberOfEndpoints int, -) *MetricsByEndpoint { - return &MetricsByEndpoint{ - metricsFactory: metricsFactory, - endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer), - metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other" - } -} - -func (m *MetricsByEndpoint) get(endpoint string) *Metrics { - safeName := m.endpoints.normalize(endpoint) - if safeName == "" { - safeName = otherEndpointsPlaceholder - } - m.mux.RLock() - met := m.metricsByEndpoint[safeName] - m.mux.RUnlock() - if met != nil { - return met - } - - return m.getWithWriteLock(safeName) -} - -// split to make easier to test -func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics { - m.mux.Lock() - defer m.mux.Unlock() - - // it is possible that the name has been already registered after we released - // the read lock and before we grabbed the write lock, so check for that. - if met, ok := m.metricsByEndpoint[safeName]; ok { - return met - } - - // it would be nice to create the struct before locking, since Init() is somewhat - // expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics. - met := &Metrics{} - tags := map[string]string{endpointNameMetricTag: safeName} - metrics.Init(met, m.metricsFactory, tags) - - m.metricsByEndpoint[safeName] = met - return met -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go deleted file mode 100644 index 148d84b3a1..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -// NameNormalizer is used to convert the endpoint names to strings -// that can be safely used as tags in the metrics. -type NameNormalizer interface { - Normalize(name string) string -} - -// DefaultNameNormalizer converts endpoint names so that they contain only characters -// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'. -var DefaultNameNormalizer = &SimpleNameNormalizer{ - SafeSets: []SafeCharacterSet{ - &Range{From: 'a', To: 'z'}, - &Range{From: 'A', To: 'Z'}, - &Range{From: '0', To: '9'}, - &Char{'-'}, - &Char{'_'}, - &Char{'/'}, - &Char{'.'}, - }, - Replacement: '-', -} - -// SimpleNameNormalizer uses a set of safe character sets. -type SimpleNameNormalizer struct { - SafeSets []SafeCharacterSet - Replacement byte -} - -// SafeCharacterSet determines if the given character is "safe" -type SafeCharacterSet interface { - IsSafe(c byte) bool -} - -// Range implements SafeCharacterSet -type Range struct { - From, To byte -} - -// IsSafe implements SafeCharacterSet -func (r *Range) IsSafe(c byte) bool { - return c >= r.From && c <= r.To -} - -// Char implements SafeCharacterSet -type Char struct { - Val byte -} - -// IsSafe implements SafeCharacterSet -func (ch *Char) IsSafe(c byte) bool { - return c == ch.Val -} - -// Normalize checks each character in the string against SafeSets, -// and if it's not safe substitutes it with Replacement. 
-func (n *SimpleNameNormalizer) Normalize(name string) string { - var retMe []byte - nameBytes := []byte(name) - for i, b := range nameBytes { - if n.safeByte(b) { - if retMe != nil { - retMe[i] = b - } - } else { - if retMe == nil { - retMe = make([]byte, len(nameBytes)) - copy(retMe[0:i], nameBytes[0:i]) - } - retMe[i] = n.Replacement - } - } - if retMe == nil { - return name - } - return string(retMe) -} - -// safeByte checks if b against all safe charsets. -func (n *SimpleNameNormalizer) safeByte(b byte) bool { - for i := range n.SafeSets { - if n.SafeSets[i].IsSafe(b) { - return true - } - } - return false -} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go deleted file mode 100644 index eca5ff6f3b..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpcmetrics - -import ( - "strconv" - "sync" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/uber/jaeger-lib/metrics" - - jaeger "github.com/uber/jaeger-client-go" -) - -const defaultMaxNumberOfEndpoints = 200 - -// Observer is an observer that can emit RPC metrics. 
-type Observer struct { - metricsByEndpoint *MetricsByEndpoint -} - -// NewObserver creates a new observer that can emit RPC metrics. -func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer { - return &Observer{ - metricsByEndpoint: newMetricsByEndpoint( - metricsFactory, - normalizer, - defaultMaxNumberOfEndpoints, - ), - } -} - -// OnStartSpan creates a new Observer for the span. -func (o *Observer) OnStartSpan( - operationName string, - options opentracing.StartSpanOptions, -) jaeger.SpanObserver { - return NewSpanObserver(o.metricsByEndpoint, operationName, options) -} - -// SpanKind identifies the span as inboud, outbound, or internal -type SpanKind int - -const ( - // Local span kind - Local SpanKind = iota - // Inbound span kind - Inbound - // Outbound span kind - Outbound -) - -// SpanObserver collects RPC metrics -type SpanObserver struct { - metricsByEndpoint *MetricsByEndpoint - operationName string - startTime time.Time - mux sync.Mutex - kind SpanKind - httpStatusCode uint16 - err bool -} - -// NewSpanObserver creates a new SpanObserver that can emit RPC metrics. 
-func NewSpanObserver( - metricsByEndpoint *MetricsByEndpoint, - operationName string, - options opentracing.StartSpanOptions, -) *SpanObserver { - so := &SpanObserver{ - metricsByEndpoint: metricsByEndpoint, - operationName: operationName, - startTime: options.StartTime, - } - for k, v := range options.Tags { - so.handleTagInLock(k, v) - } - return so -} - -// handleTags watches for special tags -// - SpanKind -// - HttpStatusCode -// - Error -func (so *SpanObserver) handleTagInLock(key string, value interface{}) { - if key == string(ext.SpanKind) { - if v, ok := value.(ext.SpanKindEnum); ok { - value = string(v) - } - if v, ok := value.(string); ok { - if v == string(ext.SpanKindRPCClientEnum) { - so.kind = Outbound - } else if v == string(ext.SpanKindRPCServerEnum) { - so.kind = Inbound - } - } - return - } - if key == string(ext.HTTPStatusCode) { - if v, ok := value.(uint16); ok { - so.httpStatusCode = v - } else if v, ok := value.(int); ok { - so.httpStatusCode = uint16(v) - } else if v, ok := value.(string); ok { - if vv, err := strconv.Atoi(v); err == nil { - so.httpStatusCode = uint16(vv) - } - } - return - } - if key == string(ext.Error) { - if v, ok := value.(bool); ok { - so.err = v - } else if v, ok := value.(string); ok { - if vv, err := strconv.ParseBool(v); err == nil { - so.err = vv - } - } - return - } -} - -// OnFinish emits the RPC metrics. It only has an effect when operation name -// is not blank, and the span kind is an RPC server. 
-func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) { - so.mux.Lock() - defer so.mux.Unlock() - - if so.operationName == "" || so.kind != Inbound { - return - } - - mets := so.metricsByEndpoint.get(so.operationName) - latency := options.FinishTime.Sub(so.startTime) - if so.err { - mets.RequestCountFailures.Inc(1) - mets.RequestLatencyFailures.Record(latency) - } else { - mets.RequestCountSuccess.Inc(1) - mets.RequestLatencySuccess.Record(latency) - } - mets.recordHTTPStatusCode(so.httpStatusCode) -} - -// OnSetOperationName records new operation name. -func (so *SpanObserver) OnSetOperationName(operationName string) { - so.mux.Lock() - so.operationName = operationName - so.mux.Unlock() -} - -// OnSetTag implements SpanObserver -func (so *SpanObserver) OnSetTag(key string, value interface{}) { - so.mux.Lock() - so.handleTagInLock(key, value) - so.mux.Unlock() -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go deleted file mode 100644 index d0be8ad500..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/sampler.go +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "fmt" - "math" - "strings" - "sync" - - "github.com/uber/jaeger-client-go/thrift-gen/sampling" - "github.com/uber/jaeger-client-go/utils" -) - -const ( - defaultMaxOperations = 2000 -) - -// Sampler decides whether a new trace should be sampled or not. -type Sampler interface { - // IsSampled decides whether a trace with given `id` and `operation` - // should be sampled. This function will also return the tags that - // can be used to identify the type of sampling that was applied to - // the root span. Most simple samplers would return two tags, - // sampler.type and sampler.param, similar to those used in the Configuration - IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) - - // Close does a clean shutdown of the sampler, stopping any background - // go-routines it may have started. - Close() - - // Equal checks if the `other` sampler is functionally equivalent - // to this sampler. - // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation. - Equal(other Sampler) bool -} - -// ----------------------- - -// ConstSampler is a sampler that always makes the same decision. -type ConstSampler struct { - legacySamplerV1Base - Decision bool - tags []Tag -} - -// NewConstSampler creates a ConstSampler. -func NewConstSampler(sample bool) *ConstSampler { - tags := []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeConst}, - {key: SamplerParamTagKey, value: sample}, - } - s := &ConstSampler{ - Decision: sample, - tags: tags, - } - s.delegate = s.IsSampled - return s -} - -// IsSampled implements IsSampled() of Sampler. -func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.Decision, s.tags -} - -// Close implements Close() of Sampler. -func (s *ConstSampler) Close() { - // nothing to do -} - -// Equal implements Equal() of Sampler. 
-func (s *ConstSampler) Equal(other Sampler) bool { - if o, ok := other.(*ConstSampler); ok { - return s.Decision == o.Decision - } - return false -} - -// String is used to log sampler details. -func (s *ConstSampler) String() string { - return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision) -} - -// ----------------------- - -// ProbabilisticSampler is a sampler that randomly samples a certain percentage -// of traces. -type ProbabilisticSampler struct { - legacySamplerV1Base - samplingRate float64 - samplingBoundary uint64 - tags []Tag -} - -const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff - -// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the -// samplingRate, in the range between 0.0 and 1.0. -// -// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision -// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63). 
-// TODO remove the error from this function for next major release -func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) { - if samplingRate < 0.0 || samplingRate > 1.0 { - return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) - } - return newProbabilisticSampler(samplingRate), nil -} - -func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler { - s := new(ProbabilisticSampler) - s.delegate = s.IsSampled - return s.init(samplingRate) -} - -func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler { - s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0)) - s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate) - s.tags = []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic}, - {key: SamplerParamTagKey, value: s.samplingRate}, - } - return s -} - -// SamplingRate returns the sampling probability this sampled was constructed with. -func (s *ProbabilisticSampler) SamplingRate() float64 { - return s.samplingRate -} - -// IsSampled implements IsSampled() of Sampler. -func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags -} - -// Close implements Close() of Sampler. -func (s *ProbabilisticSampler) Close() { - // nothing to do -} - -// Equal implements Equal() of Sampler. -func (s *ProbabilisticSampler) Equal(other Sampler) bool { - if o, ok := other.(*ProbabilisticSampler); ok { - return s.samplingBoundary == o.samplingBoundary - } - return false -} - -// Update modifies in-place the sampling rate. Locking must be done externally. -func (s *ProbabilisticSampler) Update(samplingRate float64) error { - if samplingRate < 0.0 || samplingRate > 1.0 { - return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate) - } - s.init(samplingRate) - return nil -} - -// String is used to log sampler details. 
-func (s *ProbabilisticSampler) String() string { - return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate) -} - -// ----------------------- - -// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows -// burstiness of the service, i.e. a service with uniformly distributed requests will have those -// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a -// number of sequential requests can be sampled each second. -type RateLimitingSampler struct { - legacySamplerV1Base - maxTracesPerSecond float64 - rateLimiter *utils.ReconfigurableRateLimiter - tags []Tag -} - -// NewRateLimitingSampler creates new RateLimitingSampler. -func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler { - s := new(RateLimitingSampler) - s.delegate = s.IsSampled - return s.init(maxTracesPerSecond) -} - -func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler { - if s.rateLimiter == nil { - s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) - } else { - s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)) - } - s.maxTracesPerSecond = maxTracesPerSecond - s.tags = []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting}, - {key: SamplerParamTagKey, value: maxTracesPerSecond}, - } - return s -} - -// IsSampled implements IsSampled() of Sampler. -func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return s.rateLimiter.CheckCredit(1.0), s.tags -} - -// Update reconfigures the rate limiter, while preserving its accumulated balance. -// Locking must be done externally. -func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) { - if s.maxTracesPerSecond != maxTracesPerSecond { - s.init(maxTracesPerSecond) - } -} - -// Close does nothing. 
-func (s *RateLimitingSampler) Close() { - // nothing to do -} - -// Equal compares with another sampler. -func (s *RateLimitingSampler) Equal(other Sampler) bool { - if o, ok := other.(*RateLimitingSampler); ok { - return s.maxTracesPerSecond == o.maxTracesPerSecond - } - return false -} - -// String is used to log sampler details. -func (s *RateLimitingSampler) String() string { - return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond) -} - -// ----------------------- - -// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and -// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that -// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound -// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes. -// -// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both -// samplers return true, the tags for ProbabilisticSampler will be used. -type GuaranteedThroughputProbabilisticSampler struct { - probabilisticSampler *ProbabilisticSampler - lowerBoundSampler *RateLimitingSampler - tags []Tag - samplingRate float64 - lowerBound float64 -} - -// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both -// ProbabilisticSampler and RateLimitingSampler. 
-func NewGuaranteedThroughputProbabilisticSampler( - lowerBound, samplingRate float64, -) (*GuaranteedThroughputProbabilisticSampler, error) { - return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil -} - -func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler { - s := &GuaranteedThroughputProbabilisticSampler{ - lowerBoundSampler: NewRateLimitingSampler(lowerBound), - lowerBound: lowerBound, - } - s.setProbabilisticSampler(samplingRate) - return s -} - -func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) { - if s.probabilisticSampler == nil { - s.probabilisticSampler = newProbabilisticSampler(samplingRate) - } else if s.samplingRate != samplingRate { - s.probabilisticSampler.init(samplingRate) - } - // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval - samplingRate = s.probabilisticSampler.SamplingRate() - if s.samplingRate != samplingRate || s.tags == nil { - s.samplingRate = s.probabilisticSampler.SamplingRate() - s.tags = []Tag{ - {key: SamplerTypeTagKey, value: SamplerTypeLowerBound}, - {key: SamplerParamTagKey, value: s.samplingRate}, - } - } -} - -// IsSampled implements IsSampled() of Sampler. -func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled { - s.lowerBoundSampler.IsSampled(id, operation) - return true, tags - } - sampled, _ := s.lowerBoundSampler.IsSampled(id, operation) - return sampled, s.tags -} - -// Close implements Close() of Sampler. -func (s *GuaranteedThroughputProbabilisticSampler) Close() { - s.probabilisticSampler.Close() - s.lowerBoundSampler.Close() -} - -// Equal implements Equal() of Sampler. 
-func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for - // more information. - return false -} - -// this function should only be called while holding a Write lock -func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) { - s.setProbabilisticSampler(samplingRate) - if s.lowerBound != lowerBound { - s.lowerBoundSampler.Update(lowerBound) - s.lowerBound = lowerBound - } -} - -func (s GuaranteedThroughputProbabilisticSampler) String() string { - return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate) -} - -// ----------------------- - -// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler -// on a per-operation basis. -type PerOperationSampler struct { - sync.RWMutex - - samplers map[string]*GuaranteedThroughputProbabilisticSampler - defaultSampler *ProbabilisticSampler - lowerBound float64 - maxOperations int - - // see description in PerOperationSamplerParams - operationNameLateBinding bool -} - -// NewAdaptiveSampler returns a new PerOperationSampler. -// Deprecated: please use NewPerOperationSampler. -func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) { - return NewPerOperationSampler(PerOperationSamplerParams{ - MaxOperations: maxOperations, - Strategies: strategies, - }), nil -} - -// PerOperationSamplerParams defines parameters when creating PerOperationSampler. -type PerOperationSamplerParams struct { - // Max number of operations that will be tracked. Other operations will be given default strategy. - MaxOperations int - - // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName. 
- // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving - // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance - // in applications that always provide the correct span name on trace creation. - // - // For backwards compatibility this option is off by default. - OperationNameLateBinding bool - - // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler). - Strategies *sampling.PerOperationSamplingStrategies -} - -// NewPerOperationSampler returns a new PerOperationSampler. -func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler { - if params.MaxOperations <= 0 { - params.MaxOperations = defaultMaxOperations - } - samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler) - for _, strategy := range params.Strategies.PerOperationStrategies { - sampler := newGuaranteedThroughputProbabilisticSampler( - params.Strategies.DefaultLowerBoundTracesPerSecond, - strategy.ProbabilisticSampling.SamplingRate, - ) - samplers[strategy.Operation] = sampler - } - return &PerOperationSampler{ - samplers: samplers, - defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability), - lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond, - maxOperations: params.MaxOperations, - operationNameLateBinding: params.OperationNameLateBinding, - } -} - -// IsSampled is not used and only exists to match Sampler V1 API. 
-// TODO (breaking change) remove when upgrading everything to SamplerV2 -func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return false, nil -} - -func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) { - samplerV1 := s.getSamplerForOperation(operationName) - var sampled bool - var tags []Tag - if span.context.samplingState.isLocalRootSpan(span.context.spanID) { - sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName) - } - return sampled, tags -} - -// OnCreateSpan implements OnCreateSpan of SamplerV2. -func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision { - sampled, tags := s.trySampling(span, span.OperationName()) - return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags} -} - -// OnSetOperationName implements OnSetOperationName of SamplerV2. -func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { - sampled, tags := s.trySampling(span, operationName) - return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags} -} - -// OnSetTag implements OnSetTag of SamplerV2. -func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { - return SamplingDecision{Sample: false, Retryable: true} -} - -// OnFinishSpan implements OnFinishSpan of SamplerV2. -func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision { - return SamplingDecision{Sample: false, Retryable: true} -} - -func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler { - s.RLock() - sampler, ok := s.samplers[operation] - if ok { - defer s.RUnlock() - return sampler - } - s.RUnlock() - s.Lock() - defer s.Unlock() - - // Check if sampler has already been created - sampler, ok = s.samplers[operation] - if ok { - return sampler - } - // Store only up to maxOperations of unique ops. 
- if len(s.samplers) >= s.maxOperations { - return s.defaultSampler - } - newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate()) - s.samplers[operation] = newSampler - return newSampler -} - -// Close invokes Close on all underlying samplers. -func (s *PerOperationSampler) Close() { - s.Lock() - defer s.Unlock() - for _, sampler := range s.samplers { - sampler.Close() - } - s.defaultSampler.Close() -} - -func (s *PerOperationSampler) String() string { - var sb strings.Builder - - fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler) - fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound) - fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations) - fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding) - fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers)) - fmt.Fprintf(&sb, "samplers=[") - for operationName, sampler := range s.samplers { - fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler) - } - fmt.Fprintf(&sb, "])") - - return sb.String() -} - -// Equal is not used. -// TODO (breaking change) remove this in the future -func (s *PerOperationSampler) Equal(other Sampler) bool { - // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple - // samplers which all need to be initialized before this function can be called for a comparison. - // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need - // changing. Hence this function always returns false so that the update function can be called. - // Once the Equal() function is removed from the Sampler API, this will no longer be needed. 
- return false -} - -func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) { - s.Lock() - defer s.Unlock() - newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{} - for _, strategy := range strategies.PerOperationStrategies { - operation := strategy.Operation - samplingRate := strategy.ProbabilisticSampling.SamplingRate - lowerBound := strategies.DefaultLowerBoundTracesPerSecond - if sampler, ok := s.samplers[operation]; ok { - sampler.update(lowerBound, samplingRate) - newSamplers[operation] = sampler - } else { - sampler := newGuaranteedThroughputProbabilisticSampler( - lowerBound, - samplingRate, - ) - newSamplers[operation] = sampler - } - } - s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond - if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability { - s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability) - } - s.samplers = newSamplers -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go deleted file mode 100644 index 119f0a1bb6..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "sync" - "sync/atomic" - "time" - - "github.com/uber/jaeger-client-go/log" - "github.com/uber/jaeger-client-go/thrift-gen/sampling" -) - -const ( - defaultRemoteSamplingTimeout = 10 * time.Second - defaultSamplingRefreshInterval = time.Minute -) - -// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server. -type SamplingStrategyFetcher interface { - Fetch(service string) ([]byte, error) -} - -// SamplingStrategyParser is used to parse sampling strategy updates. The output object -// should be of the type that is recognized by the SamplerUpdaters. -type SamplingStrategyParser interface { - Parse(response []byte) (interface{}, error) -} - -// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies, -// retrieved from remote config server, to the current sampler. The updater can modify -// the sampler in-place if sampler supports it, or create a new one. -// -// If the strategy does not contain configuration for the sampler in question, -// updater must return modifiedSampler=nil to give other updaters a chance to inspect -// the sampling strategy response. -// -// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler. -type SamplerUpdater interface { - Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error) -} - -// RemotelyControlledSampler is a delegating sampler that polls a remote server -// for the appropriate sampling strategy, constructs a corresponding sampler and -// delegates to it for sampling decisions. -type RemotelyControlledSampler struct { - // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment. - // Cf. 
https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq - closed int64 // 0 - not closed, 1 - closed - - sync.RWMutex // used to serialize access to samplerOptions.sampler - samplerOptions - - serviceName string - doneChan chan *sync.WaitGroup -} - -// NewRemotelyControlledSampler creates a sampler that periodically pulls -// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent). -func NewRemotelyControlledSampler( - serviceName string, - opts ...SamplerOption, -) *RemotelyControlledSampler { - options := new(samplerOptions).applyOptionsAndDefaults(opts...) - sampler := &RemotelyControlledSampler{ - samplerOptions: *options, - serviceName: serviceName, - doneChan: make(chan *sync.WaitGroup), - } - go sampler.pollController() - return sampler -} - -// IsSampled implements IsSampled() of Sampler. -// TODO (breaking change) remove when Sampler V1 is removed -func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) { - return false, nil -} - -// OnCreateSpan implements OnCreateSpan of SamplerV2. -func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision { - s.RLock() - defer s.RUnlock() - return s.sampler.OnCreateSpan(span) -} - -// OnSetOperationName implements OnSetOperationName of SamplerV2. -func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision { - s.RLock() - defer s.RUnlock() - return s.sampler.OnSetOperationName(span, operationName) -} - -// OnSetTag implements OnSetTag of SamplerV2. -func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { - s.RLock() - defer s.RUnlock() - return s.sampler.OnSetTag(span, key, value) -} - -// OnFinishSpan implements OnFinishSpan of SamplerV2. -func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision { - s.RLock() - defer s.RUnlock() - return s.sampler.OnFinishSpan(span) -} - -// Close implements Close() of Sampler. 
-func (s *RemotelyControlledSampler) Close() { - if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped { - s.logger.Error("Repeated attempt to close the sampler is ignored") - return - } - - var wg sync.WaitGroup - wg.Add(1) - s.doneChan <- &wg - wg.Wait() -} - -// Equal implements Equal() of Sampler. -func (s *RemotelyControlledSampler) Equal(other Sampler) bool { - // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for - // more information. - return false -} - -func (s *RemotelyControlledSampler) pollController() { - ticker := time.NewTicker(s.samplingRefreshInterval) - defer ticker.Stop() - s.pollControllerWithTicker(ticker) -} - -func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) { - for { - select { - case <-ticker.C: - s.UpdateSampler() - case wg := <-s.doneChan: - wg.Done() - return - } - } -} - -// Sampler returns the currently active sampler. -func (s *RemotelyControlledSampler) Sampler() SamplerV2 { - s.RLock() - defer s.RUnlock() - return s.sampler -} -func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) { - s.Lock() - defer s.Unlock() - s.sampler = sampler -} - -// UpdateSampler forces the sampler to fetch sampling strategy from backend server. -// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests. 
-func (s *RemotelyControlledSampler) UpdateSampler() { - res, err := s.samplingFetcher.Fetch(s.serviceName) - if err != nil { - s.metrics.SamplerQueryFailure.Inc(1) - s.logger.Infof("failed to fetch sampling strategy: %v", err) - return - } - strategy, err := s.samplingParser.Parse(res) - if err != nil { - s.metrics.SamplerUpdateFailure.Inc(1) - s.logger.Infof("failed to parse sampling strategy response: %v", err) - return - } - - s.Lock() - defer s.Unlock() - - s.metrics.SamplerRetrieved.Inc(1) - if err := s.updateSamplerViaUpdaters(strategy); err != nil { - s.metrics.SamplerUpdateFailure.Inc(1) - s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err) - return - } - s.metrics.SamplerUpdated.Inc(1) -} - -// NB: this function should only be called while holding a Write lock -func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error { - for _, updater := range s.updaters { - sampler, err := updater.Update(s.sampler, strategy) - if err != nil { - return err - } - if sampler != nil { - s.logger.Debugf("sampler updated: %+v", sampler) - s.sampler = sampler - return nil - } - } - return fmt.Errorf("unsupported sampling strategy %+v", strategy) -} - -// ----------------------- - -// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. -type ProbabilisticSamplerUpdater struct{} - -// Update implements Update of SamplerUpdater. 
-func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { - type response interface { - GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy - } - var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check - if resp, ok := strategy.(response); ok { - if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil { - if ps, ok := sampler.(*ProbabilisticSampler); ok { - if err := ps.Update(probabilistic.SamplingRate); err != nil { - return nil, err - } - return sampler, nil - } - return newProbabilisticSampler(probabilistic.SamplingRate), nil - } - } - return nil, nil -} - -// ----------------------- - -// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. -type RateLimitingSamplerUpdater struct{} - -// Update implements Update of SamplerUpdater. -func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { - type response interface { - GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy - } - var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check - if resp, ok := strategy.(response); ok { - if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil { - rateLimit := float64(rateLimiting.MaxTracesPerSecond) - if rl, ok := sampler.(*RateLimitingSampler); ok { - rl.Update(rateLimit) - return rl, nil - } - return NewRateLimitingSampler(rateLimit), nil - } - } - return nil, nil -} - -// ----------------------- - -// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration. -// Fields have the same meaning as in PerOperationSamplerParams. -type AdaptiveSamplerUpdater struct { - MaxOperations int - OperationNameLateBinding bool -} - -// Update implements Update of SamplerUpdater. 
-func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) { - type response interface { - GetOperationSampling() *sampling.PerOperationSamplingStrategies - } - var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check - if p, ok := strategy.(response); ok { - if operations := p.GetOperationSampling(); operations != nil { - if as, ok := sampler.(*PerOperationSampler); ok { - as.update(operations) - return as, nil - } - return NewPerOperationSampler(PerOperationSamplerParams{ - MaxOperations: u.MaxOperations, - OperationNameLateBinding: u.OperationNameLateBinding, - Strategies: operations, - }), nil - } - } - return nil, nil -} - -// ----------------------- - -type httpSamplingStrategyFetcher struct { - serverURL string - logger log.DebugLogger - httpClient http.Client -} - -func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher { - customTransport := http.DefaultTransport.(*http.Transport).Clone() - customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout - - return &httpSamplingStrategyFetcher{ - serverURL: serverURL, - logger: logger, - httpClient: http.Client{ - Transport: customTransport, - }, - } -} - -func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) { - v := url.Values{} - v.Set("service", serviceName) - uri := f.serverURL + "?" 
+ v.Encode() - - resp, err := f.httpClient.Get(uri) - if err != nil { - return nil, err - } - - defer func() { - if err := resp.Body.Close(); err != nil { - f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err)) - } - }() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) - } - - return body, nil -} - -// ----------------------- - -type samplingStrategyParser struct{} - -func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) { - strategy := new(sampling.SamplingStrategyResponse) - if err := json.Unmarshal(response, strategy); err != nil { - return nil, err - } - return strategy, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go deleted file mode 100644 index 64b028bf32..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "time" - - "github.com/uber/jaeger-client-go/log" -) - -// SamplerOption is a function that sets some option on the sampler -type SamplerOption func(options *samplerOptions) - -// SamplerOptions is a factory for all available SamplerOption's. 
-var SamplerOptions SamplerOptionsFactory - -// SamplerOptionsFactory is a factory for all available SamplerOption's. -// The type acts as a namespace for factory functions. It is public to -// make the functions discoverable via godoc. Recommended to be used -// via global SamplerOptions variable. -type SamplerOptionsFactory struct{} - -type samplerOptions struct { - metrics *Metrics - sampler SamplerV2 - logger log.DebugLogger - samplingServerURL string - samplingRefreshInterval time.Duration - samplingFetcher SamplingStrategyFetcher - samplingParser SamplingStrategyParser - updaters []SamplerUpdater - posParams PerOperationSamplerParams -} - -// Metrics creates a SamplerOption that initializes Metrics on the sampler, -// which is used to emit statistics. -func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption { - return func(o *samplerOptions) { - o.metrics = m - } -} - -// MaxOperations creates a SamplerOption that sets the maximum number of -// operations the sampler will keep track of. -func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption { - return func(o *samplerOptions) { - o.posParams.MaxOperations = maxOperations - } -} - -// OperationNameLateBinding creates a SamplerOption that sets the respective -// field in the PerOperationSamplerParams. -func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption { - return func(o *samplerOptions) { - o.posParams.OperationNameLateBinding = enable - } -} - -// InitialSampler creates a SamplerOption that sets the initial sampler -// to use before a remote sampler is created and used. -func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption { - return func(o *samplerOptions) { - o.sampler = samplerV1toV2(sampler) - } -} - -// Logger creates a SamplerOption that sets the logger used by the sampler. 
-func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption { - return func(o *samplerOptions) { - o.logger = log.DebugLogAdapter(logger) - } -} - -// SamplingServerURL creates a SamplerOption that sets the sampling server url -// of the local agent that contains the sampling strategies. -func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption { - return func(o *samplerOptions) { - o.samplingServerURL = samplingServerURL - } -} - -// SamplingRefreshInterval creates a SamplerOption that sets how often the -// sampler will poll local agent for the appropriate sampling strategy. -func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption { - return func(o *samplerOptions) { - o.samplingRefreshInterval = samplingRefreshInterval - } -} - -// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher. -func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption { - return func(o *samplerOptions) { - o.samplingFetcher = fetcher - } -} - -// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser. -func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption { - return func(o *samplerOptions) { - o.samplingParser = parser - } -} - -// Updaters creates a SamplerOption that initializes sampler updaters. 
-func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption { - return func(o *samplerOptions) { - o.updaters = updaters - } -} - -func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions { - for _, option := range opts { - option(o) - } - if o.sampler == nil { - o.sampler = newProbabilisticSampler(0.001) - } - if o.logger == nil { - o.logger = log.NullLogger - } - if o.samplingServerURL == "" { - o.samplingServerURL = DefaultSamplingServerURL - } - if o.metrics == nil { - o.metrics = NewNullMetrics() - } - if o.samplingRefreshInterval <= 0 { - o.samplingRefreshInterval = defaultSamplingRefreshInterval - } - if o.samplingFetcher == nil { - o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger) - } - if o.samplingParser == nil { - o.samplingParser = new(samplingStrategyParser) - } - if o.updaters == nil { - o.updaters = []SamplerUpdater{ - &AdaptiveSamplerUpdater{ - MaxOperations: o.posParams.MaxOperations, - OperationNameLateBinding: o.posParams.OperationNameLateBinding, - }, - new(ProbabilisticSamplerUpdater), - new(RateLimitingSamplerUpdater), - } - } - return o -} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go deleted file mode 100644 index a50671a236..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -// SamplingDecision is returned by the V2 samplers. -type SamplingDecision struct { - Sample bool - Retryable bool - Tags []Tag -} - -// SamplerV2 is an extension of the V1 samplers that allows sampling decisions -// be made at different points of the span lifecycle. -type SamplerV2 interface { - OnCreateSpan(span *Span) SamplingDecision - OnSetOperationName(span *Span, operationName string) SamplingDecision - OnSetTag(span *Span, key string, value interface{}) SamplingDecision - OnFinishSpan(span *Span) SamplingDecision - - // Close does a clean shutdown of the sampler, stopping any background - // go-routines it may have started. - Close() -} - -// samplerV1toV2 wraps legacy V1 sampler into an adapter that make it look like V2. -func samplerV1toV2(s Sampler) SamplerV2 { - if s2, ok := s.(SamplerV2); ok { - return s2 - } - type legacySamplerV1toV2Adapter struct { - legacySamplerV1Base - } - return &legacySamplerV1toV2Adapter{ - legacySamplerV1Base: legacySamplerV1Base{ - delegate: s.IsSampled, - }, - } -} - -// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods. -// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler -// for backwards compatibility reasons. -// TODO (breaking change) remove this in the next major release -type SamplerV2Base struct{} - -// IsSampled implements IsSampled of Sampler. -func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) { - return false, nil -} - -// Close implements Close of Sampler. -func (SamplerV2Base) Close() {} - -// Equal implements Equal of Sampler. -func (SamplerV2Base) Equal(other Sampler) bool { return false } - -// legacySamplerV1Base is used as a base for simple samplers that only implement -// the legacy isSampled() function that is not sensitive to its arguments. 
-type legacySamplerV1Base struct { - delegate func(id TraceID, operation string) (sampled bool, tags []Tag) -} - -func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision { - isSampled, tags := s.delegate(span.context.traceID, span.operationName) - return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} -} - -func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision { - isSampled, tags := s.delegate(span.context.traceID, span.operationName) - return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags} -} - -func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision { - return SamplingDecision{Sample: false, Retryable: true} -} - -func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision { - return SamplingDecision{Sample: false, Retryable: true} -} - -func (s *legacySamplerV1Base) Close() {} diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go deleted file mode 100644 index 997cffdd88..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/span.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" -) - -// Span implements opentracing.Span -type Span struct { - // referenceCounter used to increase the lifetime of - // the object before return it into the pool. - referenceCounter int32 - - sync.RWMutex - - tracer *Tracer - - // TODO: (breaking change) change to use a pointer - context SpanContext - - // The name of the "operation" this span is an instance of. - // Known as a "span name" in some implementations. - operationName string - - // firstInProcess, if true, indicates that this span is the root of the (sub)tree - // of spans in the current process. In other words it's true for the root spans, - // and the ingress spans when the process joins another trace. - firstInProcess bool - - // startTime is the timestamp indicating when the span began, with microseconds precision. - startTime time.Time - - // duration returns duration of the span with microseconds precision. - // Zero value means duration is unknown. - duration time.Duration - - // tags attached to this span - tags []Tag - - // The span's "micro-log" - logs []opentracing.LogRecord - - // The number of logs dropped because of MaxLogsPerSpan. - numDroppedLogs int - - // references for this span - references []Reference - - observer ContribSpanObserver -} - -// Tag is a simple key value wrapper. -// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. -type Tag struct { - key string - value interface{} -} - -// NewTag creates a new Tag. -// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. -func NewTag(key string, value interface{}) Tag { - return Tag{key: key, value: value} -} - -// SetOperationName sets or changes the operation name. 
-func (s *Span) SetOperationName(operationName string) opentracing.Span { - s.Lock() - s.operationName = operationName - ctx := s.context - s.Unlock() - if !ctx.isSamplingFinalized() { - decision := s.tracer.sampler.OnSetOperationName(s, operationName) - s.applySamplingDecision(decision, true) - } - s.observer.OnSetOperationName(operationName) - return s -} - -// SetTag implements SetTag() of opentracing.Span -func (s *Span) SetTag(key string, value interface{}) opentracing.Span { - return s.setTagInternal(key, value, true) -} - -func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span { - var ctx SpanContext - var operationName string - if lock { - ctx = s.SpanContext() - operationName = s.OperationName() - } else { - ctx = s.context - operationName = s.operationName - } - - s.observer.OnSetTag(key, value) - if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) { - return s - } - if !ctx.isSamplingFinalized() { - decision := s.tracer.sampler.OnSetTag(s, key, value) - s.applySamplingDecision(decision, lock) - } - if ctx.isWriteable() { - if lock { - s.Lock() - defer s.Unlock() - } - s.appendTagNoLocking(key, value) - } - return s -} - -// SpanContext returns span context -func (s *Span) SpanContext() SpanContext { - s.Lock() - defer s.Unlock() - return s.context -} - -// StartTime returns span start time -func (s *Span) StartTime() time.Time { - s.Lock() - defer s.Unlock() - return s.startTime -} - -// Duration returns span duration -func (s *Span) Duration() time.Duration { - s.Lock() - defer s.Unlock() - return s.duration -} - -// Tags returns tags for span -func (s *Span) Tags() opentracing.Tags { - s.Lock() - defer s.Unlock() - var result = make(opentracing.Tags, len(s.tags)) - for _, tag := range s.tags { - result[tag.key] = tag.value - } - return result -} - -// Logs returns micro logs for span -func (s *Span) Logs() []opentracing.LogRecord { - s.Lock() - defer 
s.Unlock() - - logs := append([]opentracing.LogRecord(nil), s.logs...) - if s.numDroppedLogs != 0 { - fixLogs(logs, s.numDroppedLogs) - } - - return logs -} - -// References returns references for this span -func (s *Span) References() []opentracing.SpanReference { - s.Lock() - defer s.Unlock() - - if s.references == nil || len(s.references) == 0 { - return nil - } - - result := make([]opentracing.SpanReference, len(s.references)) - for i, r := range s.references { - result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context} - } - return result -} - -func (s *Span) appendTagNoLocking(key string, value interface{}) { - s.tags = append(s.tags, Tag{key: key, value: value}) -} - -// LogFields implements opentracing.Span API -func (s *Span) LogFields(fields ...log.Field) { - s.Lock() - defer s.Unlock() - if !s.context.IsSampled() { - return - } - s.logFieldsNoLocking(fields...) -} - -// this function should only be called while holding a Write lock -func (s *Span) logFieldsNoLocking(fields ...log.Field) { - lr := opentracing.LogRecord{ - Fields: fields, - Timestamp: time.Now(), - } - s.appendLogNoLocking(lr) -} - -// LogKV implements opentracing.Span API -func (s *Span) LogKV(alternatingKeyValues ...interface{}) { - s.RLock() - sampled := s.context.IsSampled() - s.RUnlock() - if !sampled { - return - } - fields, err := log.InterleavedKVToFields(alternatingKeyValues...) - if err != nil { - s.LogFields(log.Error(err), log.String("function", "LogKV")) - return - } - s.LogFields(fields...) 
-} - -// LogEvent implements opentracing.Span API -func (s *Span) LogEvent(event string) { - s.Log(opentracing.LogData{Event: event}) -} - -// LogEventWithPayload implements opentracing.Span API -func (s *Span) LogEventWithPayload(event string, payload interface{}) { - s.Log(opentracing.LogData{Event: event, Payload: payload}) -} - -// Log implements opentracing.Span API -func (s *Span) Log(ld opentracing.LogData) { - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - if ld.Timestamp.IsZero() { - ld.Timestamp = s.tracer.timeNow() - } - s.appendLogNoLocking(ld.ToLogRecord()) - } -} - -// this function should only be called while holding a Write lock -func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) { - maxLogs := s.tracer.options.maxLogsPerSpan - if maxLogs == 0 || len(s.logs) < maxLogs { - s.logs = append(s.logs, lr) - return - } - - // We have too many logs. We don't touch the first numOld logs; we treat the - // rest as a circular buffer and overwrite the oldest log among those. - numOld := (maxLogs - 1) / 2 - numNew := maxLogs - numOld - s.logs[numOld+s.numDroppedLogs%numNew] = lr - s.numDroppedLogs++ -} - -// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at -// the end (i.e. pos circular left shifts). -func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { - // This algorithm is described in: - // http://www.cplusplus.com/reference/algorithm/rotate - for first, middle, next := 0, pos, pos; first != middle; { - buf[first], buf[next] = buf[next], buf[first] - first++ - next++ - if next == len(buf) { - next = middle - } else if first == middle { - middle = next - } - } -} - -func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) { - // We dropped some log events, which means that we used part of Logs as a - // circular buffer (see appendLog). De-circularize it. 
- numOld := (len(logs) - 1) / 2 - numNew := len(logs) - numOld - rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew) - - // Replace the log in the middle (the oldest "new" log) with information - // about the dropped logs. This means that we are effectively dropping one - // more "new" log. - numDropped := numDroppedLogs + 1 - logs[numOld] = opentracing.LogRecord{ - // Keep the timestamp of the last dropped event. - Timestamp: logs[numOld].Timestamp, - Fields: []log.Field{ - log.String("event", "dropped Span logs"), - log.Int("dropped_log_count", numDropped), - log.String("component", "jaeger-client"), - }, - } -} - -func (s *Span) fixLogsIfDropped() { - if s.numDroppedLogs == 0 { - return - } - fixLogs(s.logs, s.numDroppedLogs) - s.numDroppedLogs = 0 -} - -// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext. -// The call is proxied via tracer.baggageSetter to allow policies to be applied -// before allowing to set/replace baggage keys. -// The setter eventually stores a new SpanContext with extended baggage: -// -// span.context = span.context.WithBaggageItem(key, value) -// -// See SpanContext.WithBaggageItem() for explanation why it's done this way. -func (s *Span) SetBaggageItem(key, value string) opentracing.Span { - s.Lock() - defer s.Unlock() - s.tracer.setBaggage(s, key, value) - return s -} - -// BaggageItem implements BaggageItem() of opentracing.SpanContext -func (s *Span) BaggageItem(key string) string { - s.RLock() - defer s.RUnlock() - return s.context.baggage[key] -} - -// Finish implements opentracing.Span API -// After finishing the Span object it returns back to the allocator unless the reporter retains it again, -// so after that, the Span object should no longer be used because it won't be valid anymore. 
-func (s *Span) Finish() { - s.FinishWithOptions(opentracing.FinishOptions{}) -} - -// FinishWithOptions implements opentracing.Span API -func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { - if options.FinishTime.IsZero() { - options.FinishTime = s.tracer.timeNow() - } - s.observer.OnFinish(options) - s.Lock() - s.duration = options.FinishTime.Sub(s.startTime) - ctx := s.context - s.Unlock() - if !ctx.isSamplingFinalized() { - decision := s.tracer.sampler.OnFinishSpan(s) - s.applySamplingDecision(decision, true) - } - if ctx.IsSampled() { - s.Lock() - s.fixLogsIfDropped() - if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 { - // Note: bulk logs are not subject to maxLogsPerSpan limit - if options.LogRecords != nil { - s.logs = append(s.logs, options.LogRecords...) - } - for _, ld := range options.BulkLogData { - s.logs = append(s.logs, ld.ToLogRecord()) - } - } - s.Unlock() - } - // call reportSpan even for non-sampled traces, to return span to the pool - // and update metrics counter - s.tracer.reportSpan(s) -} - -// Context implements opentracing.Span API -func (s *Span) Context() opentracing.SpanContext { - s.Lock() - defer s.Unlock() - return s.context -} - -// Tracer implements opentracing.Span API -func (s *Span) Tracer() opentracing.Tracer { - return s.tracer -} - -func (s *Span) String() string { - s.RLock() - defer s.RUnlock() - return s.context.String() -} - -// OperationName allows retrieving current operation name. 
-func (s *Span) OperationName() string { - s.RLock() - defer s.RUnlock() - return s.operationName -} - -// Retain increases object counter to increase the lifetime of the object -func (s *Span) Retain() *Span { - atomic.AddInt32(&s.referenceCounter, 1) - return s -} - -// Release decrements object counter and return to the -// allocator manager when counter will below zero -func (s *Span) Release() { - if atomic.AddInt32(&s.referenceCounter, -1) == -1 { - s.tracer.spanAllocator.Put(s) - } -} - -// reset span state and release unused data -func (s *Span) reset() { - s.firstInProcess = false - s.context = emptyContext - s.operationName = "" - s.tracer = nil - s.startTime = time.Time{} - s.duration = 0 - s.observer = nil - atomic.StoreInt32(&s.referenceCounter, 0) - - // Note: To reuse memory we can save the pointers on the heap - s.tags = s.tags[:0] - s.logs = s.logs[:0] - s.numDroppedLogs = 0 - s.references = s.references[:0] -} - -func (s *Span) serviceName() string { - return s.tracer.serviceName -} - -func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) { - var ctx SpanContext - if lock { - ctx = s.SpanContext() - } else { - ctx = s.context - } - - if !decision.Retryable { - ctx.samplingState.setFinal() - } - if decision.Sample { - ctx.samplingState.setSampled() - if len(decision.Tags) > 0 { - if lock { - s.Lock() - defer s.Unlock() - } - for _, tag := range decision.Tags { - s.appendTagNoLocking(tag.key, tag.value) - } - } - } -} - -// setSamplingPriority returns true if the flag was updated successfully, false otherwise. 
-// The behavior of setSamplingPriority is surprising -// If noDebugFlagOnForcedSampling is set -// setSamplingPriority(..., 1) always sets only flagSampled -// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes -// setSamplingPriority(..., 1) sets both flagSampled and flagDebug -// However, -// setSamplingPriority(..., 0) always only resets flagSampled -// -// This means that doing a setSamplingPriority(..., 1) followed by setSamplingPriority(..., 0) can -// leave flagDebug set -func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool { - val, ok := value.(uint16) - if !ok { - return false - } - if val == 0 { - state.unsetSampled() - state.setFinal() - return true - } - if tracer.options.noDebugFlagOnForcedSampling { - state.setSampled() - state.setFinal() - return true - } else if tracer.isDebugAllowed(operationName) { - state.setDebugAndSampled() - state.setFinal() - return true - } - return false -} - -// EnableFirehose enables firehose flag on the span context -func EnableFirehose(s *Span) { - s.Lock() - defer s.Unlock() - s.context.samplingState.setFirehose() -} diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go deleted file mode 100644 index fba1e43379..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import "sync" - -// SpanAllocator abstraction of managing span allocations -type SpanAllocator interface { - Get() *Span - Put(*Span) -} - -type syncPollSpanAllocator struct { - spanPool sync.Pool -} - -func newSyncPollSpanAllocator() SpanAllocator { - return &syncPollSpanAllocator{ - spanPool: sync.Pool{New: func() interface{} { - return &Span{} - }}, - } -} - -func (pool *syncPollSpanAllocator) Get() *Span { - return pool.spanPool.Get().(*Span) -} - -func (pool *syncPollSpanAllocator) Put(span *Span) { - span.reset() - pool.spanPool.Put(span) -} - -type simpleSpanAllocator struct{} - -func (pool simpleSpanAllocator) Get() *Span { - return &Span{} -} - -func (pool simpleSpanAllocator) Put(span *Span) { - // @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351 - // since finished spans are not reused, no need to reset them - // span.reset() -} diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go deleted file mode 100644 index 5b2307be91..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/span_context.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "errors" - "fmt" - "strconv" - "strings" - "sync" - - "go.uber.org/atomic" -) - -const ( - flagSampled = 1 - flagDebug = 2 - flagFirehose = 8 -) - -var ( - errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state") - errMalformedTracerStateString = errors.New("String does not match tracer state format") - - emptyContext = SpanContext{} -) - -// TraceID represents unique 128bit identifier of a trace -type TraceID struct { - High, Low uint64 -} - -// SpanID represents unique 64bit identifier of a span -type SpanID uint64 - -// SpanContext represents propagated span identity and state -type SpanContext struct { - // traceID represents globally unique ID of the trace. - // Usually generated as a random number. - traceID TraceID - - // spanID represents span ID that must be unique within its trace, - // but does not have to be globally unique. - spanID SpanID - - // parentID refers to the ID of the parent span. - // Should be 0 if the current span is a root span. - parentID SpanID - - // Distributed Context baggage. The is a snapshot in time. - baggage map[string]string - - // debugID can be set to some correlation ID when the context is being - // extracted from a TextMap carrier. - // - // See JaegerDebugHeader in constants.go - debugID string - - // samplingState is shared across all spans - samplingState *samplingState - - // remote indicates that span context represents a remote parent - remote bool -} - -type samplingState struct { - // Span context's state flags that are propagated across processes. Only lower 8 bits are used. - // We use an int32 instead of byte to be able to use CAS operations. - stateFlags atomic.Int32 - - // When state is not final, sampling will be retried on other span write operations, - // like SetOperationName / SetTag, and the spans will remain writable. - final atomic.Bool - - // localRootSpan stores the SpanID of the first span created in this process for a given trace. 
- localRootSpan SpanID - - // extendedState allows samplers to keep intermediate state. - // The keys and values in this map are completely opaque: interface{} -> interface{}. - extendedState sync.Map -} - -func (s *samplingState) isLocalRootSpan(id SpanID) bool { - return id == s.localRootSpan -} - -func (s *samplingState) setFlag(newFlag int32) { - swapped := false - for !swapped { - old := s.stateFlags.Load() - swapped = s.stateFlags.CAS(old, old|newFlag) - } -} - -func (s *samplingState) unsetFlag(newFlag int32) { - swapped := false - for !swapped { - old := s.stateFlags.Load() - swapped = s.stateFlags.CAS(old, old&^newFlag) - } -} - -func (s *samplingState) setSampled() { - s.setFlag(flagSampled) -} - -func (s *samplingState) unsetSampled() { - s.unsetFlag(flagSampled) -} - -func (s *samplingState) setDebugAndSampled() { - s.setFlag(flagDebug | flagSampled) -} - -func (s *samplingState) setFirehose() { - s.setFlag(flagFirehose) -} - -func (s *samplingState) setFlags(flags byte) { - s.stateFlags.Store(int32(flags)) -} - -func (s *samplingState) setFinal() { - s.final.Store(true) -} - -func (s *samplingState) flags() byte { - return byte(s.stateFlags.Load()) -} - -func (s *samplingState) isSampled() bool { - return s.stateFlags.Load()&flagSampled == flagSampled -} - -func (s *samplingState) isDebug() bool { - return s.stateFlags.Load()&flagDebug == flagDebug -} - -func (s *samplingState) isFirehose() bool { - return s.stateFlags.Load()&flagFirehose == flagFirehose -} - -func (s *samplingState) isFinal() bool { - return s.final.Load() -} - -func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} { - if value, ok := s.extendedState.Load(key); ok { - return value - } - value := initValue() - value, _ = s.extendedState.LoadOrStore(key, value) - return value -} - -// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext -func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - 
for k, v := range c.baggage { - if !handler(k, v) { - break - } - } -} - -// IsSampled returns whether this trace was chosen for permanent storage -// by the sampling mechanism of the tracer. -func (c SpanContext) IsSampled() bool { - return c.samplingState.isSampled() -} - -// IsDebug indicates whether sampling was explicitly requested by the service. -func (c SpanContext) IsDebug() bool { - return c.samplingState.isDebug() -} - -// IsSamplingFinalized indicates whether the sampling decision has been finalized. -func (c SpanContext) IsSamplingFinalized() bool { - return c.samplingState.isFinal() -} - -// IsFirehose indicates whether the firehose flag was set -func (c SpanContext) IsFirehose() bool { - return c.samplingState.isFirehose() -} - -// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist, -// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler). -func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} { - return c.samplingState.extendedStateForKey(key, initValue) -} - -// IsValid indicates whether this context actually represents a valid trace. -func (c SpanContext) IsValid() bool { - return c.traceID.IsValid() && c.spanID != 0 -} - -// SetFirehose enables firehose mode for this trace. 
-func (c SpanContext) SetFirehose() { - c.samplingState.setFirehose() -} - -func (c SpanContext) String() string { - var flags int32 - if c.samplingState != nil { - flags = c.samplingState.stateFlags.Load() - } - if c.traceID.High == 0 { - return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) - } - return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags) -} - -// ContextFromString reconstructs the Context encoded in a string -func ContextFromString(value string) (SpanContext, error) { - var context SpanContext - if value == "" { - return emptyContext, errEmptyTracerStateString - } - parts := strings.Split(value, ":") - if len(parts) != 4 { - return emptyContext, errMalformedTracerStateString - } - var err error - if context.traceID, err = TraceIDFromString(parts[0]); err != nil { - return emptyContext, err - } - if context.spanID, err = SpanIDFromString(parts[1]); err != nil { - return emptyContext, err - } - if context.parentID, err = SpanIDFromString(parts[2]); err != nil { - return emptyContext, err - } - flags, err := strconv.ParseUint(parts[3], 10, 8) - if err != nil { - return emptyContext, err - } - context.samplingState = &samplingState{} - context.samplingState.setFlags(byte(flags)) - return context, nil -} - -// TraceID returns the trace ID of this span context -func (c SpanContext) TraceID() TraceID { - return c.traceID -} - -// SpanID returns the span ID of this span context -func (c SpanContext) SpanID() SpanID { - return c.spanID -} - -// ParentID returns the parent span ID of this span context -func (c SpanContext) ParentID() SpanID { - return c.parentID -} - -// Flags returns the bitmap containing such bits as 'sampled' and 'debug'. -func (c SpanContext) Flags() byte { - return c.samplingState.flags() -} - -// Span can be written to if it is sampled or the sampling decision has not been finalized. 
-func (c SpanContext) isWriteable() bool { - state := c.samplingState - return !state.isFinal() || state.isSampled() -} - -func (c SpanContext) isSamplingFinalized() bool { - return c.samplingState.isFinal() -} - -// NewSpanContext creates a new instance of SpanContext -func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { - samplingState := &samplingState{} - if sampled { - samplingState.setSampled() - } - - return SpanContext{ - traceID: traceID, - spanID: spanID, - parentID: parentID, - samplingState: samplingState, - baggage: baggage} -} - -// CopyFrom copies data from ctx into this context, including span identity and baggage. -// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing. -func (c *SpanContext) CopyFrom(ctx *SpanContext) { - c.traceID = ctx.traceID - c.spanID = ctx.spanID - c.parentID = ctx.parentID - c.samplingState = ctx.samplingState - if l := len(ctx.baggage); l > 0 { - c.baggage = make(map[string]string, l) - for k, v := range ctx.baggage { - c.baggage[k] = v - } - } else { - c.baggage = nil - } -} - -// WithBaggageItem creates a new context with an extra baggage item. -// Delete a baggage item if provided blank value. -// -// The SpanContext is designed to be immutable and passed by value. As such, -// it cannot contain any locks, and should only hold immutable data, including baggage. -// Another reason for why baggage is immutable is when the span context is passed -// as a parent when starting a new span. The new span's baggage cannot affect the parent -// span's baggage, so the child span either needs to take a copy of the parent baggage -// (which is expensive and unnecessary since baggage rarely changes in the life span of -// a trace), or it needs to do a copy-on-write, which is the approach taken here. 
-func (c SpanContext) WithBaggageItem(key, value string) SpanContext { - var newBaggage map[string]string - // unset baggage item - if value == "" { - if _, ok := c.baggage[key]; !ok { - return c - } - newBaggage = make(map[string]string, len(c.baggage)) - for k, v := range c.baggage { - newBaggage[k] = v - } - delete(newBaggage, key) - return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} - } - if c.baggage == nil { - newBaggage = map[string]string{key: value} - } else { - newBaggage = make(map[string]string, len(c.baggage)+1) - for k, v := range c.baggage { - newBaggage[k] = v - } - newBaggage[key] = value - } - // Use positional parameters so the compiler will help catch new fields. - return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} -} - -// isDebugIDContainerOnly returns true when the instance of the context is only -// used to return the debug/correlation ID from extract() method. This happens -// in the situation when "jaeger-debug-id" header is passed in the carrier to -// the extract() method, but the request otherwise has no span context in it. -// Previously this would've returned opentracing.ErrSpanContextNotFound from the -// extract method, but now it returns a dummy context with only debugID filled in. 
-// -// See JaegerDebugHeader in constants.go -// See TextMapPropagator#Extract -func (c *SpanContext) isDebugIDContainerOnly() bool { - return !c.traceID.IsValid() && c.debugID != "" -} - -// ------- TraceID ------- - -func (t TraceID) String() string { - if t.High == 0 { - return fmt.Sprintf("%016x", t.Low) - } - return fmt.Sprintf("%016x%016x", t.High, t.Low) -} - -// TraceIDFromString creates a TraceID from a hexadecimal string -func TraceIDFromString(s string) (TraceID, error) { - var hi, lo uint64 - var err error - if len(s) > 32 { - return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) - } else if len(s) > 16 { - hiLen := len(s) - 16 - if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { - return TraceID{}, err - } - if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { - return TraceID{}, err - } - } else { - if lo, err = strconv.ParseUint(s, 16, 64); err != nil { - return TraceID{}, err - } - } - return TraceID{High: hi, Low: lo}, nil -} - -// IsValid checks if the trace ID is valid, i.e. not zero. -func (t TraceID) IsValid() bool { - return t.High != 0 || t.Low != 0 -} - -// ------- SpanID ------- - -func (s SpanID) String() string { - return fmt.Sprintf("%016x", uint64(s)) -} - -// SpanIDFromString creates a SpanID from a hexadecimal string -func SpanIDFromString(s string) (SpanID, error) { - if len(s) > 16 { - return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) - } - id, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return SpanID(0), err - } - return SpanID(id), nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go deleted file mode 100644 index a0df507797..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
- -package agent - -import( - "bytes" - "context" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" - -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ - -func init() { -} - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go deleted file mode 100644 index 6472e84e69..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package agent - -import( - "bytes" - "context" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" - -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -var _ = jaeger.GoUnusedProtection__ -var _ = zipkincore.GoUnusedProtection__ -type Agent interface { - // Parameters: - // - Spans - EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) - // Parameters: - // - Batch - EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) -} - -type AgentClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewAgentClient(c thrift.TClient) *AgentClient { - return &AgentClient{ - c: c, - } -} - -func (p *AgentClient) Client_() thrift.TClient { - return p.c -} - -func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Spans -func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { - var _args0 AgentEmitZipkinBatchArgs - _args0.Spans = spans - p.SetLastResponseMeta_(thrift.ResponseMeta{}) - if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { - return err - } - return nil -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) { - var _args1 AgentEmitBatchArgs - _args1.Batch = batch - p.SetLastResponseMeta_(thrift.ResponseMeta{}) - if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { - return err - } - return nil -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - 
handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler} - self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler} -return self2 -} - -func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { return false, thrift.WrapTException(err2) } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x3.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x3 - -} - -type agentProcessorEmitZipkinBatch struct { - handler Agent -} - -func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitZipkinBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - _ = tickerCancel - - if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil { 
- tickerCancel() - return true, thrift.WrapTException(err2) - } - tickerCancel() - return true, nil -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - _ = tickerCancel - - if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { - tickerCancel() - return true, thrift.WrapTException(err2) - } - tickerCancel() - return true, nil -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type AgentEmitZipkinBatchArgs struct { - Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { - return &AgentEmitZipkinBatchArgs{} -} - - -func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { - return p.Spans -} -func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*zipkincore.Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i ++ { - _elem4 := &zipkincore.Span{} - if err := _elem4.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Spans = append(p.Spans, _elem4) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - 
return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) } - return err -} - -func (p *AgentEmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch -func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } -return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.Batch = &jaeger.Batch{} - if err := p.Batch.Read(ctx, iprot); 
err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) } - if err := p.Batch.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} - - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go deleted file mode 100644 index 712b6a9da4..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
- -package baggage - -var GoUnusedProtection__ int; - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go deleted file mode 100644 index 39b5a7ee79..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package baggage - -import( - "bytes" - "context" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - - -func init() { -} - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go deleted file mode 100644 index e4d89d5d51..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go +++ /dev/null @@ -1,565 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package baggage - -import( - "bytes" - "context" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -// Attributes: -// - BaggageKey -// - MaxValueLength -type BaggageRestriction struct { - BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"` - MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"` -} - -func NewBaggageRestriction() *BaggageRestriction { - return &BaggageRestriction{} -} - - -func (p *BaggageRestriction) GetBaggageKey() string { - return p.BaggageKey -} - -func (p *BaggageRestriction) GetMaxValueLength() int32 { - return p.MaxValueLength -} -func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetBaggageKey bool = false; - var issetMaxValueLength bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetBaggageKey = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetMaxValueLength = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetBaggageKey{ - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")); - } - if !issetMaxValueLength{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")); - } - return nil -} - -func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.BaggageKey = v -} - return nil -} - -func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.MaxValueLength = v -} - return nil -} - -func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) } - if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field end error 1:baggageKey: ", p), err) } - return err -} - -func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) } - return err -} - -func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.BaggageKey != other.BaggageKey { return false } - if p.MaxValueLength != other.MaxValueLength { return false } - return true -} - -func (p *BaggageRestriction) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestriction(%+v)", *p) -} - -type BaggageRestrictionManager interface { - // getBaggageRestrictions retrieves the baggage restrictions for a specific service. - // Usually, baggageRestrictions apply to all services however there may be situations - // where a baggageKey might only be allowed to be set by a specific service. 
- // - // Parameters: - // - ServiceName - GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) -} - -type BaggageRestrictionManagerClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: c, - } -} - -func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient { - return p.c -} - -func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// getBaggageRestrictions retrieves the baggage restrictions for a specific service. -// Usually, baggageRestrictions apply to all services however there may be situations -// where a baggageKey might only be allowed to be set by a specific service. 
-// -// Parameters: -// - ServiceName -func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) { - var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs - _args0.ServiceName = serviceName - var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult - var _meta1 thrift.ResponseMeta - _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2) - p.SetLastResponseMeta_(_meta1) - if _err != nil { - return - } - return _result2.GetSuccess(), nil -} - -type BaggageRestrictionManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler BaggageRestrictionManager -} - -func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { - - self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler} -return self3 -} - -func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { return false, thrift.WrapTException(err2) } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - 
iprot.ReadMessageEnd(ctx) - x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x4.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x4 - -} - -type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { - handler BaggageRestrictionManager -} - -func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - var retval []*BaggageRestriction - if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error()) - oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { - ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` -} - 
-func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { - return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} -} - - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { - return p.ServiceName -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.ServiceName = v -} - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := 
oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) -} - -// Attributes: -// - Success -type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { - Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { - return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} -} - -var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { - return p.Success -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { 
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BaggageRestriction, 0, size) - p.Success = tSlice - for i := 0; i < size; i ++ { - _elem5 := &BaggageRestriction{} - if err := _elem5.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) - } - p.Success = append(p.Success, _elem5) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return 
thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) -} - - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go deleted file mode 100644 index d55cca0241..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go +++ /dev/null @@ -1,2698 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package jaeger - -import( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -type TagType int64 -const ( - TagType_STRING TagType = 0 - TagType_DOUBLE TagType = 1 - TagType_BOOL TagType = 2 - TagType_LONG TagType = 3 - TagType_BINARY TagType = 4 -) - -func (p TagType) String() string { - switch p { - case TagType_STRING: return "STRING" - case TagType_DOUBLE: return "DOUBLE" - case TagType_BOOL: return "BOOL" - case TagType_LONG: return "LONG" - case TagType_BINARY: return "BINARY" - } - return "" -} - -func TagTypeFromString(s string) (TagType, error) { - switch s { - case "STRING": return TagType_STRING, nil - case "DOUBLE": return TagType_DOUBLE, nil - case "BOOL": return TagType_BOOL, nil - case "LONG": return TagType_LONG, nil - case "BINARY": return TagType_BINARY, nil - } - return TagType(0), fmt.Errorf("not a valid TagType string") -} - - -func TagTypePtr(v TagType) *TagType { return &v } - -func (p TagType) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *TagType) UnmarshalText(text []byte) error { -q, err := TagTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *TagType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = TagType(v) -return nil -} - -func (p * TagType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -type SpanRefType int64 -const ( - SpanRefType_CHILD_OF SpanRefType = 0 - SpanRefType_FOLLOWS_FROM SpanRefType = 1 -) - -func (p SpanRefType) String() string { - switch p { - case SpanRefType_CHILD_OF: return "CHILD_OF" - case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM" - } - return "" -} - -func SpanRefTypeFromString(s string) (SpanRefType, error) { - switch s { - case "CHILD_OF": return SpanRefType_CHILD_OF, nil - case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil - } - return SpanRefType(0), fmt.Errorf("not 
a valid SpanRefType string") -} - - -func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } - -func (p SpanRefType) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *SpanRefType) UnmarshalText(text []byte) error { -q, err := SpanRefTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *SpanRefType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = SpanRefType(v) -return nil -} - -func (p * SpanRefType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -// Attributes: -// - Key -// - VType -// - VStr -// - VDouble -// - VBool -// - VLong -// - VBinary -type Tag struct { - Key string `thrift:"key,1,required" db:"key" json:"key"` - VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` - VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` - VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` - VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` - VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` - VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"` -} - -func NewTag() *Tag { - return &Tag{} -} - - -func (p *Tag) GetKey() string { - return p.Key -} - -func (p *Tag) GetVType() TagType { - return p.VType -} -var Tag_VStr_DEFAULT string -func (p *Tag) GetVStr() string { - if !p.IsSetVStr() { - return Tag_VStr_DEFAULT - } -return *p.VStr -} -var Tag_VDouble_DEFAULT float64 -func (p *Tag) GetVDouble() float64 { - if !p.IsSetVDouble() { - return Tag_VDouble_DEFAULT - } -return *p.VDouble -} -var Tag_VBool_DEFAULT bool -func (p *Tag) GetVBool() bool { - if !p.IsSetVBool() { - return Tag_VBool_DEFAULT - } -return *p.VBool -} -var Tag_VLong_DEFAULT int64 -func (p *Tag) GetVLong() int64 { - if !p.IsSetVLong() { - return Tag_VLong_DEFAULT - } -return *p.VLong -} -var Tag_VBinary_DEFAULT 
[]byte - -func (p *Tag) GetVBinary() []byte { - return p.VBinary -} -func (p *Tag) IsSetVStr() bool { - return p.VStr != nil -} - -func (p *Tag) IsSetVDouble() bool { - return p.VDouble != nil -} - -func (p *Tag) IsSetVBool() bool { - return p.VBool != nil -} - -func (p *Tag) IsSetVLong() bool { - return p.VLong != nil -} - -func (p *Tag) IsSetVBinary() bool { - return p.VBinary != nil -} - -func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetKey bool = false; - var issetVType bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetKey = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetVType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err := 
p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetKey{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")); - } - if !issetVType{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")); - } - return nil -} - -func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Key = v -} - return nil -} - -func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := TagType(v) - p.VType = temp -} - return nil -} - -func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.VStr = &v -} - return nil -} - -func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.VDouble = &v -} - return nil -} - -func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := 
iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.VBool = &v -} - return nil -} - -func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.VLong = &v -} - return nil -} - -func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.VBinary = v -} - return nil -} - -func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } - if err := 
oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } - return err -} - -func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) } - return err -} - -func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVStr() { - if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) } - if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) } - } - return err -} - -func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) } - if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) } - } - return err -} - -func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - 
if p.IsSetVBool() { - if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) } - if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) } - } - return err -} - -func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVLong() { - if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) } - } - return err -} - -func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) } - if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) } - } - return err -} - -func (p *Tag) Equals(other *Tag) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Key != other.Key { return false } - if p.VType != other.VType { return false } - if p.VStr != other.VStr { - if p.VStr == nil || other.VStr == nil { - return false - 
} - if (*p.VStr) != (*other.VStr) { return false } - } - if p.VDouble != other.VDouble { - if p.VDouble == nil || other.VDouble == nil { - return false - } - if (*p.VDouble) != (*other.VDouble) { return false } - } - if p.VBool != other.VBool { - if p.VBool == nil || other.VBool == nil { - return false - } - if (*p.VBool) != (*other.VBool) { return false } - } - if p.VLong != other.VLong { - if p.VLong == nil || other.VLong == nil { - return false - } - if (*p.VLong) != (*other.VLong) { return false } - } - if bytes.Compare(p.VBinary, other.VBinary) != 0 { return false } - return true -} - -func (p *Tag) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Tag(%+v)", *p) -} - -// Attributes: -// - Timestamp -// - Fields -type Log struct { - Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` - Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` -} - -func NewLog() *Log { - return &Log{} -} - - -func (p *Log) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Log) GetFields() []*Tag { - return p.Fields -} -func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTimestamp bool = false; - var issetFields bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetTimestamp = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetFields = true - } else { - if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTimestamp{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")); - } - if !issetFields{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")); - } - return nil -} - -func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Timestamp = v -} - return nil -} - -func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Fields = tSlice - for i := 0; i < size; i ++ { - _elem0 := &Tag{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Fields = append(p.Fields, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Log"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err 
:= oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) } - return err -} - -func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) } - return err -} - -func (p *Log) Equals(other *Log) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Timestamp != other.Timestamp { return false } - if len(p.Fields) != len(other.Fields) { return false } - for i, _tgt := range p.Fields { - _src1 := other.Fields[i] - if !_tgt.Equals(_src1) { return false } - } - return true -} - -func (p *Log) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("Log(%+v)", *p) -} - -// Attributes: -// - RefType -// - TraceIdLow -// - TraceIdHigh -// - SpanId -type SpanRef struct { - RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` - TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` -} - -func NewSpanRef() *SpanRef { - return &SpanRef{} -} - - -func (p *SpanRef) GetRefType() SpanRefType { - return p.RefType -} - -func (p *SpanRef) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *SpanRef) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *SpanRef) GetSpanId() int64 { - return p.SpanId -} -func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRefType bool = false; - var issetTraceIdLow bool = false; - var issetTraceIdHigh bool = false; - var issetSpanId bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetRefType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTraceIdLow = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetTraceIdHigh = true - } else { - if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRefType{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")); - } - if !issetTraceIdLow{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")); - } - if !issetTraceIdHigh{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")); - } - if !issetSpanId{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")); - } - return nil -} - -func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := SpanRefType(v) - p.RefType = temp -} - return nil -} - -func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.TraceIdLow = v -} - return nil -} - -func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.TraceIdHigh = v -} - return nil -} - -func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, 
err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.SpanId = v -} - return nil -} - -func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) } - return err -} - -func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) } - return err -} - -func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) } - return err -} - -func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) } - return err -} - -func (p *SpanRef) Equals(other *SpanRef) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.RefType != other.RefType { return false } - if p.TraceIdLow != other.TraceIdLow { return false } - if p.TraceIdHigh != other.TraceIdHigh { return false } - if p.SpanId != other.SpanId { return false } - return true -} - -func (p *SpanRef) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SpanRef(%+v)", *p) -} - -// Attributes: -// - TraceIdLow -// - TraceIdHigh -// - SpanId -// - ParentSpanId -// - OperationName -// - References -// - Flags -// - StartTime -// - Duration -// - Tags -// - Logs -type Span struct { - TraceIdLow int64 
`thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"` - ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` - OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` - References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` - Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` - StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` - Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` - Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` - Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - - -func (p *Span) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *Span) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *Span) GetSpanId() int64 { - return p.SpanId -} - -func (p *Span) GetParentSpanId() int64 { - return p.ParentSpanId -} - -func (p *Span) GetOperationName() string { - return p.OperationName -} -var Span_References_DEFAULT []*SpanRef - -func (p *Span) GetReferences() []*SpanRef { - return p.References -} - -func (p *Span) GetFlags() int32 { - return p.Flags -} - -func (p *Span) GetStartTime() int64 { - return p.StartTime -} - -func (p *Span) GetDuration() int64 { - return p.Duration -} -var Span_Tags_DEFAULT []*Tag - -func (p *Span) GetTags() []*Tag { - return p.Tags -} -var Span_Logs_DEFAULT []*Log - -func (p *Span) GetLogs() []*Log { - return p.Logs -} -func (p *Span) IsSetReferences() bool { - return p.References != nil -} - -func (p *Span) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Span) IsSetLogs() bool { - return p.Logs != nil -} - -func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) 
error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceIdLow bool = false; - var issetTraceIdHigh bool = false; - var issetSpanId bool = false; - var issetParentSpanId bool = false; - var issetOperationName bool = false; - var issetFlags bool = false; - var issetStartTime bool = false; - var issetDuration bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetTraceIdLow = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTraceIdHigh = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetParentSpanId = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - issetOperationName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if 
fieldTypeId == thrift.I32 { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - issetFlags = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.I64 { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - issetStartTime = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.I64 { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - issetDuration = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.LIST { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.LIST { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceIdLow{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")); - } - if !issetTraceIdHigh{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")); - } - if !issetSpanId{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")); - } - if !issetParentSpanId{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")); - } - if !issetOperationName{ - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")); - } - if !issetFlags{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")); - } - if !issetStartTime{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")); - } - if !issetDuration{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")); - } - return nil -} - -func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.TraceIdLow = v -} - return nil -} - -func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.TraceIdHigh = v -} - return nil -} - -func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.SpanId = v -} - return nil -} - -func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.ParentSpanId = v -} - return nil -} - -func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.OperationName = v -} - return nil -} - -func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SpanRef, 
0, size) - p.References = tSlice - for i := 0; i < size; i ++ { - _elem2 := &SpanRef{} - if err := _elem2.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.References = append(p.References, _elem2) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.Flags = v -} - return nil -} - -func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 8: ", err) -} else { - p.StartTime = v -} - return nil -} - -func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) -} else { - p.Duration = v -} - return nil -} - -func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i ++ { - _elem3 := &Tag{} - if err := _elem3.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) - } - p.Tags = append(p.Tags, _elem3) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Log, 0, size) - p.Logs = tSlice 
- for i := 0; i < size; i ++ { - _elem4 := &Log{} - if err := _elem4.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Logs = append(p.Logs, _elem4) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - if err := p.writeField8(ctx, oprot); err != nil { return err } - if err := p.writeField9(ctx, oprot); err != nil { return err } - if err := p.writeField10(ctx, oprot); err != nil { return err } - if err := p.writeField11(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) } - if 
err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) } - return err -} - -func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) } - return err -} - -func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) } - return err -} - -func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) } - return err -} - -func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) 
(err error) { - if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) } - if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) } - return err -} - -func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetReferences() { - if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.References { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) } - } - return err -} - -func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) } - return err 
-} - -func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) } - return err -} - -func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) } - return err -} - -func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 
10:tags: ", p), err) } - } - return err -} - -func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetLogs() { - if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Logs { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) } - } - return err -} - -func (p *Span) Equals(other *Span) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.TraceIdLow != other.TraceIdLow { return false } - if p.TraceIdHigh != other.TraceIdHigh { return false } - if p.SpanId != other.SpanId { return false } - if p.ParentSpanId != other.ParentSpanId { return false } - if p.OperationName != other.OperationName { return false } - if len(p.References) != len(other.References) { return false } - for i, _tgt := range p.References { - _src5 := other.References[i] - if !_tgt.Equals(_src5) { return false } - } - if p.Flags != other.Flags { return false } - if p.StartTime != other.StartTime { return false } - if p.Duration != other.Duration { return false } - if len(p.Tags) != len(other.Tags) { return false } - for i, _tgt := range p.Tags { - _src6 := other.Tags[i] - if !_tgt.Equals(_src6) { return false } - } - if len(p.Logs) != len(other.Logs) { return false } - for i, _tgt := range p.Logs { - _src7 := other.Logs[i] - if !_tgt.Equals(_src7) { return false } - } - return true -} - -func (p 
*Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - ServiceName -// - Tags -type Process struct { - ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` - Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` -} - -func NewProcess() *Process { - return &Process{} -} - - -func (p *Process) GetServiceName() string { - return p.ServiceName -} -var Process_Tags_DEFAULT []*Tag - -func (p *Process) GetTags() []*Tag { - return p.Tags -} -func (p *Process) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetServiceName bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetServiceName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetServiceName{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")); - } - return nil -} - -func (p 
*Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.ServiceName = v -} - return nil -} - -func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i ++ { - _elem8 := &Tag{} - if err := _elem8.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) - } - p.Tags = append(p.Tags, _elem8) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } - return err -} - -func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) } - } - return err -} - -func (p *Process) Equals(other *Process) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.ServiceName != other.ServiceName { return false } - if len(p.Tags) != len(other.Tags) { return false } - for i, _tgt := range p.Tags { - _src9 := other.Tags[i] - if !_tgt.Equals(_src9) { return false } - } - return true -} - -func (p *Process) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Process(%+v)", *p) -} - -// Attributes: -// - FullQueueDroppedSpans -// - TooLargeDroppedSpans -// - FailedToEmitSpans -type ClientStats struct { - FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` - TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` - FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` -} - -func NewClientStats() *ClientStats { - 
return &ClientStats{} -} - - -func (p *ClientStats) GetFullQueueDroppedSpans() int64 { - return p.FullQueueDroppedSpans -} - -func (p *ClientStats) GetTooLargeDroppedSpans() int64 { - return p.TooLargeDroppedSpans -} - -func (p *ClientStats) GetFailedToEmitSpans() int64 { - return p.FailedToEmitSpans -} -func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetFullQueueDroppedSpans bool = false; - var issetTooLargeDroppedSpans bool = false; - var issetFailedToEmitSpans bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetFullQueueDroppedSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTooLargeDroppedSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetFailedToEmitSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetFullQueueDroppedSpans{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, 
fmt.Errorf("Required field FullQueueDroppedSpans is not set")); - } - if !issetTooLargeDroppedSpans{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")); - } - if !issetFailedToEmitSpans{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")); - } - return nil -} - -func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.FullQueueDroppedSpans = v -} - return nil -} - -func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.TooLargeDroppedSpans = v -} - return nil -} - -func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.FailedToEmitSpans = v -} - return nil -} - -func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := 
oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) } - return err -} - -func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) } - return err -} - -func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) } - return err -} - -func (p *ClientStats) Equals(other *ClientStats) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if 
p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false } - if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false } - if p.FailedToEmitSpans != other.FailedToEmitSpans { return false } - return true -} - -func (p *ClientStats) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ClientStats(%+v)", *p) -} - -// Attributes: -// - Process -// - Spans -// - SeqNo -// - Stats -type Batch struct { - Process *Process `thrift:"process,1,required" db:"process" json:"process"` - Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` - SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` - Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` -} - -func NewBatch() *Batch { - return &Batch{} -} - -var Batch_Process_DEFAULT *Process -func (p *Batch) GetProcess() *Process { - if !p.IsSetProcess() { - return Batch_Process_DEFAULT - } -return p.Process -} - -func (p *Batch) GetSpans() []*Span { - return p.Spans -} -var Batch_SeqNo_DEFAULT int64 -func (p *Batch) GetSeqNo() int64 { - if !p.IsSetSeqNo() { - return Batch_SeqNo_DEFAULT - } -return *p.SeqNo -} -var Batch_Stats_DEFAULT *ClientStats -func (p *Batch) GetStats() *ClientStats { - if !p.IsSetStats() { - return Batch_Stats_DEFAULT - } -return p.Stats -} -func (p *Batch) IsSetProcess() bool { - return p.Process != nil -} - -func (p *Batch) IsSetSeqNo() bool { - return p.SeqNo != nil -} - -func (p *Batch) IsSetStats() bool { - return p.Stats != nil -} - -func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetProcess bool = false; - var issetSpans bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - 
switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetProcess = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetSpans = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetProcess{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")); - } - if !issetSpans{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")); - } - return nil -} - -func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.Process = &Process{} - if err := p.Process.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) - } - return nil -} - -func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < 
size; i ++ { - _elem10 := &Span{} - if err := _elem10.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Spans = append(p.Spans, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.SeqNo = &v -} - return nil -} - -func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.Stats = &ClientStats{} - if err := p.Stats.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) - } - return nil -} - -func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) } - if err := p.Process.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
p.Process), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) } - return err -} - -func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) } - return err -} - -func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSeqNo() { - if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) } - } - return err -} - -func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetStats() { - if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) } - if err := p.Stats.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
error writing struct: ", p.Stats), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) } - } - return err -} - -func (p *Batch) Equals(other *Batch) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.Process.Equals(other.Process) { return false } - if len(p.Spans) != len(other.Spans) { return false } - for i, _tgt := range p.Spans { - _src11 := other.Spans[i] - if !_tgt.Equals(_src11) { return false } - } - if p.SeqNo != other.SeqNo { - if p.SeqNo == nil || other.SeqNo == nil { - return false - } - if (*p.SeqNo) != (*other.SeqNo) { return false } - } - if !p.Stats.Equals(other.Stats) { return false } - return true -} - -func (p *Batch) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Batch(%+v)", *p) -} - -// Attributes: -// - Ok -type BatchSubmitResponse struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewBatchSubmitResponse() *BatchSubmitResponse { - return &BatchSubmitResponse{} -} - - -func (p *BatchSubmitResponse) GetOk() bool { - return p.Ok -} -func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOk = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != 
nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")); - } - return nil -} - -func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Ok = v -} - return nil -} - -func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) } - if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) } - return err -} - -func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ok != other.Ok { return false } - return true -} - -func (p *BatchSubmitResponse) String() string { - if p == nil { - 
return "" - } - return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) -} - -type Collector interface { - // Parameters: - // - Batches - SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) -} - -type CollectorClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewCollectorClient(c thrift.TClient) *CollectorClient { - return &CollectorClient{ - c: c, - } -} - -func (p *CollectorClient) Client_() thrift.TClient { - return p.c -} - -func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Batches -func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { - var _args12 CollectorSubmitBatchesArgs - _args12.Batches = batches - var _result14 CollectorSubmitBatchesResult - var _meta13 thrift.ResponseMeta - _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14) - p.SetLastResponseMeta_(_meta13) - if _err != nil { - return - } - return _result14.GetSuccess(), nil -} - -type CollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Collector -} - -func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - 
-func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewCollectorProcessor(handler Collector) *CollectorProcessor { - - self15 := &CollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler:handler} -return self15 -} - -func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { return false, thrift.WrapTException(err2) } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x16.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x16 - -} - -type collectorProcessorSubmitBatches struct { - handler Collector -} - -func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := CollectorSubmitBatchesArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := CollectorSubmitBatchesResult{} - var retval []*BatchSubmitResponse - if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error()) - oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batches -type CollectorSubmitBatchesArgs struct { - Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` -} - -func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { - return &CollectorSubmitBatchesArgs{} -} 
- - -func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { - return p.Batches -} -func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Batch, 0, size) - p.Batches = tSlice - for i := 0; i < size; i ++ { - _elem17 := &Batch{} - if err := _elem17.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) - } - p.Batches = append(p.Batches, _elem17) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { 
- if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Batches { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) } - return err -} - -func (p *CollectorSubmitBatchesArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) -} - -// Attributes: -// - Success -type CollectorSubmitBatchesResult struct { - Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult { - return &CollectorSubmitBatchesResult{} -} - -var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse - -func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { - return p.Success -} -func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) 
error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BatchSubmitResponse, 0, size) - p.Success = tSlice - for i := 0; i < size; i ++ { - _elem18 := &BatchSubmitResponse{} - if err := _elem18.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) - } - p.Success = append(p.Success, _elem18) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return 
thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *CollectorSubmitBatchesResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p) -} - - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go deleted file mode 100644 index 015ad4b067..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
- -package sampling - -var GoUnusedProtection__ int; - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go deleted file mode 100644 index 5cc762824e..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package sampling - -import( - "bytes" - "context" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - - -func init() { -} - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go deleted file mode 100644 index 3bffa5b8ee..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package sampling - -import( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -type SamplingStrategyType int64 -const ( - SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0 - SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1 -) - -func (p SamplingStrategyType) String() string { - switch p { - case SamplingStrategyType_PROBABILISTIC: return "PROBABILISTIC" - case SamplingStrategyType_RATE_LIMITING: return "RATE_LIMITING" - } - return "" -} - -func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) { - switch s { - case "PROBABILISTIC": return SamplingStrategyType_PROBABILISTIC, nil - case "RATE_LIMITING": return SamplingStrategyType_RATE_LIMITING, nil - } - return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string") -} - - -func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v } - -func (p SamplingStrategyType) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *SamplingStrategyType) UnmarshalText(text []byte) error { -q, err := SamplingStrategyTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *SamplingStrategyType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = SamplingStrategyType(v) -return nil -} - -func (p * SamplingStrategyType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -// Attributes: -// - SamplingRate -type ProbabilisticSamplingStrategy struct { - SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"` -} - -func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy { - return &ProbabilisticSamplingStrategy{} -} - - -func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 { - return p.SamplingRate -} -func (p *ProbabilisticSamplingStrategy) Read(ctx context.Context, iprot 
thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetSamplingRate bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetSamplingRate = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetSamplingRate{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set")); - } - return nil -} - -func (p *ProbabilisticSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.SamplingRate = v -} - return nil -} - -func (p *ProbabilisticSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil 
-} - -func (p *ProbabilisticSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "samplingRate", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) } - if err := oprot.WriteDouble(ctx, float64(p.SamplingRate)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) } - return err -} - -func (p *ProbabilisticSamplingStrategy) Equals(other *ProbabilisticSamplingStrategy) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.SamplingRate != other.SamplingRate { return false } - return true -} - -func (p *ProbabilisticSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - MaxTracesPerSecond -type RateLimitingSamplingStrategy struct { - MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"` -} - -func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy { - return &RateLimitingSamplingStrategy{} -} - - -func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 { - return p.MaxTracesPerSecond -} -func (p *RateLimitingSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetMaxTracesPerSecond bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch 
fieldId { - case 1: - if fieldTypeId == thrift.I16 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetMaxTracesPerSecond = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetMaxTracesPerSecond{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set")); - } - return nil -} - -func (p *RateLimitingSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.MaxTracesPerSecond = v -} - return nil -} - -func (p *RateLimitingSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "RateLimitingSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *RateLimitingSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "maxTracesPerSecond", thrift.I16, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) } - if err := oprot.WriteI16(ctx, int16(p.MaxTracesPerSecond)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) } - return err -} - -func (p *RateLimitingSamplingStrategy) Equals(other *RateLimitingSamplingStrategy) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.MaxTracesPerSecond != other.MaxTracesPerSecond { return false } - return true -} - -func (p *RateLimitingSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - Operation -// - ProbabilisticSampling -type OperationSamplingStrategy struct { - Operation string `thrift:"operation,1,required" db:"operation" json:"operation"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"` -} - -func NewOperationSamplingStrategy() *OperationSamplingStrategy { - return &OperationSamplingStrategy{} -} - - -func (p *OperationSamplingStrategy) GetOperation() string { - return p.Operation -} -var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy -func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT - } -return p.ProbabilisticSampling -} -func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *OperationSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOperation bool = false; - var issetProbabilisticSampling bool = false; - - for { 
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOperation = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetProbabilisticSampling = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOperation{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set")); - } - if !issetProbabilisticSampling{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set")); - } - return nil -} - -func (p *OperationSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Operation = v -} - return nil -} - -func (p *OperationSamplingStrategy) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *OperationSamplingStrategy) Write(ctx context.Context, 
oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "OperationSamplingStrategy"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *OperationSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "operation", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Operation)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) } - return err -} - -func (p *OperationSamplingStrategy) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) } - if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) } - return err -} - -func (p *OperationSamplingStrategy) Equals(other *OperationSamplingStrategy) bool { - if p == other { - return true - } else 
if p == nil || other == nil { - return false - } - if p.Operation != other.Operation { return false } - if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false } - return true -} - -func (p *OperationSamplingStrategy) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p) -} - -// Attributes: -// - DefaultSamplingProbability -// - DefaultLowerBoundTracesPerSecond -// - PerOperationStrategies -// - DefaultUpperBoundTracesPerSecond -type PerOperationSamplingStrategies struct { - DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"` - DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"` - PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"` - DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"` -} - -func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies { - return &PerOperationSamplingStrategies{} -} - - -func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 { - return p.DefaultSamplingProbability -} - -func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 { - return p.DefaultLowerBoundTracesPerSecond -} - -func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy { - return p.PerOperationStrategies -} -var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64 -func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 { - if !p.IsSetDefaultUpperBoundTracesPerSecond() { - return 
PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT - } -return *p.DefaultUpperBoundTracesPerSecond -} -func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool { - return p.DefaultUpperBoundTracesPerSecond != nil -} - -func (p *PerOperationSamplingStrategies) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetDefaultSamplingProbability bool = false; - var issetDefaultLowerBoundTracesPerSecond bool = false; - var issetPerOperationStrategies bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetDefaultSamplingProbability = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetDefaultLowerBoundTracesPerSecond = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetPerOperationStrategies = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := 
iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetDefaultSamplingProbability{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set")); - } - if !issetDefaultLowerBoundTracesPerSecond{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set")); - } - if !issetPerOperationStrategies{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set")); - } - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.DefaultSamplingProbability = v -} - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.DefaultLowerBoundTracesPerSecond = v -} - return nil -} - -func (p *PerOperationSamplingStrategies) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*OperationSamplingStrategy, 0, size) - p.PerOperationStrategies = tSlice - for i := 0; i < size; i ++ { - _elem0 := &OperationSamplingStrategy{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - 
-func (p *PerOperationSamplingStrategies) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.DefaultUpperBoundTracesPerSecond = &v -} - return nil -} - -func (p *PerOperationSamplingStrategies) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "PerOperationSamplingStrategies"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *PerOperationSamplingStrategies) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "defaultSamplingProbability", thrift.DOUBLE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) } - if err := oprot.WriteDouble(ctx, float64(p.DefaultSamplingProbability)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) } - return err -} - -func (p *PerOperationSamplingStrategies) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "defaultLowerBoundTracesPerSecond", 
thrift.DOUBLE, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) } - if err := oprot.WriteDouble(ctx, float64(p.DefaultLowerBoundTracesPerSecond)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) } - return err -} - -func (p *PerOperationSamplingStrategies) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "perOperationStrategies", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PerOperationStrategies)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PerOperationStrategies { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) } - return err -} - -func (p *PerOperationSamplingStrategies) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDefaultUpperBoundTracesPerSecond() { - if err := oprot.WriteFieldBegin(ctx, "defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) } - if err := oprot.WriteDouble(ctx, float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) } - } - return err -} - -func (p *PerOperationSamplingStrategies) Equals(other *PerOperationSamplingStrategies) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.DefaultSamplingProbability != other.DefaultSamplingProbability { return false } - if p.DefaultLowerBoundTracesPerSecond != other.DefaultLowerBoundTracesPerSecond { return false } - if len(p.PerOperationStrategies) != len(other.PerOperationStrategies) { return false } - for i, _tgt := range p.PerOperationStrategies { - _src1 := other.PerOperationStrategies[i] - if !_tgt.Equals(_src1) { return false } - } - if p.DefaultUpperBoundTracesPerSecond != other.DefaultUpperBoundTracesPerSecond { - if p.DefaultUpperBoundTracesPerSecond == nil || other.DefaultUpperBoundTracesPerSecond == nil { - return false - } - if (*p.DefaultUpperBoundTracesPerSecond) != (*other.DefaultUpperBoundTracesPerSecond) { return false } - } - return true -} - -func (p *PerOperationSamplingStrategies) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p) -} - -// Attributes: -// - StrategyType -// - ProbabilisticSampling -// - RateLimitingSampling -// - OperationSampling -type SamplingStrategyResponse struct { - StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"` - ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"` - RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"` - OperationSampling 
*PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"` -} - -func NewSamplingStrategyResponse() *SamplingStrategyResponse { - return &SamplingStrategyResponse{} -} - - -func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType { - return p.StrategyType -} -var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy -func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy { - if !p.IsSetProbabilisticSampling() { - return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT - } -return p.ProbabilisticSampling -} -var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy -func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy { - if !p.IsSetRateLimitingSampling() { - return SamplingStrategyResponse_RateLimitingSampling_DEFAULT - } -return p.RateLimitingSampling -} -var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies -func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies { - if !p.IsSetOperationSampling() { - return SamplingStrategyResponse_OperationSampling_DEFAULT - } -return p.OperationSampling -} -func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool { - return p.ProbabilisticSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool { - return p.RateLimitingSampling != nil -} - -func (p *SamplingStrategyResponse) IsSetOperationSampling() bool { - return p.OperationSampling != nil -} - -func (p *SamplingStrategyResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetStrategyType bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetStrategyType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetStrategyType{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set")); - } - return nil -} - -func (p *SamplingStrategyResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := SamplingStrategyType(v) - p.StrategyType = temp -} - return nil -} - -func (p *SamplingStrategyResponse) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{} - if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - p.RateLimitingSampling = &RateLimitingSamplingStrategy{} - if err := p.RateLimitingSampling.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.OperationSampling = &PerOperationSamplingStrategies{} - if err := p.OperationSampling.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err) - } - return nil -} - -func (p *SamplingStrategyResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "SamplingStrategyResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SamplingStrategyResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "strategyType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.StrategyType)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) } - return err -} - -func (p *SamplingStrategyResponse) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetProbabilisticSampling() { - if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) } - if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) } - } - return err -} - -func (p *SamplingStrategyResponse) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetRateLimitingSampling() { - if err := oprot.WriteFieldBegin(ctx, "rateLimitingSampling", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) } - if err := p.RateLimitingSampling.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) } - } - return err -} - -func (p *SamplingStrategyResponse) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetOperationSampling() { - if err := oprot.WriteFieldBegin(ctx, "operationSampling", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) } - if err := 
p.OperationSampling.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) } - } - return err -} - -func (p *SamplingStrategyResponse) Equals(other *SamplingStrategyResponse) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.StrategyType != other.StrategyType { return false } - if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false } - if !p.RateLimitingSampling.Equals(other.RateLimitingSampling) { return false } - if !p.OperationSampling.Equals(other.OperationSampling) { return false } - return true -} - -func (p *SamplingStrategyResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p) -} - -type SamplingManager interface { - // Parameters: - // - ServiceName - GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) -} - -type SamplingManagerClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient { - return &SamplingManagerClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient { - return &SamplingManagerClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient { - return &SamplingManagerClient{ - c: c, - } -} - -func (p *SamplingManagerClient) Client_() thrift.TClient { - return p.c -} - -func (p *SamplingManagerClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p 
*SamplingManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - ServiceName -func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) { - var _args2 SamplingManagerGetSamplingStrategyArgs - _args2.ServiceName = serviceName - var _result4 SamplingManagerGetSamplingStrategyResult - var _meta3 thrift.ResponseMeta - _meta3, _err = p.Client_().Call(ctx, "getSamplingStrategy", &_args2, &_result4) - p.SetLastResponseMeta_(_meta3) - if _err != nil { - return - } - return _result4.GetSuccess(), nil -} - -type SamplingManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler SamplingManager -} - -func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor { - - self5 := &SamplingManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self5.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler:handler} -return self5 -} - -func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { return false, thrift.WrapTException(err2) } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x6 := 
thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x6.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x6 - -} - -type samplingManagerProcessorGetSamplingStrategy struct { - handler SamplingManager -} - -func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := SamplingManagerGetSamplingStrategyArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := SamplingManagerGetSamplingStrategyResult{} - var retval *SamplingStrategyResponse - if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: " + err2.Error()) - oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type SamplingManagerGetSamplingStrategyArgs struct { - ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` -} - -func 
NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs { - return &SamplingManagerGetSamplingStrategyArgs{} -} - - -func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string { - return p.ServiceName -} -func (p *SamplingManagerGetSamplingStrategyArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.ServiceName = v -} - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if 
err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } - return err -} - -func (p *SamplingManagerGetSamplingStrategyArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p) -} - -// Attributes: -// - Success -type SamplingManagerGetSamplingStrategyResult struct { - Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult { - return &SamplingManagerGetSamplingStrategyResult{} -} - -var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse -func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse { - if !p.IsSetSuccess() { - return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT - } -return p.Success -} -func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - 
if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - p.Success = &SamplingStrategyResponse{} - if err := p.Success.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) - } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SamplingManagerGetSamplingStrategyResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := p.Success.Write(ctx, oprot); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *SamplingManagerGetSamplingStrategyResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p) -} - - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go deleted file mode 100644 index b00ecd23fc..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go +++ /dev/null @@ -1,1853 +0,0 @@ -// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. - -package zipkincore - -import( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - "github.com/uber/jaeger-client-go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -type AnnotationType int64 -const ( - AnnotationType_BOOL AnnotationType = 0 - AnnotationType_BYTES AnnotationType = 1 - AnnotationType_I16 AnnotationType = 2 - AnnotationType_I32 AnnotationType = 3 - AnnotationType_I64 AnnotationType = 4 - AnnotationType_DOUBLE AnnotationType = 5 - AnnotationType_STRING AnnotationType = 6 -) - -func (p AnnotationType) String() string { - switch p { - case AnnotationType_BOOL: return "BOOL" - case AnnotationType_BYTES: return "BYTES" - case AnnotationType_I16: return "I16" - case AnnotationType_I32: return "I32" - case AnnotationType_I64: return "I64" - case AnnotationType_DOUBLE: return "DOUBLE" - case AnnotationType_STRING: return "STRING" - } - return "" -} - -func AnnotationTypeFromString(s string) (AnnotationType, error) { - switch s { - case "BOOL": return AnnotationType_BOOL, nil - case "BYTES": return AnnotationType_BYTES, nil - case "I16": return AnnotationType_I16, nil - case "I32": return AnnotationType_I32, nil - case "I64": return AnnotationType_I64, nil - case "DOUBLE": return AnnotationType_DOUBLE, nil - case "STRING": return AnnotationType_STRING, nil - } - return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") -} - - -func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } - -func (p AnnotationType) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *AnnotationType) UnmarshalText(text []byte) error { -q, err := AnnotationTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *AnnotationType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = AnnotationType(v) -return nil -} - -func (p * AnnotationType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -// Indicates the network context of a 
service recording an annotation with two -// exceptions. -// -// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, -// the endpoint indicates the source or destination of an RPC. This exception -// allows zipkin to display network context of uninstrumented services, or -// clients such as web browsers. -// -// Attributes: -// - Ipv4: IPv4 host address packed into 4 bytes. -// -// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 -// - Port: IPv4 port -// -// Note: this is to be treated as an unsigned integer, so watch for negatives. -// -// Conventionally, when the port isn't known, port = 0. -// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" -// -// Conventionally, when the service name isn't known, service_name = "unknown". -// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() -type Endpoint struct { - Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` - Port int16 `thrift:"port,2" db:"port" json:"port"` - ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` - Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` -} - -func NewEndpoint() *Endpoint { - return &Endpoint{} -} - - -func (p *Endpoint) GetIpv4() int32 { - return p.Ipv4 -} - -func (p *Endpoint) GetPort() int16 { - return p.Port -} - -func (p *Endpoint) GetServiceName() string { - return p.ServiceName -} -var Endpoint_Ipv6_DEFAULT []byte - -func (p *Endpoint) GetIpv6() []byte { - return p.Ipv6 -} -func (p *Endpoint) IsSetIpv6() bool { - return p.Ipv6 != nil -} - -func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { 
break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I16 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Ipv4 = v -} - return nil -} - -func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Port = v -} - return nil -} - -func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.ServiceName = v -} - return nil -} - -func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - 
return thrift.PrependError("error reading field 4: ", err) -} else { - p.Ipv6 = v -} - return nil -} - -func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) } - return err -} - -func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) } - if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) } - return err -} - -func 
(p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) } - return err -} - -func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetIpv6() { - if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) } - if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) } - } - return err -} - -func (p *Endpoint) Equals(other *Endpoint) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ipv4 != other.Ipv4 { return false } - if p.Port != other.Port { return false } - if p.ServiceName != other.ServiceName { return false } - if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false } - return true -} - -func (p *Endpoint) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Endpoint(%+v)", *p) -} - -// An annotation is similar to a log statement. It includes a host field which -// allows these events to be attributed properly, and also aggregatable. -// -// Attributes: -// - Timestamp: Microseconds from epoch. -// -// This value should use the most precise value possible. 
For example, -// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. -// - Value -// - Host: Always the host that recorded the event. By specifying the host you allow -// rollup of all events (such as client requests to a service) by IP address. -type Annotation struct { - Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` - Value string `thrift:"value,2" db:"value" json:"value"` - Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` -} - -func NewAnnotation() *Annotation { - return &Annotation{} -} - - -func (p *Annotation) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Annotation) GetValue() string { - return p.Value -} -var Annotation_Host_DEFAULT *Endpoint -func (p *Annotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return Annotation_Host_DEFAULT - } -return p.Host -} -func (p *Annotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := 
iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Timestamp = v -} - return nil -} - -func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Value = v -} - return nil -} - -func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", 
p), err) } - if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) } - return err -} - -func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) } - return err -} - -func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) } - if err := p.Host.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) } - } - return err -} - -func (p *Annotation) Equals(other *Annotation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Timestamp != other.Timestamp { return false } - if p.Value != other.Value { return false } - if !p.Host.Equals(other.Host) { return false } - return true -} - -func (p *Annotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Annotation(%+v)", *p) -} - -// Binary annotations are tags applied to a Span to give it context. 
For -// example, a binary annotation of "http.uri" could the path to a resource in a -// RPC call. -// -// Binary annotations of type STRING are always queryable, though more a -// historical implementation detail than a structural concern. -// -// Binary annotations can repeat, and vary on the host. Similar to Annotation, -// the host indicates who logged the event. This allows you to tell the -// difference between the client and server side of the same key. For example, -// the key "http.uri" might be different on the client and server side due to -// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, -// you can see the different points of view, which often help in debugging. -// -// Attributes: -// - Key -// - Value -// - AnnotationType -// - Host: The host that recorded tag, which allows you to differentiate between -// multiple tags with the same key. There are two exceptions to this. -// -// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or -// destination of an RPC. This exception allows zipkin to display network -// context of uninstrumented services, or clients such as web browsers. 
-type BinaryAnnotation struct { - Key string `thrift:"key,1" db:"key" json:"key"` - Value []byte `thrift:"value,2" db:"value" json:"value"` - AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` - Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` -} - -func NewBinaryAnnotation() *BinaryAnnotation { - return &BinaryAnnotation{} -} - - -func (p *BinaryAnnotation) GetKey() string { - return p.Key -} - -func (p *BinaryAnnotation) GetValue() []byte { - return p.Value -} - -func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { - return p.AnnotationType -} -var BinaryAnnotation_Host_DEFAULT *Endpoint -func (p *BinaryAnnotation) GetHost() *Endpoint { - if !p.IsSetHost() { - return BinaryAnnotation_Host_DEFAULT - } -return p.Host -} -func (p *BinaryAnnotation) IsSetHost() bool { - return p.Host != nil -} - -func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := 
p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Key = v -} - return nil -} - -func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Value = v -} - return nil -} - -func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := AnnotationType(v) - p.AnnotationType = temp -} - return nil -} - -func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.Host = &Endpoint{} - if err := p.Host.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) - } - return nil -} - -func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, 
oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } - return err -} - -func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) } - if err := oprot.WriteBinary(ctx, p.Value); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) } - return err -} - -func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write 
field end error 3:annotation_type: ", p), err) } - return err -} - -func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetHost() { - if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) } - if err := p.Host.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) } - } - return err -} - -func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Key != other.Key { return false } - if bytes.Compare(p.Value, other.Value) != 0 { return false } - if p.AnnotationType != other.AnnotationType { return false } - if !p.Host.Equals(other.Host) { return false } - return true -} - -func (p *BinaryAnnotation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BinaryAnnotation(%+v)", *p) -} - -// A trace is a series of spans (often RPC calls) which form a latency tree. -// -// The root span is where trace_id = id and parent_id = Nil. The root span is -// usually the longest interval in the trace, starting with a SERVER_RECV -// annotation and ending with a SERVER_SEND. -// -// Attributes: -// - TraceID -// - Name: Span name in lowercase, rpc method for example -// -// Conventionally, when the span name isn't known, name = "unknown". -// - ID -// - ParentID -// - Annotations -// - BinaryAnnotations -// - Debug -// - Timestamp: Microseconds from epoch of the creation of this span. -// -// This value should be set directly by instrumentation, using the most -// precise value possible. For example, gettimeofday or syncing nanoTime -// against a tick of currentTimeMillis. 
-// -// For compatibility with instrumentation that precede this field, collectors -// or span stores can derive this via Annotation.timestamp. -// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. -// -// This field is optional for compatibility with old data: first-party span -// stores are expected to support this at time of introduction. -// - Duration: Measurement of duration in microseconds, used to support queries. -// -// This value should be set directly, where possible. Doing so encourages -// precise measurement decoupled from problems of clocks, such as skew or NTP -// updates causing time to move backwards. -// -// For compatibility with instrumentation that precede this field, collectors -// or span stores can derive this by subtracting Annotation.timestamp. -// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. -// -// If this field is persisted as unset, zipkin will continue to work, except -// duration query support will be implementation-specific. Similarly, setting -// this field non-atomically is implementation-specific. -// -// This field is i64 vs i32 to support spans longer than 35 minutes. -// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this -// means the trace uses 128 bit traceIds instead of 64 bit. 
-type Span struct { - TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` - // unused field # 2 - Name string `thrift:"name,3" db:"name" json:"name"` - ID int64 `thrift:"id,4" db:"id" json:"id"` - ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` - Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` - // unused field # 7 - BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` - Debug bool `thrift:"debug,9" db:"debug" json:"debug"` - Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` - Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` - TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - - -func (p *Span) GetTraceID() int64 { - return p.TraceID -} - -func (p *Span) GetName() string { - return p.Name -} - -func (p *Span) GetID() int64 { - return p.ID -} -var Span_ParentID_DEFAULT int64 -func (p *Span) GetParentID() int64 { - if !p.IsSetParentID() { - return Span_ParentID_DEFAULT - } -return *p.ParentID -} - -func (p *Span) GetAnnotations() []*Annotation { - return p.Annotations -} - -func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { - return p.BinaryAnnotations -} -var Span_Debug_DEFAULT bool = false - -func (p *Span) GetDebug() bool { - return p.Debug -} -var Span_Timestamp_DEFAULT int64 -func (p *Span) GetTimestamp() int64 { - if !p.IsSetTimestamp() { - return Span_Timestamp_DEFAULT - } -return *p.Timestamp -} -var Span_Duration_DEFAULT int64 -func (p *Span) GetDuration() int64 { - if !p.IsSetDuration() { - return Span_Duration_DEFAULT - } -return *p.Duration -} -var Span_TraceIDHigh_DEFAULT int64 -func (p *Span) GetTraceIDHigh() int64 { - if !p.IsSetTraceIDHigh() { - return Span_TraceIDHigh_DEFAULT - } -return *p.TraceIDHigh -} -func (p *Span) 
IsSetParentID() bool { - return p.ParentID != nil -} - -func (p *Span) IsSetDebug() bool { - return p.Debug != Span_Debug_DEFAULT -} - -func (p *Span) IsSetTimestamp() bool { - return p.Timestamp != nil -} - -func (p *Span) IsSetDuration() bool { - return p.Duration != nil -} - -func (p *Span) IsSetTraceIDHigh() bool { - return p.TraceIDHigh != nil -} - -func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.LIST { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.LIST { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - 
case 9: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.I64 { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.I64 { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 12: - if fieldTypeId == thrift.I64 { - if err := p.ReadField12(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.TraceID = v -} - return nil -} - -func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.Name = v -} - return nil -} - -func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.ID = v -} - return nil -} - -func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) 
-} else { - p.ParentID = &v -} - return nil -} - -func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Annotation, 0, size) - p.Annotations = tSlice - for i := 0; i < size; i ++ { - _elem0 := &Annotation{} - if err := _elem0.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Annotations = append(p.Annotations, _elem0) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BinaryAnnotation, 0, size) - p.BinaryAnnotations = tSlice - for i := 0; i < size; i ++ { - _elem1 := &BinaryAnnotation{} - if err := _elem1.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) -} else { - p.Debug = v -} - return nil -} - -func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 10: ", err) -} else { - p.Timestamp = &v -} - return nil -} - -func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - 
return thrift.PrependError("error reading field 11: ", err) -} else { - p.Duration = &v -} - return nil -} - -func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 12: ", err) -} else { - p.TraceIDHigh = &v -} - return nil -} - -func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField8(ctx, oprot); err != nil { return err } - if err := p.writeField9(ctx, oprot); err != nil { return err } - if err := p.writeField10(ctx, oprot); err != nil { return err } - if err := p.writeField11(ctx, oprot); err != nil { return err } - if err := p.writeField12(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) } - return err -} - -func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) } - return err -} - -func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) } - return err -} - -func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetParentID() { - if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) } - } - return err -} - -func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != 
nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Annotations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) } - return err -} - -func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.BinaryAnnotations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) } - return err -} - -func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDebug() { - if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) } - if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) } - } - return err -} - -func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTimestamp() { - if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) } - } - return err -} - -func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDuration() { - if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) } - } - return err -} - -func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTraceIDHigh() { - if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) } - } - return err -} - -func (p *Span) Equals(other *Span) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.TraceID != other.TraceID { return false } - if p.Name != other.Name { return false } - if p.ID != other.ID { return false } - if p.ParentID != other.ParentID { - if p.ParentID == nil || other.ParentID == nil { - return false - } - if (*p.ParentID) != (*other.ParentID) { return false } - } - if len(p.Annotations) != len(other.Annotations) { return false } - for i, _tgt := range p.Annotations { - _src2 := other.Annotations[i] - if !_tgt.Equals(_src2) { return false } - } - if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false } - for i, _tgt := range p.BinaryAnnotations { - _src3 := other.BinaryAnnotations[i] - if !_tgt.Equals(_src3) { return false } - } - if p.Debug != other.Debug { return false } - if p.Timestamp != other.Timestamp { - if p.Timestamp == nil || other.Timestamp == nil { - return false - } - if (*p.Timestamp) != (*other.Timestamp) { return false } - } - if p.Duration != other.Duration { - if p.Duration == nil || other.Duration == nil { - return false - } - if (*p.Duration) != (*other.Duration) { return false } - } - if p.TraceIDHigh != other.TraceIDHigh { - if p.TraceIDHigh == nil || other.TraceIDHigh == nil { - return false - } - if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false } - } - return true -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - Ok -type Response struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewResponse() *Response { - return &Response{} -} - - -func (p *Response) GetOk() bool { - return p.Ok -} -func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOk = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")); - } - return nil -} - -func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Ok = v -} - return nil -} - -func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) } - if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) } - return err -} - -func (p *Response) Equals(other *Response) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Ok != other.Ok { return false } - return true -} - -func (p *Response) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Response(%+v)", *p) -} - -type ZipkinCollector interface { - // Parameters: - // - Spans - SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) -} - -type ZipkinCollectorClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { - return &ZipkinCollectorClient{ - c: c, - } -} - -func (p *ZipkinCollectorClient) Client_() thrift.TClient { - return p.c -} - -func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// Parameters: -// - Spans -func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) { - var _args4 ZipkinCollectorSubmitZipkinBatchArgs - _args4.Spans = spans 
- var _result6 ZipkinCollectorSubmitZipkinBatchResult - var _meta5 thrift.ResponseMeta - _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) - p.SetLastResponseMeta_(_meta5) - if _err != nil { - return - } - return _result6.GetSuccess(), nil -} - -type ZipkinCollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler ZipkinCollector -} - -func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { - - self7 := &ZipkinCollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler:handler} -return self7 -} - -func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { return false, thrift.WrapTException(err2) } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x8.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x8 - -} - -type zipkinCollectorProcessorSubmitZipkinBatch struct { - handler ZipkinCollector -} - -func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx 
context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := ZipkinCollectorSubmitZipkinBatchArgs{} - var err2 error - if err2 = args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. - if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel() - return - } - } - } - }(tickerCtx, cancel) - } - - result := ZipkinCollectorSubmitZipkinBatchResult{} - var retval []*Response - if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { - tickerCancel() - if err2 == thrift.ErrAbandonRequest { - return false, thrift.WrapTException(err2) - } - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error()) - oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return true, thrift.WrapTException(err2) - } else { - result.Success = retval - } - tickerCancel() - if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = 
result.Write(ctx, oprot); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = thrift.WrapTException(err2) - } - if err != nil { - return - } - return true, err -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Spans -type ZipkinCollectorSubmitZipkinBatchArgs struct { - Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` -} - -func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { - return &ZipkinCollectorSubmitZipkinBatchArgs{} -} - - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { - return p.Spans -} -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - 
} - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i ++ { - _elem9 := &Span{} - if err := _elem9.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) - } - p.Spans = append(p.Spans, _elem9) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { - if p == nil { - return "" - } 
- return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) -} - -// Attributes: -// - Success -type ZipkinCollectorSubmitZipkinBatchResult struct { - Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { - return &ZipkinCollectorSubmitZipkinBatchResult{} -} - -var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response - -func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { - return p.Success -} -func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Response, 0, size) - p.Success = tSlice - for i := 0; i < size; i ++ { - 
_elem10 := &Response{} - if err := _elem10.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Success = append(p.Success, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { - if p == nil { - return "" - } - return 
fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) -} - - diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md deleted file mode 100644 index c4c38ae01a..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Apache Thrift - -This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237). - -It is vendored code to avoid compatibility issues with Thrift versions. - -The file logger.go is modified to remove dependency on "testing" (see Issue #585). - -See: - * https://github.com/jaegertracing/jaeger-client-go/pull/584 - * https://github.com/jaegertracing/jaeger-client-go/pull/303 diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go deleted file mode 100644 index 50d44ec8ea..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "log" - "os" -) - -// Logger is a simple wrapper of a logging function. 
-// -// In reality the users might actually use different logging libraries, and they -// are not always compatible with each other. -// -// Logger is meant to be a simple common ground that it's easy to wrap whatever -// logging library they use into. -// -// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design -// discussion behind it. -type Logger func(msg string) - -// NopLogger is a Logger implementation that does nothing. -func NopLogger(msg string) {} - -// StdLogger wraps stdlib log package into a Logger. -// -// If logger passed in is nil, it will fallback to use stderr and default flags. -func StdLogger(logger *log.Logger) Logger { - if logger == nil { - logger = log.New(os.Stderr, "", log.LstdFlags) - } - return func(msg string) { - logger.Print(msg) - } -} - -func fallbackLogger(logger Logger) Logger { - if logger == nil { - return StdLogger(nil) - } - return logger -} diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go deleted file mode 100644 index 9a627bed5a..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ /dev/null @@ -1,493 +0,0 @@ -// Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "fmt" - "io" - "math/rand" - "os" - "reflect" - "strconv" - "sync" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - - "github.com/uber/jaeger-client-go/internal/baggage" - "github.com/uber/jaeger-client-go/internal/throttler" - "github.com/uber/jaeger-client-go/log" - "github.com/uber/jaeger-client-go/utils" -) - -// Tracer implements opentracing.Tracer. -type Tracer struct { - serviceName string - hostIPv4 uint32 // this is for zipkin endpoint conversion - - sampler SamplerV2 - reporter Reporter - metrics Metrics - logger log.DebugLogger - - timeNow func() time.Time - randomNumber func() uint64 - - options struct { - gen128Bit bool // whether to generate 128bit trace IDs - zipkinSharedRPCSpan bool - highTraceIDGenerator func() uint64 // custom high trace ID generator - maxTagValueLength int - noDebugFlagOnForcedSampling bool - maxLogsPerSpan int - // more options to come - } - // allocator of Span objects - spanAllocator SpanAllocator - - injectors map[interface{}]Injector - extractors map[interface{}]Extractor - - observer compositeObserver - - tags []Tag - process Process - - baggageRestrictionManager baggage.RestrictionManager - baggageSetter *baggageSetter - - debugThrottler throttler.Throttler -} - -// NewTracer creates Tracer implementation that reports tracing to Jaeger. -// The returned io.Closer can be used in shutdown hooks to ensure that the internal -// queue of the Reporter is drained and all buffered spans are submitted to collectors. -// TODO (breaking change) return *Tracer only, without closer. 
-func NewTracer( - serviceName string, - sampler Sampler, - reporter Reporter, - options ...TracerOption, -) (opentracing.Tracer, io.Closer) { - t := &Tracer{ - serviceName: serviceName, - sampler: samplerV1toV2(sampler), - reporter: reporter, - injectors: make(map[interface{}]Injector), - extractors: make(map[interface{}]Extractor), - metrics: *NewNullMetrics(), - spanAllocator: simpleSpanAllocator{}, - } - - for _, option := range options { - option(t) - } - - // register default injectors/extractors unless they are already provided via options - textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics) - t.addCodec(opentracing.TextMap, textPropagator, textPropagator) - - httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics) - t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) - - binaryPropagator := NewBinaryPropagator(t) - t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator) - - // TODO remove after TChannel supports OpenTracing - interopPropagator := &jaegerTraceContextPropagator{tracer: t} - t.addCodec(SpanContextFormat, interopPropagator, interopPropagator) - - zipkinPropagator := &zipkinPropagator{tracer: t} - t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator) - - if t.baggageRestrictionManager != nil { - t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics) - } else { - t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics) - } - if t.debugThrottler == nil { - t.debugThrottler = throttler.DefaultThrottler{} - } - - if t.randomNumber == nil { - seedGenerator := utils.NewRand(time.Now().UnixNano()) - pool := sync.Pool{ - New: func() interface{} { - return rand.NewSource(seedGenerator.Int63()) - }, - } - - t.randomNumber = func() uint64 { - generator := pool.Get().(rand.Source) - number := uint64(generator.Int63()) - pool.Put(generator) - return number - } - } - if t.timeNow == nil { - t.timeNow = 
time.Now - } - if t.logger == nil { - t.logger = log.NullLogger - } - // Set tracer-level tags - t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion}) - if hostname, err := os.Hostname(); err == nil { - t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname}) - } - if ipval, ok := t.getTag(TracerIPTagKey); ok { - ipv4, err := utils.ParseIPToUint32(ipval.(string)) - if err != nil { - t.hostIPv4 = 0 - t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error()) - } else { - t.hostIPv4 = ipv4 - } - } else if ip, err := utils.HostIP(); err == nil { - t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()}) - t.hostIPv4 = utils.PackIPAsUint32(ip) - } else { - t.logger.Error("Unable to determine this host's IP address: " + err.Error()) - } - - if t.options.gen128Bit { - if t.options.highTraceIDGenerator == nil { - t.options.highTraceIDGenerator = t.randomNumber - } - } else if t.options.highTraceIDGenerator != nil { - t.logger.Error("Overriding high trace ID generator but not generating " + - "128 bit trace IDs, consider enabling the \"Gen128Bit\" option") - } - if t.options.maxTagValueLength == 0 { - t.options.maxTagValueLength = DefaultMaxTagValueLength - } - t.process = Process{ - Service: serviceName, - UUID: strconv.FormatUint(t.randomNumber(), 16), - Tags: t.tags, - } - if throttler, ok := t.debugThrottler.(ProcessSetter); ok { - throttler.SetProcess(t.process) - } - - return t, t -} - -// addCodec adds registers injector and extractor for given propagation format if not already defined. -func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) { - if _, ok := t.injectors[format]; !ok { - t.injectors[format] = injector - } - if _, ok := t.extractors[format]; !ok { - t.extractors[format] = extractor - } -} - -// StartSpan implements StartSpan() method of opentracing.Tracer. 
-func (t *Tracer) StartSpan( - operationName string, - options ...opentracing.StartSpanOption, -) opentracing.Span { - sso := opentracing.StartSpanOptions{} - for _, o := range options { - o.Apply(&sso) - } - return t.startSpanWithOptions(operationName, sso) -} - -func (t *Tracer) startSpanWithOptions( - operationName string, - options opentracing.StartSpanOptions, -) opentracing.Span { - if options.StartTime.IsZero() { - options.StartTime = t.timeNow() - } - - // Predicate whether the given span context is an empty reference - // or may be used as parent / debug ID / baggage items source - isEmptyReference := func(ctx SpanContext) bool { - return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0 - } - - var references []Reference - var parent SpanContext - var hasParent bool // need this because `parent` is a value, not reference - var ctx SpanContext - var isSelfRef bool - for _, ref := range options.References { - ctxRef, ok := ref.ReferencedContext.(SpanContext) - if !ok { - t.logger.Error(fmt.Sprintf( - "Reference contains invalid type of SpanReference: %s", - reflect.ValueOf(ref.ReferencedContext))) - continue - } - if isEmptyReference(ctxRef) { - continue - } - - if ref.Type == selfRefType { - isSelfRef = true - ctx = ctxRef - continue - } - - if ctxRef.IsValid() { - // we don't want empty context that contains only debug-id or baggage - references = append(references, Reference{Type: ref.Type, Context: ctxRef}) - } - - if !hasParent { - parent = ctxRef - hasParent = ref.Type == opentracing.ChildOfRef - } - } - if !hasParent && !isEmptyReference(parent) { - // If ChildOfRef wasn't found but a FollowFromRef exists, use the context from - // the FollowFromRef as the parent - hasParent = true - } - - rpcServer := false - if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok { - rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum)) - } - - var internalTags []Tag - newTrace := false - if !isSelfRef { - if 
!hasParent || !parent.IsValid() { - newTrace = true - ctx.traceID.Low = t.randomID() - if t.options.gen128Bit { - ctx.traceID.High = t.options.highTraceIDGenerator() - } - ctx.spanID = SpanID(ctx.traceID.Low) - ctx.parentID = 0 - ctx.samplingState = &samplingState{ - localRootSpan: ctx.spanID, - } - if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) { - ctx.samplingState.setDebugAndSampled() - internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID}) - } - } else { - ctx.traceID = parent.traceID - if rpcServer && t.options.zipkinSharedRPCSpan { - // Support Zipkin's one-span-per-RPC model - ctx.spanID = parent.spanID - ctx.parentID = parent.parentID - } else { - ctx.spanID = SpanID(t.randomID()) - ctx.parentID = parent.spanID - } - ctx.samplingState = parent.samplingState - if parent.remote { - ctx.samplingState.setFinal() - ctx.samplingState.localRootSpan = ctx.spanID - } - } - if hasParent { - // copy baggage items - if l := len(parent.baggage); l > 0 { - ctx.baggage = make(map[string]string, len(parent.baggage)) - for k, v := range parent.baggage { - ctx.baggage[k] = v - } - } - } - } - - sp := t.newSpan() - sp.context = ctx - sp.tracer = t - sp.operationName = operationName - sp.startTime = options.StartTime - sp.duration = 0 - sp.references = references - sp.firstInProcess = rpcServer || sp.context.parentID == 0 - - if !sp.context.isSamplingFinalized() { - decision := t.sampler.OnCreateSpan(sp) - sp.applySamplingDecision(decision, false) - } - sp.observer = t.observer.OnStartSpan(sp, operationName, options) - - if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 { - if sp.tags == nil || cap(sp.tags) < tagsTotalLength { - sp.tags = make([]Tag, 0, tagsTotalLength) - } - sp.tags = append(sp.tags, internalTags...) 
- for k, v := range options.Tags { - sp.setTagInternal(k, v, false) - } - } - t.emitNewSpanMetrics(sp, newTrace) - return sp -} - -// Inject implements Inject() method of opentracing.Tracer -func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error { - c, ok := ctx.(SpanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - if injector, ok := t.injectors[format]; ok { - return injector.Inject(c, carrier) - } - return opentracing.ErrUnsupportedFormat -} - -// Extract implements Extract() method of opentracing.Tracer -func (t *Tracer) Extract( - format interface{}, - carrier interface{}, -) (opentracing.SpanContext, error) { - if extractor, ok := t.extractors[format]; ok { - spanCtx, err := extractor.Extract(carrier) - if err != nil { - return nil, err // ensure returned spanCtx is nil - } - spanCtx.remote = true - return spanCtx, nil - } - return nil, opentracing.ErrUnsupportedFormat -} - -// Close releases all resources used by the Tracer and flushes any remaining buffered spans. -func (t *Tracer) Close() error { - t.logger.Debugf("closing tracer") - t.reporter.Close() - t.sampler.Close() - if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok { - _ = mgr.Close() - } - if throttler, ok := t.debugThrottler.(io.Closer); ok { - _ = throttler.Close() - } - return nil -} - -// Tags returns a slice of tracer-level tags. -func (t *Tracer) Tags() []opentracing.Tag { - tags := make([]opentracing.Tag, len(t.tags)) - for i, tag := range t.tags { - tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value} - } - return tags -} - -// getTag returns the value of specific tag, if not exists, return nil. -// TODO only used by tests, move there. -func (t *Tracer) getTag(key string) (interface{}, bool) { - for _, tag := range t.tags { - if tag.key == key { - return tag.value, true - } - } - return nil, false -} - -// newSpan returns an instance of a clean Span object. 
-// If options.PoolSpans is true, the spans are retrieved from an object pool. -func (t *Tracer) newSpan() *Span { - return t.spanAllocator.Get() -} - -// emitNewSpanMetrics generates metrics on the number of started spans and traces. -// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the -// server-side RPC span has the exact same trace/span/parent IDs as the -// calling client-side span, but obviously the server side span is -// no longer a root span of the trace. -func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) { - if !sp.context.isSamplingFinalized() { - t.metrics.SpansStartedDelayedSampling.Inc(1) - if newTrace { - t.metrics.TracesStartedDelayedSampling.Inc(1) - } - // joining a trace is not possible, because sampling decision inherited from upstream is final - } else if sp.context.IsSampled() { - t.metrics.SpansStartedSampled.Inc(1) - if newTrace { - t.metrics.TracesStartedSampled.Inc(1) - } else if sp.firstInProcess { - t.metrics.TracesJoinedSampled.Inc(1) - } - } else { - t.metrics.SpansStartedNotSampled.Inc(1) - if newTrace { - t.metrics.TracesStartedNotSampled.Inc(1) - } else if sp.firstInProcess { - t.metrics.TracesJoinedNotSampled.Inc(1) - } - } -} - -func (t *Tracer) reportSpan(sp *Span) { - ctx := sp.SpanContext() - - if !ctx.isSamplingFinalized() { - t.metrics.SpansFinishedDelayedSampling.Inc(1) - } else if ctx.IsSampled() { - t.metrics.SpansFinishedSampled.Inc(1) - } else { - t.metrics.SpansFinishedNotSampled.Inc(1) - } - - // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span, - // and then Release() it when no longer needed. - // Otherwise, the span may be reused for another trace and its data may be overwritten. - if ctx.IsSampled() { - t.reporter.Report(sp) - } - - sp.Release() -} - -// randomID generates a random trace/span ID, using tracer.random() generator. -// It never returns 0. 
-func (t *Tracer) randomID() uint64 { - val := t.randomNumber() - for val == 0 { - val = t.randomNumber() - } - return val -} - -// (NB) span must hold the lock before making this call -func (t *Tracer) setBaggage(sp *Span, key, value string) { - t.baggageSetter.setBaggage(sp, key, value) -} - -// (NB) span must hold the lock before making this call -func (t *Tracer) isDebugAllowed(operation string) bool { - return t.debugThrottler.IsAllowed(operation) -} - -// Sampler returns the sampler given to the tracer at creation. -func (t *Tracer) Sampler() SamplerV2 { - return t.sampler -} - -// SelfRef creates an opentracing compliant SpanReference from a jaeger -// SpanContext. This is a factory function in order to encapsulate jaeger specific -// types. -func SelfRef(ctx SpanContext) opentracing.SpanReference { - return opentracing.SpanReference{ - Type: selfRefType, - ReferencedContext: ctx, - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go deleted file mode 100644 index 16b4606564..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "time" - - "github.com/opentracing/opentracing-go" - - "github.com/uber/jaeger-client-go/internal/baggage" - "github.com/uber/jaeger-client-go/internal/throttler" - "github.com/uber/jaeger-client-go/log" -) - -// TracerOption is a function that sets some option on the tracer -type TracerOption func(tracer *Tracer) - -// TracerOptions is a factory for all available TracerOption's. -var TracerOptions TracerOptionsFactory - -// TracerOptionsFactory is a struct that defines functions for all available TracerOption's. -type TracerOptionsFactory struct{} - -// Metrics creates a TracerOption that initializes Metrics on the tracer, -// which is used to emit statistics. -func (TracerOptionsFactory) Metrics(m *Metrics) TracerOption { - return func(tracer *Tracer) { - tracer.metrics = *m - } -} - -// Logger creates a TracerOption that gives the tracer a Logger. -func (TracerOptionsFactory) Logger(logger Logger) TracerOption { - return func(tracer *Tracer) { - tracer.logger = log.DebugLogAdapter(logger) - } -} - -// CustomHeaderKeys allows to override default HTTP header keys used to propagate -// tracing context. -func (TracerOptionsFactory) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption { - return func(tracer *Tracer) { - if headerKeys == nil { - return - } - textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics) - tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator) - - httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics) - tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator) - } -} - -// TimeNow creates a TracerOption that gives the tracer a function -// used to generate timestamps for spans. 
-func (TracerOptionsFactory) TimeNow(timeNow func() time.Time) TracerOption { - return func(tracer *Tracer) { - tracer.timeNow = timeNow - } -} - -// RandomNumber creates a TracerOption that gives the tracer -// a thread-safe random number generator function for generating trace IDs. -func (TracerOptionsFactory) RandomNumber(randomNumber func() uint64) TracerOption { - return func(tracer *Tracer) { - tracer.randomNumber = randomNumber - } -} - -// PoolSpans creates a TracerOption that tells the tracer whether it should use -// an object pool to minimize span allocations. -// This should be used with care, only if the service is not running any async tasks -// that can access parent spans after those spans have been finished. -func (TracerOptionsFactory) PoolSpans(poolSpans bool) TracerOption { - return func(tracer *Tracer) { - if poolSpans { - tracer.spanAllocator = newSyncPollSpanAllocator() - } else { - tracer.spanAllocator = simpleSpanAllocator{} - } - } -} - -// HostIPv4 creates a TracerOption that identifies the current service/process. -// If not set, the factory method will obtain the current IP address. -// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP. -// -// Deprecated. -func (TracerOptionsFactory) HostIPv4(hostIPv4 uint32) TracerOption { - return func(tracer *Tracer) { - tracer.hostIPv4 = hostIPv4 - } -} - -// Injector registers a Injector for given format. -func (TracerOptionsFactory) Injector(format interface{}, injector Injector) TracerOption { - return func(tracer *Tracer) { - tracer.injectors[format] = injector - } -} - -// Extractor registers an Extractor for given format. -func (TracerOptionsFactory) Extractor(format interface{}, extractor Extractor) TracerOption { - return func(tracer *Tracer) { - tracer.extractors[format] = extractor - } -} - -// Observer registers an Observer. 
-func (t TracerOptionsFactory) Observer(observer Observer) TracerOption { - return t.ContribObserver(&oldObserver{obs: observer}) -} - -// ContribObserver registers a ContribObserver. -func (TracerOptionsFactory) ContribObserver(observer ContribObserver) TracerOption { - return func(tracer *Tracer) { - tracer.observer.append(observer) - } -} - -// Gen128Bit enables generation of 128bit trace IDs. -func (TracerOptionsFactory) Gen128Bit(gen128Bit bool) TracerOption { - return func(tracer *Tracer) { - tracer.options.gen128Bit = gen128Bit - } -} - -// NoDebugFlagOnForcedSampling turns off setting the debug flag in the trace context -// when the trace is force-started via sampling=1 span tag. -func (TracerOptionsFactory) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption { - return func(tracer *Tracer) { - tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling - } -} - -// HighTraceIDGenerator allows to override define ID generator. -func (TracerOptionsFactory) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption { - return func(tracer *Tracer) { - tracer.options.highTraceIDGenerator = highTraceIDGenerator - } -} - -// MaxTagValueLength sets the limit on the max length of tag values. -func (TracerOptionsFactory) MaxTagValueLength(maxTagValueLength int) TracerOption { - return func(tracer *Tracer) { - tracer.options.maxTagValueLength = maxTagValueLength - } -} - -// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero -// value). If a span has more logs than this value, logs are dropped as -// necessary (and replaced with a log describing how many were dropped). -// -// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about -// half are the newest logs. 
-func (TracerOptionsFactory) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption { - return func(tracer *Tracer) { - tracer.options.maxLogsPerSpan = maxLogsPerSpan - } -} - -// ZipkinSharedRPCSpan enables a mode where server-side span shares the span ID -// from the client span from the incoming request, for compatibility with Zipkin's -// "one span per RPC" model. -func (TracerOptionsFactory) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption { - return func(tracer *Tracer) { - tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan - } -} - -// Tag adds a tracer-level tag that will be added to all spans. -func (TracerOptionsFactory) Tag(key string, value interface{}) TracerOption { - return func(tracer *Tracer) { - tracer.tags = append(tracer.tags, Tag{key: key, value: value}) - } -} - -// BaggageRestrictionManager registers BaggageRestrictionManager. -func (TracerOptionsFactory) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption { - return func(tracer *Tracer) { - tracer.baggageRestrictionManager = mgr - } -} - -// DebugThrottler registers a Throttler for debug spans. -func (TracerOptionsFactory) DebugThrottler(throttler throttler.Throttler) TracerOption { - return func(tracer *Tracer) { - tracer.debugThrottler = throttler - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go deleted file mode 100644 index c5f5b19551..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "io" -) - -// Transport abstracts the method of sending spans out of process. -// Implementations are NOT required to be thread-safe; the RemoteReporter -// is expected to only call methods on the Transport from the same go-routine. -type Transport interface { - // Append converts the span to the wire representation and adds it - // to sender's internal buffer. If the buffer exceeds its designated - // size, the transport should call Flush() and return the number of spans - // flushed, otherwise return 0. If error is returned, the returned number - // of spans is treated as failed span, and reported to metrics accordingly. - Append(span *Span) (int, error) - - // Flush submits the internal buffer to the remote server. It returns the - // number of spans flushed. If error is returned, the returned number of - // spans is treated as failed span, and reported to metrics accordingly. - Flush() (int, error) - - io.Closer -} diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go deleted file mode 100644 index 6b961fb637..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport defines various transports that can be used with -// RemoteReporter to send spans out of process. Transport is responsible -// for serializing the spans into a specific format suitable for sending -// to the tracing backend. Examples may include Thrift over UDP, Thrift -// or JSON over HTTP, Thrift over Kafka, etc. -// -// Implementations are NOT required to be thread-safe; the RemoteReporter -// is expected to only call methods on the Transport from the same go-routine. -package transport diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go deleted file mode 100644 index 1d6f14d328..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport/http.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package transport - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/uber/jaeger-client-go/thrift" - - "github.com/uber/jaeger-client-go" - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" -) - -// Default timeout for http request in seconds -const defaultHTTPTimeout = time.Second * 5 - -// HTTPTransport implements Transport by forwarding spans to a http server. -type HTTPTransport struct { - url string - client *http.Client - batchSize int - spans []*j.Span - process *j.Process - httpCredentials *HTTPBasicAuthCredentials - headers map[string]string -} - -// HTTPBasicAuthCredentials stores credentials for HTTP basic auth. -type HTTPBasicAuthCredentials struct { - username string - password string -} - -// HTTPOption sets a parameter for the HttpCollector -type HTTPOption func(c *HTTPTransport) - -// HTTPTimeout sets maximum timeout for http request. -func HTTPTimeout(duration time.Duration) HTTPOption { - return func(c *HTTPTransport) { c.client.Timeout = duration } -} - -// HTTPBatchSize sets the maximum batch size, after which a collect will be -// triggered. The default batch size is 100 spans. 
-func HTTPBatchSize(n int) HTTPOption { - return func(c *HTTPTransport) { c.batchSize = n } -} - -// HTTPBasicAuth sets the credentials required to perform HTTP basic auth -func HTTPBasicAuth(username string, password string) HTTPOption { - return func(c *HTTPTransport) { - c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password} - } -} - -// HTTPRoundTripper configures the underlying Transport on the *http.Client -// that is used -func HTTPRoundTripper(transport http.RoundTripper) HTTPOption { - return func(c *HTTPTransport) { - c.client.Transport = transport - } -} - -// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request -func HTTPHeaders(headers map[string]string) HTTPOption { - return func(c *HTTPTransport) { - c.headers = headers - } -} - -// NewHTTPTransport returns a new HTTP-backend transport. url should be an http -// url of the collector to handle POST request, typically something like: -// http://hostname:14268/api/traces?format=jaeger.thrift -func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport { - c := &HTTPTransport{ - url: url, - client: &http.Client{Timeout: defaultHTTPTimeout}, - batchSize: 100, - spans: []*j.Span{}, - } - - for _, option := range options { - option(c) - } - return c -} - -// Append implements Transport. -func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) { - if c.process == nil { - c.process = jaeger.BuildJaegerProcessThrift(span) - } - jSpan := jaeger.BuildJaegerThrift(span) - c.spans = append(c.spans, jSpan) - if len(c.spans) >= c.batchSize { - return c.Flush() - } - return 0, nil -} - -// Flush implements Transport. -func (c *HTTPTransport) Flush() (int, error) { - count := len(c.spans) - if count == 0 { - return 0, nil - } - err := c.send(c.spans) - c.spans = c.spans[:0] - return count, err -} - -// Close implements Transport. 
-func (c *HTTPTransport) Close() error { - return nil -} - -func (c *HTTPTransport) send(spans []*j.Span) error { - batch := &j.Batch{ - Spans: spans, - Process: c.process, - } - body, err := serializeThrift(batch) - if err != nil { - return err - } - req, err := http.NewRequest("POST", c.url, body) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/x-thrift") - for k, v := range c.headers { - req.Header.Set(k, v) - } - - if c.httpCredentials != nil { - req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password) - } - - resp, err := c.client.Do(req) - if err != nil { - return err - } - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - if resp.StatusCode >= http.StatusBadRequest { - return fmt.Errorf("error from collector: %d", resp.StatusCode) - } - return nil -} - -func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) { - t := thrift.NewTMemoryBuffer() - p := thrift.NewTBinaryProtocolTransport(t) - if err := obj.Write(context.Background(), p); err != nil { - return nil, err - } - return t.Buffer, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go deleted file mode 100644 index 00004124c8..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jaeger - -import ( - "context" - "errors" - "fmt" - - "github.com/uber/jaeger-client-go/internal/reporterstats" - "github.com/uber/jaeger-client-go/log" - "github.com/uber/jaeger-client-go/thrift" - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/utils" -) - -// Empirically obtained constant for how many bytes in the message are used for envelope. -// The total datagram size is: -// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize -// -// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans -// in the batch, because the length of the list is encoded as varint32, as well as SeqId. -// -// There is a unit test `TestEmitBatchOverhead` that validates this number, it fails at <68. -const emitBatchOverhead = 70 - -var errSpanTooLarge = errors.New("span is too large") - -type udpSender struct { - client *utils.AgentClientUDP - maxPacketSize int // max size of datagram in bytes - maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram - byteBufferSize int // current number of span bytes accumulated in the buffer - spanBuffer []*j.Span // spans buffered before a flush - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span - thriftProtocol thrift.TProtocol - process *j.Process - processByteSize int - - // reporterStats provides access to stats that are only known to Reporter - reporterStats reporterstats.ReporterStats - - // The following counters are always non-negative, but we need to send them in signed i64 Thrift fields, - // so we keep them as signed. At 10k QPS, overflow happens in about 300 million years. - batchSeqNo int64 - tooLargeDroppedSpans int64 - failedToEmitSpans int64 -} - -// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should -// be passed to NewUDPTransportWithParams. 
-type UDPTransportParams struct { - utils.AgentClientUDPParams -} - -// NewUDPTransportWithParams creates a reporter that submits spans to jaeger-agent. -// TODO: (breaking change) move to transport/ package. -func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) { - if len(params.HostPort) == 0 { - params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort) - } - - if params.Logger == nil { - params.Logger = log.StdLogger - } - - if params.MaxPacketSize == 0 { - params.MaxPacketSize = utils.UDPPacketMaxLength - } - - protocolFactory := thrift.NewTCompactProtocolFactory() - - // Each span is first written to thriftBuffer to determine its size in bytes. - thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) - thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) - - client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams) - if err != nil { - return nil, err - } - - return &udpSender{ - client: client, - maxSpanBytes: params.MaxPacketSize - emitBatchOverhead, - thriftBuffer: thriftBuffer, - thriftProtocol: thriftProtocol, - }, nil -} - -// NewUDPTransport creates a reporter that submits spans to jaeger-agent. -// TODO: (breaking change) move to transport/ package. -func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) { - return NewUDPTransportWithParams(UDPTransportParams{ - AgentClientUDPParams: utils.AgentClientUDPParams{ - HostPort: hostPort, - MaxPacketSize: maxPacketSize, - }, - }) -} - -// SetReporterStats implements reporterstats.Receiver. 
-func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) { - s.reporterStats = rs -} - -func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int { - s.thriftBuffer.Reset() - _ = thriftStruct.Write(context.Background(), s.thriftProtocol) - return s.thriftBuffer.Len() -} - -func (s *udpSender) Append(span *Span) (int, error) { - if s.process == nil { - s.process = BuildJaegerProcessThrift(span) - s.processByteSize = s.calcSizeOfSerializedThrift(s.process) - s.byteBufferSize += s.processByteSize - } - jSpan := BuildJaegerThrift(span) - spanSize := s.calcSizeOfSerializedThrift(jSpan) - if spanSize > s.maxSpanBytes { - s.tooLargeDroppedSpans++ - return 1, errSpanTooLarge - } - - s.byteBufferSize += spanSize - if s.byteBufferSize <= s.maxSpanBytes { - s.spanBuffer = append(s.spanBuffer, jSpan) - if s.byteBufferSize < s.maxSpanBytes { - return 0, nil - } - return s.Flush() - } - // the latest span did not fit in the buffer - n, err := s.Flush() - s.spanBuffer = append(s.spanBuffer, jSpan) - s.byteBufferSize = spanSize + s.processByteSize - return n, err -} - -func (s *udpSender) Flush() (int, error) { - n := len(s.spanBuffer) - if n == 0 { - return 0, nil - } - s.batchSeqNo++ - batchSeqNo := int64(s.batchSeqNo) - err := s.client.EmitBatch(context.Background(), &j.Batch{ - Process: s.process, - Spans: s.spanBuffer, - SeqNo: &batchSeqNo, - Stats: s.makeStats(), - }) - s.resetBuffers() - if err != nil { - s.failedToEmitSpans += int64(n) - } - return n, err -} - -func (s *udpSender) Close() error { - return s.client.Close() -} - -func (s *udpSender) resetBuffers() { - for i := range s.spanBuffer { - s.spanBuffer[i] = nil - } - s.spanBuffer = s.spanBuffer[:0] - s.byteBufferSize = s.processByteSize -} - -func (s *udpSender) makeStats() *j.ClientStats { - var dropped int64 - if s.reporterStats != nil { - dropped = s.reporterStats.SpansDroppedFromQueue() - } - return &j.ClientStats{ - FullQueueDroppedSpans: dropped, - TooLargeDroppedSpans: 
s.tooLargeDroppedSpans, - FailedToEmitSpans: s.failedToEmitSpans, - } -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go deleted file mode 100644 index 237211f822..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" -) - -// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`. 
-func GetJSON(url string, out interface{}) error { - resp, err := http.Get(url) - if err != nil { - return err - } - return ReadJSON(resp, out) -} - -// ReadJSON reads JSON from http.Response and parses it into `out` -func ReadJSON(resp *http.Response, out interface{}) error { - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body) - } - - if out == nil { - io.Copy(ioutil.Discard, resp.Body) - return nil - } - - decoder := json.NewDecoder(resp.Body) - return decoder.Decode(out) -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go deleted file mode 100644 index b51af7713f..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/localip.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "errors" - "net" -) - -// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go - -// scoreAddr scores how likely the given addr is to be a remote address and returns the -// IP to use when listening. Any address which receives a negative score should not be used. -// Scores are calculated as: -// -1 for any unknown IP addresses. 
-// +300 for IPv4 addresses -// +100 for non-local addresses, extra +100 for "up" interaces. -func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) { - var ip net.IP - if netAddr, ok := addr.(*net.IPNet); ok { - ip = netAddr.IP - } else if netIP, ok := addr.(*net.IPAddr); ok { - ip = netIP.IP - } else { - return -1, nil - } - - var score int - if ip.To4() != nil { - score += 300 - } - if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() { - score += 100 - if iface.Flags&net.FlagUp != 0 { - score += 100 - } - } - return score, ip -} - -// HostIP tries to find an IP that can be used by other machines to reach this machine. -func HostIP() (net.IP, error) { - interfaces, err := net.Interfaces() - if err != nil { - return nil, err - } - - bestScore := -1 - var bestIP net.IP - // Select the highest scoring IP as the best IP. - for _, iface := range interfaces { - addrs, err := iface.Addrs() - if err != nil { - // Skip this interface if there is an error. - continue - } - - for _, addr := range addrs { - score, ip := scoreAddr(iface, addr) - if score > bestScore { - bestScore = score - bestIP = ip - } - } - } - - if bestScore == -1 { - return nil, errors.New("no addresses to listen on") - } - - return bestIP, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go deleted file mode 100644 index 9875f7f55c..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/rand.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "math/rand" - "sync" -) - -// lockedSource allows a random number generator to be used by multiple goroutines concurrently. -// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed. -type lockedSource struct { - mut sync.Mutex - src rand.Source -} - -// NewRand returns a rand.Rand that is threadsafe. -func NewRand(seed int64) *rand.Rand { - return rand.New(&lockedSource{src: rand.NewSource(seed)}) -} - -func (r *lockedSource) Int63() (n int64) { - r.mut.Lock() - n = r.src.Int63() - r.mut.Unlock() - return -} - -// Seed implements Seed() of Source -func (r *lockedSource) Seed(seed int64) { - r.mut.Lock() - r.src.Seed(seed) - r.mut.Unlock() -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go deleted file mode 100644 index bf2f13165b..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "sync" - "time" -) - -// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits. -// -// TODO (breaking change) remove this interface in favor of public struct below -// -// Deprecated, use ReconfigurableRateLimiter. -type RateLimiter interface { - CheckCredit(itemCost float64) bool -} - -// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a -// credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional -// to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost -// of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased" -// and the balance reduced, indicated by returned value of true. Otherwise the balance is unchanged and return false. -// -// This can be used to limit a rate of messages emitted by a service by instantiating the Rate Limiter with the -// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message -// to determine if the message is within the rate limit. -// -// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput -// as bytes/second, and calling CheckCredit() with the actual message size. -// -// TODO (breaking change) rename to RateLimiter once the interface is removed -type ReconfigurableRateLimiter struct { - lock sync.Mutex - - creditsPerSecond float64 - balance float64 - maxBalance float64 - lastTick time.Time - - timeNow func() time.Time -} - -// NewRateLimiter creates a new ReconfigurableRateLimiter. 
-func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter { - return &ReconfigurableRateLimiter{ - creditsPerSecond: creditsPerSecond, - balance: maxBalance, - maxBalance: maxBalance, - lastTick: time.Now(), - timeNow: time.Now, - } -} - -// CheckCredit tries to reduce the current balance by itemCost provided that the current balance -// is not lest than itemCost. -func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool { - rl.lock.Lock() - defer rl.lock.Unlock() - - // if we have enough credits to pay for current item, then reduce balance and allow - if rl.balance >= itemCost { - rl.balance -= itemCost - return true - } - // otherwise check if balance can be increased due to time elapsed, and try again - rl.updateBalance() - if rl.balance >= itemCost { - rl.balance -= itemCost - return true - } - return false -} - -// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock. -func (rl *ReconfigurableRateLimiter) updateBalance() { - // calculate how much time passed since the last tick, and update current tick - currentTime := rl.timeNow() - elapsedTime := currentTime.Sub(rl.lastTick) - rl.lastTick = currentTime - // calculate how much credit have we accumulated since the last tick - rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond - if rl.balance > rl.maxBalance { - rl.balance = rl.maxBalance - } -} - -// Update changes the main parameters of the rate limiter in-place, while retaining -// the current accumulated balance (pro-rated to the new maxBalance value). Using this method -// instead of creating a new rate limiter helps to avoid thundering herd when sampling -// strategies are updated. 
-func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) { - rl.lock.Lock() - defer rl.lock.Unlock() - - rl.updateBalance() // get up to date balance - rl.balance = rl.balance * maxBalance / rl.maxBalance - rl.creditsPerSecond = creditsPerSecond - rl.maxBalance = maxBalance -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go deleted file mode 100644 index 0dffc7fa24..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "fmt" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/uber/jaeger-client-go/log" -) - -// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is -// different than the current conn then the new address is dialed and the conn is swapped. 
-type reconnectingUDPConn struct { - hostPort string - resolveFunc resolveFunc - dialFunc dialFunc - logger log.Logger - bufferBytes int64 - - connMtx sync.RWMutex - conn *net.UDPConn - destAddr *net.UDPAddr - closeChan chan struct{} -} - -type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) -type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) - -// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is -// different than the current conn then the new address is dialed and the conn is swapped. -func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) { - conn := &reconnectingUDPConn{ - hostPort: hostPort, - resolveFunc: resolveFunc, - dialFunc: dialFunc, - logger: logger, - closeChan: make(chan struct{}), - } - - if err := conn.attemptResolveAndDial(); err != nil { - logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. 
retrying in %s", err.Error(), resolveTimeout)) - } - - go conn.reconnectLoop(resolveTimeout) - - return conn, nil -} - -func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { - ticker := time.NewTicker(resolveTimeout) - defer ticker.Stop() - - for { - select { - case <-c.closeChan: - return - case <-ticker.C: - if err := c.attemptResolveAndDial(); err != nil { - c.logger.Error(err.Error()) - } - } - } -} - -func (c *reconnectingUDPConn) attemptResolveAndDial() error { - newAddr, err := c.resolveFunc("udp", c.hostPort) - if err != nil { - return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) - } - - c.connMtx.RLock() - curAddr := c.destAddr - c.connMtx.RUnlock() - - // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn - if curAddr != nil && newAddr.String() == curAddr.String() { - return nil - } - - if err := c.attemptDialNewAddr(newAddr); err != nil { - return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) - } - - return nil -} - -func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { - connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) - if err != nil { - return err - } - - if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { - if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { - return err - } - } - - c.connMtx.Lock() - c.destAddr = newAddr - // store prev to close later - prevConn := c.conn - c.conn = connUDP - c.connMtx.Unlock() - - if prevConn != nil { - return prevConn.Close() - } - - return nil -} - -// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning -func (c *reconnectingUDPConn) Write(b []byte) (int, error) { - var bytesWritten int - var err error - - c.connMtx.RLock() - if c.conn == nil { - // if connection is not initialized indicate this with err 
in order to hook into retry logic - err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") - } else { - bytesWritten, err = c.conn.Write(b) - } - c.connMtx.RUnlock() - - if err == nil { - return bytesWritten, nil - } - - // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again - if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { - c.connMtx.RLock() - defer c.connMtx.RUnlock() - return c.conn.Write(b) - } - - // return original error if reconn fails - return bytesWritten, err -} - -// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation -func (c *reconnectingUDPConn) Close() error { - close(c.closeChan) - - // acquire rw lock before closing conn to ensure calls to Write drain - c.connMtx.Lock() - defer c.connMtx.Unlock() - - if c.conn != nil { - return c.conn.Close() - } - - return nil -} - -// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held -// and SetWriteBuffer is called store bufferBytes to be set for new conns -func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { - var err error - - c.connMtx.RLock() - if c.conn != nil { - err = c.conn.SetWriteBuffer(bytes) - } - c.connMtx.RUnlock() - - if err == nil { - atomic.StoreInt64(&c.bufferBytes, int64(bytes)) - } - - return err -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go deleted file mode 100644 index 4c59ae9dd8..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "time" - - "github.com/uber/jaeger-client-go/log" - "github.com/uber/jaeger-client-go/thrift" - - "github.com/uber/jaeger-client-go/thrift-gen/agent" - "github.com/uber/jaeger-client-go/thrift-gen/jaeger" - "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" -) - -// UDPPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent -const UDPPacketMaxLength = 65000 - -// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface. -type AgentClientUDP struct { - agent.Agent - io.Closer - - connUDP udpConn - client *agent.AgentClient - maxPacketSize int // max size of datagram in bytes - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span -} - -type udpConn interface { - Write([]byte) (int, error) - SetWriteBuffer(int) error - Close() error -} - -// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should -// be passed to NewAgentClientUDPWithParams. -type AgentClientUDPParams struct { - HostPort string - MaxPacketSize int - Logger log.Logger - DisableAttemptReconnecting bool - AttemptReconnectInterval time.Duration -} - -// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP. 
-func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) { - // validate hostport - if _, _, err := net.SplitHostPort(params.HostPort); err != nil { - return nil, err - } - - if params.MaxPacketSize == 0 { - params.MaxPacketSize = UDPPacketMaxLength - } - - if params.Logger == nil { - params.Logger = log.StdLogger - } - - if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 { - params.AttemptReconnectInterval = time.Second * 30 - } - - thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) - protocolFactory := thrift.NewTCompactProtocolFactory() - client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory) - - var connUDP udpConn - var err error - - if params.DisableAttemptReconnecting { - destAddr, err := net.ResolveUDPAddr("udp", params.HostPort) - if err != nil { - return nil, err - } - - connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err - } - } else { - // host is hostname, setup resolver loop in case host record changes during operation - connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) - if err != nil { - return nil, err - } - } - - if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { - return nil, err - } - - return &AgentClientUDP{ - connUDP: connUDP, - client: client, - maxPacketSize: params.MaxPacketSize, - thriftBuffer: thriftBuffer, - }, nil -} - -// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
-func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) { - return NewAgentClientUDPWithParams(AgentClientUDPParams{ - HostPort: hostPort, - MaxPacketSize: maxPacketSize, - }) -} - -// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface -func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error { - return errors.New("Not implemented") -} - -// EmitBatch implements EmitBatch() of Agent interface -func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error { - a.thriftBuffer.Reset() - if err := a.client.EmitBatch(ctx, batch); err != nil { - return err - } - if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", - a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) - } - _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) - return err -} - -// Close implements Close() of io.Closer and closes the underlying UDP connection. -func (a *AgentClientUDP) Close() error { - return a.connUDP.Close() -} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go deleted file mode 100644 index ac3c325d1e..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/utils/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package utils - -import ( - "encoding/binary" - "errors" - "net" - "strconv" - "strings" - "time" -) - -var ( - // ErrEmptyIP an error for empty ip strings - ErrEmptyIP = errors.New("empty string given for ip") - - // ErrNotHostColonPort an error for invalid host port string - ErrNotHostColonPort = errors.New("expecting host:port") - - // ErrNotFourOctets an error for the wrong number of octets after splitting a string - ErrNotFourOctets = errors.New("Wrong number of octets") -) - -// ParseIPToUint32 converts a string ip (e.g. "x.y.z.w") to an uint32 -func ParseIPToUint32(ip string) (uint32, error) { - if ip == "" { - return 0, ErrEmptyIP - } - - if ip == "localhost" { - return 127<<24 | 1, nil - } - - octets := strings.Split(ip, ".") - if len(octets) != 4 { - return 0, ErrNotFourOctets - } - - var intIP uint32 - for i := 0; i < 4; i++ { - octet, err := strconv.Atoi(octets[i]) - if err != nil { - return 0, err - } - intIP = (intIP << 8) | uint32(octet) - } - - return intIP, nil -} - -// ParsePort converts port number from string to uin16 -func ParsePort(portString string) (uint16, error) { - port, err := strconv.ParseUint(portString, 10, 16) - return uint16(port), err -} - -// PackIPAsUint32 packs an IPv4 as uint32 -func PackIPAsUint32(ip net.IP) uint32 { - if ipv4 := ip.To4(); ipv4 != nil { - return binary.BigEndian.Uint32(ipv4) - } - return 0 -} - -// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long -// representing time since epoch in microseconds, which is used expected -// in the Jaeger spans encoded as Thrift. -func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 { - // ^^^ Passing time.Time by value is faster than passing a pointer! 
- // BenchmarkTimeByValue-8 2000000000 1.37 ns/op - // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op - - return t.UnixNano() / 1000 -} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go deleted file mode 100644 index 98cab4b6ef..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/zipkin.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "github.com/opentracing/opentracing-go" -) - -// ZipkinSpanFormat is an OpenTracing carrier format constant -const ZipkinSpanFormat = "zipkin-span-format" - -// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware -// RPC frameworks (like TChannel). It does not support baggage, only trace IDs. -type ExtractableZipkinSpan interface { - TraceID() uint64 - SpanID() uint64 - ParentID() uint64 - Flags() byte -} - -// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware -// RPC frameworks (like TChannel). It does not support baggage, only trace IDs. 
-type InjectableZipkinSpan interface { - SetTraceID(traceID uint64) - SetSpanID(spanID uint64) - SetParentID(parentID uint64) - SetFlags(flags byte) -} - -type zipkinPropagator struct { - tracer *Tracer -} - -func (p *zipkinPropagator) Inject( - ctx SpanContext, - abstractCarrier interface{}, -) error { - carrier, ok := abstractCarrier.(InjectableZipkinSpan) - if !ok { - return opentracing.ErrInvalidCarrier - } - - carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs - carrier.SetSpanID(uint64(ctx.SpanID())) - carrier.SetParentID(uint64(ctx.ParentID())) - carrier.SetFlags(ctx.samplingState.flags()) - return nil -} - -func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { - carrier, ok := abstractCarrier.(ExtractableZipkinSpan) - if !ok { - return emptyContext, opentracing.ErrInvalidCarrier - } - if carrier.TraceID() == 0 { - return emptyContext, opentracing.ErrSpanContextNotFound - } - var ctx SpanContext - ctx.traceID.Low = carrier.TraceID() - ctx.spanID = SpanID(carrier.SpanID()) - ctx.parentID = SpanID(carrier.ParentID()) - ctx.samplingState = &samplingState{} - ctx.samplingState.setFlags(carrier.Flags()) - return ctx, nil -} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go deleted file mode 100644 index 73aeb000f8..0000000000 --- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "encoding/binary" - "fmt" - "time" - - "github.com/opentracing/opentracing-go/ext" - - "github.com/uber/jaeger-client-go/internal/spanlog" - z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" - "github.com/uber/jaeger-client-go/utils" -) - -const ( - // Zipkin UI does not work well with non-string tag values - allowPackedNumbers = false -) - -var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){ - string(ext.SpanKind): setSpanKind, - string(ext.PeerHostIPv4): setPeerIPv4, - string(ext.PeerPort): setPeerPort, - string(ext.PeerService): setPeerService, - TracerIPTagKey: removeTag, -} - -// BuildZipkinThrift builds thrift span based on internal span. -// TODO: (breaking change) move to transport/zipkin and make private. 
-func BuildZipkinThrift(s *Span) *z.Span { - span := &zipkinSpan{Span: s} - span.handleSpecialTags() - parentID := int64(span.context.parentID) - var ptrParentID *int64 - if parentID != 0 { - ptrParentID = &parentID - } - traceIDHigh := int64(span.context.traceID.High) - var ptrTraceIDHigh *int64 - if traceIDHigh != 0 { - ptrTraceIDHigh = &traceIDHigh - } - timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) - duration := span.duration.Nanoseconds() / int64(time.Microsecond) - endpoint := &z.Endpoint{ - ServiceName: span.tracer.serviceName, - Ipv4: int32(span.tracer.hostIPv4)} - thriftSpan := &z.Span{ - TraceID: int64(span.context.traceID.Low), - TraceIDHigh: ptrTraceIDHigh, - ID: int64(span.context.spanID), - ParentID: ptrParentID, - Name: span.operationName, - Timestamp: ×tamp, - Duration: &duration, - Debug: span.context.IsDebug(), - Annotations: buildAnnotations(span, endpoint), - BinaryAnnotations: buildBinaryAnnotations(span, endpoint)} - return thriftSpan -} - -func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation { - // automatically adding 2 Zipkin CoreAnnotations - annotations := make([]*z.Annotation, 0, 2+len(span.logs)) - var startLabel, endLabel string - if span.spanKind == string(ext.SpanKindRPCClientEnum) { - startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV - } else if span.spanKind == string(ext.SpanKindRPCServerEnum) { - startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND - } - if !span.startTime.IsZero() && startLabel != "" { - start := &z.Annotation{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime), - Value: startLabel, - Host: endpoint} - annotations = append(annotations, start) - if span.duration != 0 { - endTs := span.startTime.Add(span.duration) - end := &z.Annotation{ - Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs), - Value: endLabel, - Host: endpoint} - annotations = append(annotations, end) - } - } - for _, log := range span.logs { - anno := &z.Annotation{ - 
Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), - Host: endpoint} - if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil { - anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength) - } else { - anno.Value = err.Error() - } - annotations = append(annotations, anno) - } - return annotations -} - -func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation { - // automatically adding local component or server/client address tag, and client version - annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags)) - - if span.peerDefined() && span.isRPC() { - peer := z.Endpoint{ - Ipv4: span.peer.Ipv4, - Port: span.peer.Port, - ServiceName: span.peer.ServiceName} - label := z.CLIENT_ADDR - if span.isRPCClient() { - label = z.SERVER_ADDR - } - anno := &z.BinaryAnnotation{ - Key: label, - Value: []byte{1}, - AnnotationType: z.AnnotationType_BOOL, - Host: &peer} - annotations = append(annotations, anno) - } - if !span.isRPC() { - componentName := endpoint.ServiceName - for _, tag := range span.tags { - if tag.key == string(ext.Component) { - componentName = stringify(tag.value) - break - } - } - local := &z.BinaryAnnotation{ - Key: z.LOCAL_COMPONENT, - Value: []byte(componentName), - AnnotationType: z.AnnotationType_STRING, - Host: endpoint} - annotations = append(annotations, local) - } - for _, tag := range span.tags { - // "Special tags" are already handled by this point, we'd be double reporting the - // tags if we don't skip here - if _, ok := specialTagHandlers[tag.key]; ok { - continue - } - if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil { - annotations = append(annotations, anno) - } - } - return annotations -} - -func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation { - bann := &z.BinaryAnnotation{Key: key, Host: endpoint} - if value, ok := 
val.(string); ok { - bann.Value = []byte(truncateString(value, maxTagValueLength)) - bann.AnnotationType = z.AnnotationType_STRING - } else if value, ok := val.([]byte); ok { - if len(value) > maxTagValueLength { - value = value[:maxTagValueLength] - } - bann.Value = value - bann.AnnotationType = z.AnnotationType_BYTES - } else if value, ok := val.(int32); ok && allowPackedNumbers { - bann.Value = int32ToBytes(value) - bann.AnnotationType = z.AnnotationType_I32 - } else if value, ok := val.(int64); ok && allowPackedNumbers { - bann.Value = int64ToBytes(value) - bann.AnnotationType = z.AnnotationType_I64 - } else if value, ok := val.(int); ok && allowPackedNumbers { - bann.Value = int64ToBytes(int64(value)) - bann.AnnotationType = z.AnnotationType_I64 - } else if value, ok := val.(bool); ok { - bann.Value = []byte{boolToByte(value)} - bann.AnnotationType = z.AnnotationType_BOOL - } else { - value := stringify(val) - bann.Value = []byte(truncateString(value, maxTagValueLength)) - bann.AnnotationType = z.AnnotationType_STRING - } - return bann -} - -func stringify(value interface{}) string { - if s, ok := value.(string); ok { - return s - } - return fmt.Sprintf("%+v", value) -} - -func truncateString(value string, maxLength int) string { - // we ignore the problem of utf8 runes possibly being sliced in the middle, - // as it is rather expensive to iterate through each tag just to find rune - // boundaries. - if len(value) > maxLength { - return value[:maxLength] - } - return value -} - -func boolToByte(b bool) byte { - if b { - return 1 - } - return 0 -} - -// int32ToBytes converts int32 to bytes. -func int32ToBytes(i int32) []byte { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(i)) - return buf -} - -// int64ToBytes converts int64 to bytes. 
-func int64ToBytes(i int64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - return buf -} - -type zipkinSpan struct { - *Span - - // peer points to the peer service participating in this span, - // e.g. the Client if this span is a server span, - // or Server if this span is a client span - peer struct { - Ipv4 int32 - Port int16 - ServiceName string - } - - // used to distinguish local vs. RPC Server vs. RPC Client spans - spanKind string -} - -func (s *zipkinSpan) handleSpecialTags() { - s.Lock() - defer s.Unlock() - if s.firstInProcess { - // append the process tags - s.tags = append(s.tags, s.tracer.tags...) - } - filteredTags := make([]Tag, 0, len(s.tags)) - for _, tag := range s.tags { - if handler, ok := specialTagHandlers[tag.key]; ok { - handler(s, tag.value) - } else { - filteredTags = append(filteredTags, tag) - } - } - s.tags = filteredTags -} - -func setSpanKind(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - s.spanKind = val - return - } - if val, ok := value.(ext.SpanKindEnum); ok { - s.spanKind = string(val) - } -} - -func setPeerIPv4(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - if ip, err := utils.ParseIPToUint32(val); err == nil { - s.peer.Ipv4 = int32(ip) - return - } - } - if val, ok := value.(uint32); ok { - s.peer.Ipv4 = int32(val) - return - } - if val, ok := value.(int32); ok { - s.peer.Ipv4 = val - } -} - -func setPeerPort(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - if port, err := utils.ParsePort(val); err == nil { - s.peer.Port = int16(port) - return - } - } - if val, ok := value.(uint16); ok { - s.peer.Port = int16(val) - return - } - if val, ok := value.(int); ok { - s.peer.Port = int16(val) - } -} - -func setPeerService(s *zipkinSpan, value interface{}) { - if val, ok := value.(string); ok { - s.peer.ServiceName = val - } -} - -func removeTag(s *zipkinSpan, value interface{}) {} - -func (s *zipkinSpan) peerDefined() 
bool { - return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0 -} - -func (s *zipkinSpan) isRPC() bool { - s.RLock() - defer s.RUnlock() - return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum) -} - -func (s *zipkinSpan) isRPCClient() bool { - s.RLock() - defer s.RUnlock() - return s.spanKind == string(ext.SpanKindRPCClientEnum) -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go deleted file mode 100644 index 2a6a43efdb..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/counter.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// Counter tracks the number of times an event has occurred -type Counter interface { - // Inc adds the given value to the counter. - Inc(int64) -} - -// NullCounter counter that does nothing -var NullCounter Counter = nullCounter{} - -type nullCounter struct{} - -func (nullCounter) Inc(int64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go deleted file mode 100644 index 0ead061ebd..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/factory.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "time" -) - -// NSOptions defines the name and tags map associated with a factory namespace -type NSOptions struct { - Name string - Tags map[string]string -} - -// Options defines the information associated with a metric -type Options struct { - Name string - Tags map[string]string - Help string -} - -// TimerOptions defines the information associated with a metric -type TimerOptions struct { - Name string - Tags map[string]string - Help string - Buckets []time.Duration -} - -// HistogramOptions defines the information associated with a metric -type HistogramOptions struct { - Name string - Tags map[string]string - Help string - Buckets []float64 -} - -// Factory creates new metrics -type Factory interface { - Counter(metric Options) Counter - Timer(metric TimerOptions) Timer - Gauge(metric Options) Gauge - Histogram(metric HistogramOptions) Histogram - - // Namespace returns a nested metrics factory. - Namespace(scope NSOptions) Factory -} - -// NullFactory is a metrics factory that returns NullCounter, NullTimer, and NullGauge. 
-var NullFactory Factory = nullFactory{} - -type nullFactory struct{} - -func (nullFactory) Counter(options Options) Counter { - return NullCounter -} -func (nullFactory) Timer(options TimerOptions) Timer { - return NullTimer -} -func (nullFactory) Gauge(options Options) Gauge { - return NullGauge -} -func (nullFactory) Histogram(options HistogramOptions) Histogram { - return NullHistogram -} -func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory } diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go deleted file mode 100644 index 3c606391a0..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// Gauge returns instantaneous measurements of something as an int64 value -type Gauge interface { - // Update the gauge to the value passed in. 
- Update(int64) -} - -// NullGauge gauge that does nothing -var NullGauge Gauge = nullGauge{} - -type nullGauge struct{} - -func (nullGauge) Update(int64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go deleted file mode 100644 index d3bd6174fe..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -// Histogram that keeps track of a distribution of values. -type Histogram interface { - // Records the value passed in. - Record(float64) -} - -// NullHistogram that does nothing -var NullHistogram Histogram = nullHistogram{} - -type nullHistogram struct{} - -func (nullHistogram) Record(float64) {} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go deleted file mode 100644 index c24445a106..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/keys.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "sort" -) - -// GetKey converts name+tags into a single string of the form -// "name|tag1=value1|...|tagN=valueN", where tag names are -// sorted alphabetically. -func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { - keys := make([]string, 0, len(tags)) - for k := range tags { - keys = append(keys, k) - } - sort.Strings(keys) - key := name - for _, k := range keys { - key = key + tagsSep + k + tagKVSep + tags[k] - } - return key -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go deleted file mode 100644 index 0df0c662e3..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -// MustInit initializes the passed in metrics and initializes its fields using the passed in factory. 
-// -// It uses reflection to initialize a struct containing metrics fields -// by assigning new Counter/Gauge/Timer values with the metric name retrieved -// from the `metric` tag and stats tags retrieved from the `tags` tag. -// -// Note: all fields of the struct must be exported, have a `metric` tag, and be -// of type Counter or Gauge or Timer. -// -// Errors during Init lead to a panic. -func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) { - if err := Init(metrics, factory, globalTags); err != nil { - panic(err.Error()) - } -} - -// Init does the same as MustInit, but returns an error instead of -// panicking. -func Init(m interface{}, factory Factory, globalTags map[string]string) error { - // Allow user to opt out of reporting metrics by passing in nil. - if factory == nil { - factory = NullFactory - } - - counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem() - gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem() - timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem() - histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem() - - v := reflect.ValueOf(m).Elem() - t := v.Type() - for i := 0; i < t.NumField(); i++ { - tags := make(map[string]string) - for k, v := range globalTags { - tags[k] = v - } - var buckets []float64 - field := t.Field(i) - metric := field.Tag.Get("metric") - if metric == "" { - return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name) - } - if tagString := field.Tag.Get("tags"); tagString != "" { - tagPairs := strings.Split(tagString, ",") - for _, tagPair := range tagPairs { - tag := strings.Split(tagPair, "=") - if len(tag) != 2 { - return fmt.Errorf( - "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]", - field.Name, tagPair, tagString) - } - tags[tag[0]] = tag[1] - } - } - if bucketString := field.Tag.Get("buckets"); bucketString != "" { - if field.Type.AssignableTo(timerPtrType) { - // TODO: Parse timer duration buckets - return fmt.Errorf( - "Field [%s]: Buckets 
are not currently initialized for timer metrics", - field.Name) - } else if field.Type.AssignableTo(histogramPtrType) { - bucketValues := strings.Split(bucketString, ",") - for _, bucket := range bucketValues { - b, err := strconv.ParseFloat(bucket, 64) - if err != nil { - return fmt.Errorf( - "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]", - field.Name, bucket, bucketString) - } - buckets = append(buckets, b) - } - } else { - return fmt.Errorf( - "Field [%s]: Buckets should only be defined for Timer and Histogram metric types", - field.Name) - } - } - help := field.Tag.Get("help") - var obj interface{} - if field.Type.AssignableTo(counterPtrType) { - obj = factory.Counter(Options{ - Name: metric, - Tags: tags, - Help: help, - }) - } else if field.Type.AssignableTo(gaugePtrType) { - obj = factory.Gauge(Options{ - Name: metric, - Tags: tags, - Help: help, - }) - } else if field.Type.AssignableTo(timerPtrType) { - // TODO: Add buckets once parsed (see TODO above) - obj = factory.Timer(TimerOptions{ - Name: metric, - Tags: tags, - Help: help, - }) - } else if field.Type.AssignableTo(histogramPtrType) { - obj = factory.Histogram(HistogramOptions{ - Name: metric, - Tags: tags, - Help: help, - Buckets: buckets, - }) - } else { - return fmt.Errorf( - "Field %s is not a pointer to timer, gauge, or counter", - field.Name) - } - v.Field(i).Set(reflect.ValueOf(obj)) - } - return nil -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go deleted file mode 100644 index 4a8abdb539..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "time" -) - -// StartStopwatch begins recording the executing time of an event, returning -// a Stopwatch that should be used to stop the recording the time for -// that event. Multiple events can be occurring simultaneously each -// represented by different active Stopwatches -func StartStopwatch(timer Timer) Stopwatch { - return Stopwatch{t: timer, start: time.Now()} -} - -// A Stopwatch tracks the execution time of a specific event -type Stopwatch struct { - t Timer - start time.Time -} - -// Stop stops executing of the stopwatch and records the amount of elapsed time -func (s Stopwatch) Stop() { - s.t.Record(s.ElapsedTime()) -} - -// ElapsedTime returns the amount of elapsed time (in time.Duration) -func (s Stopwatch) ElapsedTime() time.Duration { - return time.Since(s.start) -} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go deleted file mode 100644 index e18d222abb..0000000000 --- a/vendor/github.com/uber/jaeger-lib/metrics/timer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import ( - "time" -) - -// Timer accumulates observations about how long some operation took, -// and also maintains a historgam of percentiles. -type Timer interface { - // Records the time passed in. - Record(time.Duration) -} - -// NullTimer timer that does nothing -var NullTimer Timer = nullTimer{} - -type nullTimer struct{} - -func (nullTimer) Record(time.Duration) {} diff --git a/vendor/github.com/xo/terminfo/.gitignore b/vendor/github.com/xo/terminfo/.gitignore new file mode 100644 index 0000000000..368e0c06c3 --- /dev/null +++ b/vendor/github.com/xo/terminfo/.gitignore @@ -0,0 +1,9 @@ +/.cache/ + +/cmd/infocmp/infocmp +/cmd/infocmp/.out/ + +/infocmp +/.out/ + +*.txt diff --git a/vendor/github.com/xo/terminfo/LICENSE b/vendor/github.com/xo/terminfo/LICENSE new file mode 100644 index 0000000000..197dadb12c --- /dev/null +++ b/vendor/github.com/xo/terminfo/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Anmol Sethi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/xo/terminfo/README.md b/vendor/github.com/xo/terminfo/README.md new file mode 100644 index 0000000000..e5002d2390 --- /dev/null +++ b/vendor/github.com/xo/terminfo/README.md @@ -0,0 +1,139 @@ +# About terminfo [![GoDoc][1]][2] + +Package `terminfo` provides a pure-Go implementation of reading information +from the terminfo database. + +`terminfo` is meant as a replacement for `ncurses` in simple Go programs. + +## Installing + +Install in the usual Go way: + +```sh +$ go get -u github.com/xo/terminfo +``` + +## Using + +Please see the [GoDoc API listing][2] for more information on using `terminfo`. 
+ +```go +// _examples/simple/main.go +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "os/signal" + "strings" + "sync" + "syscall" + + "github.com/xo/terminfo" +) + +func main() { + //r := rand.New(nil) + + // load terminfo + ti, err := terminfo.LoadFromEnv() + if err != nil { + log.Fatal(err) + } + + // cleanup + defer func() { + err := recover() + termreset(ti) + if err != nil { + log.Fatal(err) + } + }() + + terminit(ti) + termtitle(ti, "simple example!") + termputs(ti, 3, 3, "Ctrl-C to exit") + maxColors := termcolors(ti) + if maxColors > 256 { + maxColors = 256 + } + for i := 0; i < maxColors; i++ { + termputs(ti, 5+i/16, 5+i%16, ti.Colorf(i, 0, "█")) + } + + // wait for signal + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + <-sigs +} + +// terminit initializes the special CA mode on the terminal, and makes the +// cursor invisible. +func terminit(ti *terminfo.Terminfo) { + buf := new(bytes.Buffer) + // set the cursor invisible + ti.Fprintf(buf, terminfo.CursorInvisible) + // enter special mode + ti.Fprintf(buf, terminfo.EnterCaMode) + // clear the screen + ti.Fprintf(buf, terminfo.ClearScreen) + os.Stdout.Write(buf.Bytes()) +} + +// termreset is the inverse of terminit. +func termreset(ti *terminfo.Terminfo) { + buf := new(bytes.Buffer) + ti.Fprintf(buf, terminfo.ExitCaMode) + ti.Fprintf(buf, terminfo.CursorNormal) + os.Stdout.Write(buf.Bytes()) +} + +// termputs puts a string at row, col, interpolating v. +func termputs(ti *terminfo.Terminfo, row, col int, s string, v ...interface{}) { + buf := new(bytes.Buffer) + ti.Fprintf(buf, terminfo.CursorAddress, row, col) + fmt.Fprintf(buf, s, v...) + os.Stdout.Write(buf.Bytes()) +} + +// sl is the status line terminfo. +var sl *terminfo.Terminfo + +// termtitle sets the window title. 
+func termtitle(ti *terminfo.Terminfo, s string) { + var once sync.Once + once.Do(func() { + if ti.Has(terminfo.HasStatusLine) { + return + } + // load the sl xterm if terminal is an xterm or has COLORTERM + if strings.Contains(strings.ToLower(os.Getenv("TERM")), "xterm") || os.Getenv("COLORTERM") == "truecolor" { + sl, _ = terminfo.Load("xterm+sl") + } + }) + if sl != nil { + ti = sl + } + if !ti.Has(terminfo.HasStatusLine) { + return + } + buf := new(bytes.Buffer) + ti.Fprintf(buf, terminfo.ToStatusLine) + fmt.Fprint(buf, s) + ti.Fprintf(buf, terminfo.FromStatusLine) + os.Stdout.Write(buf.Bytes()) +} + +// termcolors returns the maximum colors available for the terminal. +func termcolors(ti *terminfo.Terminfo) int { + if colors := ti.Num(terminfo.MaxColors); colors > 0 { + return colors + } + return int(terminfo.ColorLevelBasic) +} +``` + +[1]: https://godoc.org/github.com/xo/terminfo?status.svg +[2]: https://godoc.org/github.com/xo/terminfo diff --git a/vendor/github.com/xo/terminfo/caps.go b/vendor/github.com/xo/terminfo/caps.go new file mode 100644 index 0000000000..9674aaa7b5 --- /dev/null +++ b/vendor/github.com/xo/terminfo/caps.go @@ -0,0 +1,33 @@ +package terminfo + +//go:generate go run gen.go + +// BoolCapName returns the bool capability name. +func BoolCapName(i int) string { + return boolCapNames[2*i] +} + +// BoolCapNameShort returns the short bool capability name. +func BoolCapNameShort(i int) string { + return boolCapNames[2*i+1] +} + +// NumCapName returns the num capability name. +func NumCapName(i int) string { + return numCapNames[2*i] +} + +// NumCapNameShort returns the short num capability name. +func NumCapNameShort(i int) string { + return numCapNames[2*i+1] +} + +// StringCapName returns the string capability name. +func StringCapName(i int) string { + return stringCapNames[2*i] +} + +// StringCapNameShort returns the short string capability name. 
+func StringCapNameShort(i int) string { + return stringCapNames[2*i+1] +} diff --git a/vendor/github.com/xo/terminfo/capvals.go b/vendor/github.com/xo/terminfo/capvals.go new file mode 100644 index 0000000000..8528740ea3 --- /dev/null +++ b/vendor/github.com/xo/terminfo/capvals.go @@ -0,0 +1,2023 @@ +package terminfo + +// Code generated by gen.go. DO NOT EDIT. + +// Bool capabilities. +const ( + // The AutoLeftMargin [auto_left_margin, bw] bool capability indicates cub1 wraps from column 0 to last column. + AutoLeftMargin = iota + + // The AutoRightMargin [auto_right_margin, am] bool capability indicates terminal has automatic margins. + AutoRightMargin + + // The NoEscCtlc [no_esc_ctlc, xsb] bool capability indicates beehive (f1=escape, f2=ctrl C). + NoEscCtlc + + // The CeolStandoutGlitch [ceol_standout_glitch, xhp] bool capability indicates standout not erased by overwriting (hp). + CeolStandoutGlitch + + // The EatNewlineGlitch [eat_newline_glitch, xenl] bool capability indicates newline ignored after 80 cols (concept). + EatNewlineGlitch + + // The EraseOverstrike [erase_overstrike, eo] bool capability indicates can erase overstrikes with a blank. + EraseOverstrike + + // The GenericType [generic_type, gn] bool capability indicates generic line type. + GenericType + + // The HardCopy [hard_copy, hc] bool capability indicates hardcopy terminal. + HardCopy + + // The HasMetaKey [has_meta_key, km] bool capability indicates Has a meta key (i.e., sets 8th-bit). + HasMetaKey + + // The HasStatusLine [has_status_line, hs] bool capability indicates has extra status line. + HasStatusLine + + // The InsertNullGlitch [insert_null_glitch, in] bool capability indicates insert mode distinguishes nulls. + InsertNullGlitch + + // The MemoryAbove [memory_above, da] bool capability indicates display may be retained above the screen. + MemoryAbove + + // The MemoryBelow [memory_below, db] bool capability indicates display may be retained below the screen. 
+ MemoryBelow + + // The MoveInsertMode [move_insert_mode, mir] bool capability indicates safe to move while in insert mode. + MoveInsertMode + + // The MoveStandoutMode [move_standout_mode, msgr] bool capability indicates safe to move while in standout mode. + MoveStandoutMode + + // The OverStrike [over_strike, os] bool capability indicates terminal can overstrike. + OverStrike + + // The StatusLineEscOk [status_line_esc_ok, eslok] bool capability indicates escape can be used on the status line. + StatusLineEscOk + + // The DestTabsMagicSmso [dest_tabs_magic_smso, xt] bool capability indicates tabs destructive, magic so char (t1061). + DestTabsMagicSmso + + // The TildeGlitch [tilde_glitch, hz] bool capability indicates cannot print ~'s (Hazeltine). + TildeGlitch + + // The TransparentUnderline [transparent_underline, ul] bool capability indicates underline character overstrikes. + TransparentUnderline + + // The XonXoff [xon_xoff, xon] bool capability indicates terminal uses xon/xoff handshaking. + XonXoff + + // The NeedsXonXoff [needs_xon_xoff, nxon] bool capability indicates padding will not work, xon/xoff required. + NeedsXonXoff + + // The PrtrSilent [prtr_silent, mc5i] bool capability indicates printer will not echo on screen. + PrtrSilent + + // The HardCursor [hard_cursor, chts] bool capability indicates cursor is hard to see. + HardCursor + + // The NonRevRmcup [non_rev_rmcup, nrrmc] bool capability indicates smcup does not reverse rmcup. + NonRevRmcup + + // The NoPadChar [no_pad_char, npc] bool capability indicates pad character does not exist. + NoPadChar + + // The NonDestScrollRegion [non_dest_scroll_region, ndscr] bool capability indicates scrolling region is non-destructive. + NonDestScrollRegion + + // The CanChange [can_change, ccc] bool capability indicates terminal can re-define existing colors. + CanChange + + // The BackColorErase [back_color_erase, bce] bool capability indicates screen erased with background color. 
+ BackColorErase + + // The HueLightnessSaturation [hue_lightness_saturation, hls] bool capability indicates terminal uses only HLS color notation (Tektronix). + HueLightnessSaturation + + // The ColAddrGlitch [col_addr_glitch, xhpa] bool capability indicates only positive motion for hpa/mhpa caps. + ColAddrGlitch + + // The CrCancelsMicroMode [cr_cancels_micro_mode, crxm] bool capability indicates using cr turns off micro mode. + CrCancelsMicroMode + + // The HasPrintWheel [has_print_wheel, daisy] bool capability indicates printer needs operator to change character set. + HasPrintWheel + + // The RowAddrGlitch [row_addr_glitch, xvpa] bool capability indicates only positive motion for vpa/mvpa caps. + RowAddrGlitch + + // The SemiAutoRightMargin [semi_auto_right_margin, sam] bool capability indicates printing in last column causes cr. + SemiAutoRightMargin + + // The CpiChangesRes [cpi_changes_res, cpix] bool capability indicates changing character pitch changes resolution. + CpiChangesRes + + // The LpiChangesRes [lpi_changes_res, lpix] bool capability indicates changing line pitch changes resolution. + LpiChangesRes + + // The BackspacesWithBs [backspaces_with_bs, OTbs] bool capability indicates uses ^H to move left. + BackspacesWithBs + + // The CrtNoScrolling [crt_no_scrolling, OTns] bool capability indicates crt cannot scroll. + CrtNoScrolling + + // The NoCorrectlyWorkingCr [no_correctly_working_cr, OTnc] bool capability indicates no way to go to start of line. + NoCorrectlyWorkingCr + + // The GnuHasMetaKey [gnu_has_meta_key, OTMT] bool capability indicates has meta key. + GnuHasMetaKey + + // The LinefeedIsNewline [linefeed_is_newline, OTNL] bool capability indicates move down with \n. + LinefeedIsNewline + + // The HasHardwareTabs [has_hardware_tabs, OTpt] bool capability indicates has 8-char tabs invoked with ^I. + HasHardwareTabs + + // The ReturnDoesClrEol [return_does_clr_eol, OTxr] bool capability indicates return clears the line. 
+ ReturnDoesClrEol +) + +// Num capabilities. +const ( + // The Columns [columns, cols] num capability is number of columns in a line. + Columns = iota + + // The InitTabs [init_tabs, it] num capability is tabs initially every # spaces. + InitTabs + + // The Lines [lines, lines] num capability is number of lines on screen or page. + Lines + + // The LinesOfMemory [lines_of_memory, lm] num capability is lines of memory if > line. 0 means varies. + LinesOfMemory + + // The MagicCookieGlitch [magic_cookie_glitch, xmc] num capability is number of blank characters left by smso or rmso. + MagicCookieGlitch + + // The PaddingBaudRate [padding_baud_rate, pb] num capability is lowest baud rate where padding needed. + PaddingBaudRate + + // The VirtualTerminal [virtual_terminal, vt] num capability is virtual terminal number (CB/unix). + VirtualTerminal + + // The WidthStatusLine [width_status_line, wsl] num capability is number of columns in status line. + WidthStatusLine + + // The NumLabels [num_labels, nlab] num capability is number of labels on screen. + NumLabels + + // The LabelHeight [label_height, lh] num capability is rows in each label. + LabelHeight + + // The LabelWidth [label_width, lw] num capability is columns in each label. + LabelWidth + + // The MaxAttributes [max_attributes, ma] num capability is maximum combined attributes terminal can handle. + MaxAttributes + + // The MaximumWindows [maximum_windows, wnum] num capability is maximum number of definable windows. + MaximumWindows + + // The MaxColors [max_colors, colors] num capability is maximum number of colors on screen. + MaxColors + + // The MaxPairs [max_pairs, pairs] num capability is maximum number of color-pairs on the screen. + MaxPairs + + // The NoColorVideo [no_color_video, ncv] num capability is video attributes that cannot be used with colors. + NoColorVideo + + // The BufferCapacity [buffer_capacity, bufsz] num capability is numbers of bytes buffered before printing. 
+ BufferCapacity + + // The DotVertSpacing [dot_vert_spacing, spinv] num capability is spacing of pins vertically in pins per inch. + DotVertSpacing + + // The DotHorzSpacing [dot_horz_spacing, spinh] num capability is spacing of dots horizontally in dots per inch. + DotHorzSpacing + + // The MaxMicroAddress [max_micro_address, maddr] num capability is maximum value in micro_..._address. + MaxMicroAddress + + // The MaxMicroJump [max_micro_jump, mjump] num capability is maximum value in parm_..._micro. + MaxMicroJump + + // The MicroColSize [micro_col_size, mcs] num capability is character step size when in micro mode. + MicroColSize + + // The MicroLineSize [micro_line_size, mls] num capability is line step size when in micro mode. + MicroLineSize + + // The NumberOfPins [number_of_pins, npins] num capability is numbers of pins in print-head. + NumberOfPins + + // The OutputResChar [output_res_char, orc] num capability is horizontal resolution in units per line. + OutputResChar + + // The OutputResLine [output_res_line, orl] num capability is vertical resolution in units per line. + OutputResLine + + // The OutputResHorzInch [output_res_horz_inch, orhi] num capability is horizontal resolution in units per inch. + OutputResHorzInch + + // The OutputResVertInch [output_res_vert_inch, orvi] num capability is vertical resolution in units per inch. + OutputResVertInch + + // The PrintRate [print_rate, cps] num capability is print rate in characters per second. + PrintRate + + // The WideCharSize [wide_char_size, widcs] num capability is character step size when in double wide mode. + WideCharSize + + // The Buttons [buttons, btns] num capability is number of buttons on mouse. + Buttons + + // The BitImageEntwining [bit_image_entwining, bitwin] num capability is number of passes for each bit-image row. + BitImageEntwining + + // The BitImageType [bit_image_type, bitype] num capability is type of bit-image device. 
+ BitImageType + + // The MagicCookieGlitchUl [magic_cookie_glitch_ul, OTug] num capability is number of blanks left by ul. + MagicCookieGlitchUl + + // The CarriageReturnDelay [carriage_return_delay, OTdC] num capability is pad needed for CR. + CarriageReturnDelay + + // The NewLineDelay [new_line_delay, OTdN] num capability is pad needed for LF. + NewLineDelay + + // The BackspaceDelay [backspace_delay, OTdB] num capability is padding required for ^H. + BackspaceDelay + + // The HorizontalTabDelay [horizontal_tab_delay, OTdT] num capability is padding required for ^I. + HorizontalTabDelay + + // The NumberOfFunctionKeys [number_of_function_keys, OTkn] num capability is count of function keys. + NumberOfFunctionKeys +) + +// String capabilities. +const ( + // The BackTab [back_tab, cbt] string capability is the back tab (P). + BackTab = iota + + // The Bell [bell, bel] string capability is the audible signal (bell) (P). + Bell + + // The CarriageReturn [carriage_return, cr] string capability is the carriage return (P*) (P*). + CarriageReturn + + // The ChangeScrollRegion [change_scroll_region, csr] string capability is the change region to line #1 to line #2 (P). + ChangeScrollRegion + + // The ClearAllTabs [clear_all_tabs, tbc] string capability is the clear all tab stops (P). + ClearAllTabs + + // The ClearScreen [clear_screen, clear] string capability is the clear screen and home cursor (P*). + ClearScreen + + // The ClrEol [clr_eol, el] string capability is the clear to end of line (P). + ClrEol + + // The ClrEos [clr_eos, ed] string capability is the clear to end of screen (P*). + ClrEos + + // The ColumnAddress [column_address, hpa] string capability is the horizontal position #1, absolute (P). + ColumnAddress + + // The CommandCharacter [command_character, cmdch] string capability is the terminal settable cmd character in prototype !?. + CommandCharacter + + // The CursorAddress [cursor_address, cup] string capability is the move to row #1 columns #2. 
+ CursorAddress + + // The CursorDown [cursor_down, cud1] string capability is the down one line. + CursorDown + + // The CursorHome [cursor_home, home] string capability is the home cursor (if no cup). + CursorHome + + // The CursorInvisible [cursor_invisible, civis] string capability is the make cursor invisible. + CursorInvisible + + // The CursorLeft [cursor_left, cub1] string capability is the move left one space. + CursorLeft + + // The CursorMemAddress [cursor_mem_address, mrcup] string capability is the memory relative cursor addressing, move to row #1 columns #2. + CursorMemAddress + + // The CursorNormal [cursor_normal, cnorm] string capability is the make cursor appear normal (undo civis/cvvis). + CursorNormal + + // The CursorRight [cursor_right, cuf1] string capability is the non-destructive space (move right one space). + CursorRight + + // The CursorToLl [cursor_to_ll, ll] string capability is the last line, first column (if no cup). + CursorToLl + + // The CursorUp [cursor_up, cuu1] string capability is the up one line. + CursorUp + + // The CursorVisible [cursor_visible, cvvis] string capability is the make cursor very visible. + CursorVisible + + // The DeleteCharacter [delete_character, dch1] string capability is the delete character (P*). + DeleteCharacter + + // The DeleteLine [delete_line, dl1] string capability is the delete line (P*). + DeleteLine + + // The DisStatusLine [dis_status_line, dsl] string capability is the disable status line. + DisStatusLine + + // The DownHalfLine [down_half_line, hd] string capability is the half a line down. + DownHalfLine + + // The EnterAltCharsetMode [enter_alt_charset_mode, smacs] string capability is the start alternate character set (P). + EnterAltCharsetMode + + // The EnterBlinkMode [enter_blink_mode, blink] string capability is the turn on blinking. + EnterBlinkMode + + // The EnterBoldMode [enter_bold_mode, bold] string capability is the turn on bold (extra bright) mode. 
+ EnterBoldMode + + // The EnterCaMode [enter_ca_mode, smcup] string capability is the string to start programs using cup. + EnterCaMode + + // The EnterDeleteMode [enter_delete_mode, smdc] string capability is the enter delete mode. + EnterDeleteMode + + // The EnterDimMode [enter_dim_mode, dim] string capability is the turn on half-bright mode. + EnterDimMode + + // The EnterInsertMode [enter_insert_mode, smir] string capability is the enter insert mode. + EnterInsertMode + + // The EnterSecureMode [enter_secure_mode, invis] string capability is the turn on blank mode (characters invisible). + EnterSecureMode + + // The EnterProtectedMode [enter_protected_mode, prot] string capability is the turn on protected mode. + EnterProtectedMode + + // The EnterReverseMode [enter_reverse_mode, rev] string capability is the turn on reverse video mode. + EnterReverseMode + + // The EnterStandoutMode [enter_standout_mode, smso] string capability is the begin standout mode. + EnterStandoutMode + + // The EnterUnderlineMode [enter_underline_mode, smul] string capability is the begin underline mode. + EnterUnderlineMode + + // The EraseChars [erase_chars, ech] string capability is the erase #1 characters (P). + EraseChars + + // The ExitAltCharsetMode [exit_alt_charset_mode, rmacs] string capability is the end alternate character set (P). + ExitAltCharsetMode + + // The ExitAttributeMode [exit_attribute_mode, sgr0] string capability is the turn off all attributes. + ExitAttributeMode + + // The ExitCaMode [exit_ca_mode, rmcup] string capability is the strings to end programs using cup. + ExitCaMode + + // The ExitDeleteMode [exit_delete_mode, rmdc] string capability is the end delete mode. + ExitDeleteMode + + // The ExitInsertMode [exit_insert_mode, rmir] string capability is the exit insert mode. + ExitInsertMode + + // The ExitStandoutMode [exit_standout_mode, rmso] string capability is the exit standout mode. 
+ ExitStandoutMode + + // The ExitUnderlineMode [exit_underline_mode, rmul] string capability is the exit underline mode. + ExitUnderlineMode + + // The FlashScreen [flash_screen, flash] string capability is the visible bell (may not move cursor). + FlashScreen + + // The FormFeed [form_feed, ff] string capability is the hardcopy terminal page eject (P*). + FormFeed + + // The FromStatusLine [from_status_line, fsl] string capability is the return from status line. + FromStatusLine + + // The Init1string [init_1string, is1] string capability is the initialization string. + Init1string + + // The Init2string [init_2string, is2] string capability is the initialization string. + Init2string + + // The Init3string [init_3string, is3] string capability is the initialization string. + Init3string + + // The InitFile [init_file, if] string capability is the name of initialization file. + InitFile + + // The InsertCharacter [insert_character, ich1] string capability is the insert character (P). + InsertCharacter + + // The InsertLine [insert_line, il1] string capability is the insert line (P*). + InsertLine + + // The InsertPadding [insert_padding, ip] string capability is the insert padding after inserted character. + InsertPadding + + // The KeyBackspace [key_backspace, kbs] string capability is the backspace key. + KeyBackspace + + // The KeyCatab [key_catab, ktbc] string capability is the clear-all-tabs key. + KeyCatab + + // The KeyClear [key_clear, kclr] string capability is the clear-screen or erase key. + KeyClear + + // The KeyCtab [key_ctab, kctab] string capability is the clear-tab key. + KeyCtab + + // The KeyDc [key_dc, kdch1] string capability is the delete-character key. + KeyDc + + // The KeyDl [key_dl, kdl1] string capability is the delete-line key. + KeyDl + + // The KeyDown [key_down, kcud1] string capability is the down-arrow key. + KeyDown + + // The KeyEic [key_eic, krmir] string capability is the sent by rmir or smir in insert mode. 
+ KeyEic + + // The KeyEol [key_eol, kel] string capability is the clear-to-end-of-line key. + KeyEol + + // The KeyEos [key_eos, ked] string capability is the clear-to-end-of-screen key. + KeyEos + + // The KeyF0 [key_f0, kf0] string capability is the F0 function key. + KeyF0 + + // The KeyF1 [key_f1, kf1] string capability is the F1 function key. + KeyF1 + + // The KeyF10 [key_f10, kf10] string capability is the F10 function key. + KeyF10 + + // The KeyF2 [key_f2, kf2] string capability is the F2 function key. + KeyF2 + + // The KeyF3 [key_f3, kf3] string capability is the F3 function key. + KeyF3 + + // The KeyF4 [key_f4, kf4] string capability is the F4 function key. + KeyF4 + + // The KeyF5 [key_f5, kf5] string capability is the F5 function key. + KeyF5 + + // The KeyF6 [key_f6, kf6] string capability is the F6 function key. + KeyF6 + + // The KeyF7 [key_f7, kf7] string capability is the F7 function key. + KeyF7 + + // The KeyF8 [key_f8, kf8] string capability is the F8 function key. + KeyF8 + + // The KeyF9 [key_f9, kf9] string capability is the F9 function key. + KeyF9 + + // The KeyHome [key_home, khome] string capability is the home key. + KeyHome + + // The KeyIc [key_ic, kich1] string capability is the insert-character key. + KeyIc + + // The KeyIl [key_il, kil1] string capability is the insert-line key. + KeyIl + + // The KeyLeft [key_left, kcub1] string capability is the left-arrow key. + KeyLeft + + // The KeyLl [key_ll, kll] string capability is the lower-left key (home down). + KeyLl + + // The KeyNpage [key_npage, knp] string capability is the next-page key. + KeyNpage + + // The KeyPpage [key_ppage, kpp] string capability is the previous-page key. + KeyPpage + + // The KeyRight [key_right, kcuf1] string capability is the right-arrow key. + KeyRight + + // The KeySf [key_sf, kind] string capability is the scroll-forward key. + KeySf + + // The KeySr [key_sr, kri] string capability is the scroll-backward key. 
+ KeySr + + // The KeyStab [key_stab, khts] string capability is the set-tab key. + KeyStab + + // The KeyUp [key_up, kcuu1] string capability is the up-arrow key. + KeyUp + + // The KeypadLocal [keypad_local, rmkx] string capability is the leave 'keyboard_transmit' mode. + KeypadLocal + + // The KeypadXmit [keypad_xmit, smkx] string capability is the enter 'keyboard_transmit' mode. + KeypadXmit + + // The LabF0 [lab_f0, lf0] string capability is the label on function key f0 if not f0. + LabF0 + + // The LabF1 [lab_f1, lf1] string capability is the label on function key f1 if not f1. + LabF1 + + // The LabF10 [lab_f10, lf10] string capability is the label on function key f10 if not f10. + LabF10 + + // The LabF2 [lab_f2, lf2] string capability is the label on function key f2 if not f2. + LabF2 + + // The LabF3 [lab_f3, lf3] string capability is the label on function key f3 if not f3. + LabF3 + + // The LabF4 [lab_f4, lf4] string capability is the label on function key f4 if not f4. + LabF4 + + // The LabF5 [lab_f5, lf5] string capability is the label on function key f5 if not f5. + LabF5 + + // The LabF6 [lab_f6, lf6] string capability is the label on function key f6 if not f6. + LabF6 + + // The LabF7 [lab_f7, lf7] string capability is the label on function key f7 if not f7. + LabF7 + + // The LabF8 [lab_f8, lf8] string capability is the label on function key f8 if not f8. + LabF8 + + // The LabF9 [lab_f9, lf9] string capability is the label on function key f9 if not f9. + LabF9 + + // The MetaOff [meta_off, rmm] string capability is the turn off meta mode. + MetaOff + + // The MetaOn [meta_on, smm] string capability is the turn on meta mode (8th-bit on). + MetaOn + + // The Newline [newline, nel] string capability is the newline (behave like cr followed by lf). + Newline + + // The PadChar [pad_char, pad] string capability is the padding char (instead of null). + PadChar + + // The ParmDch [parm_dch, dch] string capability is the delete #1 characters (P*). 
+ ParmDch + + // The ParmDeleteLine [parm_delete_line, dl] string capability is the delete #1 lines (P*). + ParmDeleteLine + + // The ParmDownCursor [parm_down_cursor, cud] string capability is the down #1 lines (P*). + ParmDownCursor + + // The ParmIch [parm_ich, ich] string capability is the insert #1 characters (P*). + ParmIch + + // The ParmIndex [parm_index, indn] string capability is the scroll forward #1 lines (P). + ParmIndex + + // The ParmInsertLine [parm_insert_line, il] string capability is the insert #1 lines (P*). + ParmInsertLine + + // The ParmLeftCursor [parm_left_cursor, cub] string capability is the move #1 characters to the left (P). + ParmLeftCursor + + // The ParmRightCursor [parm_right_cursor, cuf] string capability is the move #1 characters to the right (P*). + ParmRightCursor + + // The ParmRindex [parm_rindex, rin] string capability is the scroll back #1 lines (P). + ParmRindex + + // The ParmUpCursor [parm_up_cursor, cuu] string capability is the up #1 lines (P*). + ParmUpCursor + + // The PkeyKey [pkey_key, pfkey] string capability is the program function key #1 to type string #2. + PkeyKey + + // The PkeyLocal [pkey_local, pfloc] string capability is the program function key #1 to execute string #2. + PkeyLocal + + // The PkeyXmit [pkey_xmit, pfx] string capability is the program function key #1 to transmit string #2. + PkeyXmit + + // The PrintScreen [print_screen, mc0] string capability is the print contents of screen. + PrintScreen + + // The PrtrOff [prtr_off, mc4] string capability is the turn off printer. + PrtrOff + + // The PrtrOn [prtr_on, mc5] string capability is the turn on printer. + PrtrOn + + // The RepeatChar [repeat_char, rep] string capability is the repeat char #1 #2 times (P*). + RepeatChar + + // The Reset1string [reset_1string, rs1] string capability is the reset string. + Reset1string + + // The Reset2string [reset_2string, rs2] string capability is the reset string. 
+ Reset2string + + // The Reset3string [reset_3string, rs3] string capability is the reset string. + Reset3string + + // The ResetFile [reset_file, rf] string capability is the name of reset file. + ResetFile + + // The RestoreCursor [restore_cursor, rc] string capability is the restore cursor to position of last save_cursor. + RestoreCursor + + // The RowAddress [row_address, vpa] string capability is the vertical position #1 absolute (P). + RowAddress + + // The SaveCursor [save_cursor, sc] string capability is the save current cursor position (P). + SaveCursor + + // The ScrollForward [scroll_forward, ind] string capability is the scroll text up (P). + ScrollForward + + // The ScrollReverse [scroll_reverse, ri] string capability is the scroll text down (P). + ScrollReverse + + // The SetAttributes [set_attributes, sgr] string capability is the define video attributes #1-#9 (PG9). + SetAttributes + + // The SetTab [set_tab, hts] string capability is the set a tab in every row, current columns. + SetTab + + // The SetWindow [set_window, wind] string capability is the current window is lines #1-#2 cols #3-#4. + SetWindow + + // The Tab [tab, ht] string capability is the tab to next 8-space hardware tab stop. + Tab + + // The ToStatusLine [to_status_line, tsl] string capability is the move to status line, column #1. + ToStatusLine + + // The UnderlineChar [underline_char, uc] string capability is the underline char and move past it. + UnderlineChar + + // The UpHalfLine [up_half_line, hu] string capability is the half a line up. + UpHalfLine + + // The InitProg [init_prog, iprog] string capability is the path name of program for initialization. + InitProg + + // The KeyA1 [key_a1, ka1] string capability is the upper left of keypad. + KeyA1 + + // The KeyA3 [key_a3, ka3] string capability is the upper right of keypad. + KeyA3 + + // The KeyB2 [key_b2, kb2] string capability is the center of keypad. 
+ KeyB2 + + // The KeyC1 [key_c1, kc1] string capability is the lower left of keypad. + KeyC1 + + // The KeyC3 [key_c3, kc3] string capability is the lower right of keypad. + KeyC3 + + // The PrtrNon [prtr_non, mc5p] string capability is the turn on printer for #1 bytes. + PrtrNon + + // The CharPadding [char_padding, rmp] string capability is the like ip but when in insert mode. + CharPadding + + // The AcsChars [acs_chars, acsc] string capability is the graphics charset pairs, based on vt100. + AcsChars + + // The PlabNorm [plab_norm, pln] string capability is the program label #1 to show string #2. + PlabNorm + + // The KeyBtab [key_btab, kcbt] string capability is the back-tab key. + KeyBtab + + // The EnterXonMode [enter_xon_mode, smxon] string capability is the turn on xon/xoff handshaking. + EnterXonMode + + // The ExitXonMode [exit_xon_mode, rmxon] string capability is the turn off xon/xoff handshaking. + ExitXonMode + + // The EnterAmMode [enter_am_mode, smam] string capability is the turn on automatic margins. + EnterAmMode + + // The ExitAmMode [exit_am_mode, rmam] string capability is the turn off automatic margins. + ExitAmMode + + // The XonCharacter [xon_character, xonc] string capability is the XON character. + XonCharacter + + // The XoffCharacter [xoff_character, xoffc] string capability is the XOFF character. + XoffCharacter + + // The EnaAcs [ena_acs, enacs] string capability is the enable alternate char set. + EnaAcs + + // The LabelOn [label_on, smln] string capability is the turn on soft labels. + LabelOn + + // The LabelOff [label_off, rmln] string capability is the turn off soft labels. + LabelOff + + // The KeyBeg [key_beg, kbeg] string capability is the begin key. + KeyBeg + + // The KeyCancel [key_cancel, kcan] string capability is the cancel key. + KeyCancel + + // The KeyClose [key_close, kclo] string capability is the close key. + KeyClose + + // The KeyCommand [key_command, kcmd] string capability is the command key. 
+ KeyCommand + + // The KeyCopy [key_copy, kcpy] string capability is the copy key. + KeyCopy + + // The KeyCreate [key_create, kcrt] string capability is the create key. + KeyCreate + + // The KeyEnd [key_end, kend] string capability is the end key. + KeyEnd + + // The KeyEnter [key_enter, kent] string capability is the enter/send key. + KeyEnter + + // The KeyExit [key_exit, kext] string capability is the exit key. + KeyExit + + // The KeyFind [key_find, kfnd] string capability is the find key. + KeyFind + + // The KeyHelp [key_help, khlp] string capability is the help key. + KeyHelp + + // The KeyMark [key_mark, kmrk] string capability is the mark key. + KeyMark + + // The KeyMessage [key_message, kmsg] string capability is the message key. + KeyMessage + + // The KeyMove [key_move, kmov] string capability is the move key. + KeyMove + + // The KeyNext [key_next, knxt] string capability is the next key. + KeyNext + + // The KeyOpen [key_open, kopn] string capability is the open key. + KeyOpen + + // The KeyOptions [key_options, kopt] string capability is the options key. + KeyOptions + + // The KeyPrevious [key_previous, kprv] string capability is the previous key. + KeyPrevious + + // The KeyPrint [key_print, kprt] string capability is the print key. + KeyPrint + + // The KeyRedo [key_redo, krdo] string capability is the redo key. + KeyRedo + + // The KeyReference [key_reference, kref] string capability is the reference key. + KeyReference + + // The KeyRefresh [key_refresh, krfr] string capability is the refresh key. + KeyRefresh + + // The KeyReplace [key_replace, krpl] string capability is the replace key. + KeyReplace + + // The KeyRestart [key_restart, krst] string capability is the restart key. + KeyRestart + + // The KeyResume [key_resume, kres] string capability is the resume key. + KeyResume + + // The KeySave [key_save, ksav] string capability is the save key. + KeySave + + // The KeySuspend [key_suspend, kspd] string capability is the suspend key. 
+ KeySuspend + + // The KeyUndo [key_undo, kund] string capability is the undo key. + KeyUndo + + // The KeySbeg [key_sbeg, kBEG] string capability is the shifted begin key. + KeySbeg + + // The KeyScancel [key_scancel, kCAN] string capability is the shifted cancel key. + KeyScancel + + // The KeyScommand [key_scommand, kCMD] string capability is the shifted command key. + KeyScommand + + // The KeyScopy [key_scopy, kCPY] string capability is the shifted copy key. + KeyScopy + + // The KeyScreate [key_screate, kCRT] string capability is the shifted create key. + KeyScreate + + // The KeySdc [key_sdc, kDC] string capability is the shifted delete-character key. + KeySdc + + // The KeySdl [key_sdl, kDL] string capability is the shifted delete-line key. + KeySdl + + // The KeySelect [key_select, kslt] string capability is the select key. + KeySelect + + // The KeySend [key_send, kEND] string capability is the shifted end key. + KeySend + + // The KeySeol [key_seol, kEOL] string capability is the shifted clear-to-end-of-line key. + KeySeol + + // The KeySexit [key_sexit, kEXT] string capability is the shifted exit key. + KeySexit + + // The KeySfind [key_sfind, kFND] string capability is the shifted find key. + KeySfind + + // The KeyShelp [key_shelp, kHLP] string capability is the shifted help key. + KeyShelp + + // The KeyShome [key_shome, kHOM] string capability is the shifted home key. + KeyShome + + // The KeySic [key_sic, kIC] string capability is the shifted insert-character key. + KeySic + + // The KeySleft [key_sleft, kLFT] string capability is the shifted left-arrow key. + KeySleft + + // The KeySmessage [key_smessage, kMSG] string capability is the shifted message key. + KeySmessage + + // The KeySmove [key_smove, kMOV] string capability is the shifted move key. + KeySmove + + // The KeySnext [key_snext, kNXT] string capability is the shifted next key. + KeySnext + + // The KeySoptions [key_soptions, kOPT] string capability is the shifted options key. 
+ KeySoptions + + // The KeySprevious [key_sprevious, kPRV] string capability is the shifted previous key. + KeySprevious + + // The KeySprint [key_sprint, kPRT] string capability is the shifted print key. + KeySprint + + // The KeySredo [key_sredo, kRDO] string capability is the shifted redo key. + KeySredo + + // The KeySreplace [key_sreplace, kRPL] string capability is the shifted replace key. + KeySreplace + + // The KeySright [key_sright, kRIT] string capability is the shifted right-arrow key. + KeySright + + // The KeySrsume [key_srsume, kRES] string capability is the shifted resume key. + KeySrsume + + // The KeySsave [key_ssave, kSAV] string capability is the shifted save key. + KeySsave + + // The KeySsuspend [key_ssuspend, kSPD] string capability is the shifted suspend key. + KeySsuspend + + // The KeySundo [key_sundo, kUND] string capability is the shifted undo key. + KeySundo + + // The ReqForInput [req_for_input, rfi] string capability is the send next input char (for ptys). + ReqForInput + + // The KeyF11 [key_f11, kf11] string capability is the F11 function key. + KeyF11 + + // The KeyF12 [key_f12, kf12] string capability is the F12 function key. + KeyF12 + + // The KeyF13 [key_f13, kf13] string capability is the F13 function key. + KeyF13 + + // The KeyF14 [key_f14, kf14] string capability is the F14 function key. + KeyF14 + + // The KeyF15 [key_f15, kf15] string capability is the F15 function key. + KeyF15 + + // The KeyF16 [key_f16, kf16] string capability is the F16 function key. + KeyF16 + + // The KeyF17 [key_f17, kf17] string capability is the F17 function key. + KeyF17 + + // The KeyF18 [key_f18, kf18] string capability is the F18 function key. + KeyF18 + + // The KeyF19 [key_f19, kf19] string capability is the F19 function key. + KeyF19 + + // The KeyF20 [key_f20, kf20] string capability is the F20 function key. + KeyF20 + + // The KeyF21 [key_f21, kf21] string capability is the F21 function key. 
+ KeyF21 + + // The KeyF22 [key_f22, kf22] string capability is the F22 function key. + KeyF22 + + // The KeyF23 [key_f23, kf23] string capability is the F23 function key. + KeyF23 + + // The KeyF24 [key_f24, kf24] string capability is the F24 function key. + KeyF24 + + // The KeyF25 [key_f25, kf25] string capability is the F25 function key. + KeyF25 + + // The KeyF26 [key_f26, kf26] string capability is the F26 function key. + KeyF26 + + // The KeyF27 [key_f27, kf27] string capability is the F27 function key. + KeyF27 + + // The KeyF28 [key_f28, kf28] string capability is the F28 function key. + KeyF28 + + // The KeyF29 [key_f29, kf29] string capability is the F29 function key. + KeyF29 + + // The KeyF30 [key_f30, kf30] string capability is the F30 function key. + KeyF30 + + // The KeyF31 [key_f31, kf31] string capability is the F31 function key. + KeyF31 + + // The KeyF32 [key_f32, kf32] string capability is the F32 function key. + KeyF32 + + // The KeyF33 [key_f33, kf33] string capability is the F33 function key. + KeyF33 + + // The KeyF34 [key_f34, kf34] string capability is the F34 function key. + KeyF34 + + // The KeyF35 [key_f35, kf35] string capability is the F35 function key. + KeyF35 + + // The KeyF36 [key_f36, kf36] string capability is the F36 function key. + KeyF36 + + // The KeyF37 [key_f37, kf37] string capability is the F37 function key. + KeyF37 + + // The KeyF38 [key_f38, kf38] string capability is the F38 function key. + KeyF38 + + // The KeyF39 [key_f39, kf39] string capability is the F39 function key. + KeyF39 + + // The KeyF40 [key_f40, kf40] string capability is the F40 function key. + KeyF40 + + // The KeyF41 [key_f41, kf41] string capability is the F41 function key. + KeyF41 + + // The KeyF42 [key_f42, kf42] string capability is the F42 function key. + KeyF42 + + // The KeyF43 [key_f43, kf43] string capability is the F43 function key. + KeyF43 + + // The KeyF44 [key_f44, kf44] string capability is the F44 function key. 
+ KeyF44 + + // The KeyF45 [key_f45, kf45] string capability is the F45 function key. + KeyF45 + + // The KeyF46 [key_f46, kf46] string capability is the F46 function key. + KeyF46 + + // The KeyF47 [key_f47, kf47] string capability is the F47 function key. + KeyF47 + + // The KeyF48 [key_f48, kf48] string capability is the F48 function key. + KeyF48 + + // The KeyF49 [key_f49, kf49] string capability is the F49 function key. + KeyF49 + + // The KeyF50 [key_f50, kf50] string capability is the F50 function key. + KeyF50 + + // The KeyF51 [key_f51, kf51] string capability is the F51 function key. + KeyF51 + + // The KeyF52 [key_f52, kf52] string capability is the F52 function key. + KeyF52 + + // The KeyF53 [key_f53, kf53] string capability is the F53 function key. + KeyF53 + + // The KeyF54 [key_f54, kf54] string capability is the F54 function key. + KeyF54 + + // The KeyF55 [key_f55, kf55] string capability is the F55 function key. + KeyF55 + + // The KeyF56 [key_f56, kf56] string capability is the F56 function key. + KeyF56 + + // The KeyF57 [key_f57, kf57] string capability is the F57 function key. + KeyF57 + + // The KeyF58 [key_f58, kf58] string capability is the F58 function key. + KeyF58 + + // The KeyF59 [key_f59, kf59] string capability is the F59 function key. + KeyF59 + + // The KeyF60 [key_f60, kf60] string capability is the F60 function key. + KeyF60 + + // The KeyF61 [key_f61, kf61] string capability is the F61 function key. + KeyF61 + + // The KeyF62 [key_f62, kf62] string capability is the F62 function key. + KeyF62 + + // The KeyF63 [key_f63, kf63] string capability is the F63 function key. + KeyF63 + + // The ClrBol [clr_bol, el1] string capability is the Clear to beginning of line. + ClrBol + + // The ClearMargins [clear_margins, mgc] string capability is the clear right and left soft margins. + ClearMargins + + // The SetLeftMargin [set_left_margin, smgl] string capability is the set left soft margin at current column. See smgl. 
(ML is not in BSD termcap). + SetLeftMargin + + // The SetRightMargin [set_right_margin, smgr] string capability is the set right soft margin at current column. + SetRightMargin + + // The LabelFormat [label_format, fln] string capability is the label format. + LabelFormat + + // The SetClock [set_clock, sclk] string capability is the set clock, #1 hrs #2 mins #3 secs. + SetClock + + // The DisplayClock [display_clock, dclk] string capability is the display clock. + DisplayClock + + // The RemoveClock [remove_clock, rmclk] string capability is the remove clock. + RemoveClock + + // The CreateWindow [create_window, cwin] string capability is the define a window #1 from #2,#3 to #4,#5. + CreateWindow + + // The GotoWindow [goto_window, wingo] string capability is the go to window #1. + GotoWindow + + // The Hangup [hangup, hup] string capability is the hang-up phone. + Hangup + + // The DialPhone [dial_phone, dial] string capability is the dial number #1. + DialPhone + + // The QuickDial [quick_dial, qdial] string capability is the dial number #1 without checking. + QuickDial + + // The Tone [tone, tone] string capability is the select touch tone dialing. + Tone + + // The Pulse [pulse, pulse] string capability is the select pulse dialing. + Pulse + + // The FlashHook [flash_hook, hook] string capability is the flash switch hook. + FlashHook + + // The FixedPause [fixed_pause, pause] string capability is the pause for 2-3 seconds. + FixedPause + + // The WaitTone [wait_tone, wait] string capability is the wait for dial-tone. + WaitTone + + // The User0 [user0, u0] string capability is the User string #0. + User0 + + // The User1 [user1, u1] string capability is the User string #1. + User1 + + // The User2 [user2, u2] string capability is the User string #2. + User2 + + // The User3 [user3, u3] string capability is the User string #3. + User3 + + // The User4 [user4, u4] string capability is the User string #4. 
+ User4 + + // The User5 [user5, u5] string capability is the User string #5. + User5 + + // The User6 [user6, u6] string capability is the User string #6. + User6 + + // The User7 [user7, u7] string capability is the User string #7. + User7 + + // The User8 [user8, u8] string capability is the User string #8. + User8 + + // The User9 [user9, u9] string capability is the User string #9. + User9 + + // The OrigPair [orig_pair, op] string capability is the Set default pair to its original value. + OrigPair + + // The OrigColors [orig_colors, oc] string capability is the Set all color pairs to the original ones. + OrigColors + + // The InitializeColor [initialize_color, initc] string capability is the initialize color #1 to (#2,#3,#4). + InitializeColor + + // The InitializePair [initialize_pair, initp] string capability is the Initialize color pair #1 to fg=(#2,#3,#4), bg=(#5,#6,#7). + InitializePair + + // The SetColorPair [set_color_pair, scp] string capability is the Set current color pair to #1. + SetColorPair + + // The SetForeground [set_foreground, setf] string capability is the Set foreground color #1. + SetForeground + + // The SetBackground [set_background, setb] string capability is the Set background color #1. + SetBackground + + // The ChangeCharPitch [change_char_pitch, cpi] string capability is the Change number of characters per inch to #1. + ChangeCharPitch + + // The ChangeLinePitch [change_line_pitch, lpi] string capability is the Change number of lines per inch to #1. + ChangeLinePitch + + // The ChangeResHorz [change_res_horz, chr] string capability is the Change horizontal resolution to #1. + ChangeResHorz + + // The ChangeResVert [change_res_vert, cvr] string capability is the Change vertical resolution to #1. + ChangeResVert + + // The DefineChar [define_char, defc] string capability is the Define a character #1, #2 dots wide, descender #3. 
+ DefineChar + + // The EnterDoublewideMode [enter_doublewide_mode, swidm] string capability is the Enter double-wide mode. + EnterDoublewideMode + + // The EnterDraftQuality [enter_draft_quality, sdrfq] string capability is the Enter draft-quality mode. + EnterDraftQuality + + // The EnterItalicsMode [enter_italics_mode, sitm] string capability is the Enter italic mode. + EnterItalicsMode + + // The EnterLeftwardMode [enter_leftward_mode, slm] string capability is the Start leftward carriage motion. + EnterLeftwardMode + + // The EnterMicroMode [enter_micro_mode, smicm] string capability is the Start micro-motion mode. + EnterMicroMode + + // The EnterNearLetterQuality [enter_near_letter_quality, snlq] string capability is the Enter NLQ mode. + EnterNearLetterQuality + + // The EnterNormalQuality [enter_normal_quality, snrmq] string capability is the Enter normal-quality mode. + EnterNormalQuality + + // The EnterShadowMode [enter_shadow_mode, sshm] string capability is the Enter shadow-print mode. + EnterShadowMode + + // The EnterSubscriptMode [enter_subscript_mode, ssubm] string capability is the Enter subscript mode. + EnterSubscriptMode + + // The EnterSuperscriptMode [enter_superscript_mode, ssupm] string capability is the Enter superscript mode. + EnterSuperscriptMode + + // The EnterUpwardMode [enter_upward_mode, sum] string capability is the Start upward carriage motion. + EnterUpwardMode + + // The ExitDoublewideMode [exit_doublewide_mode, rwidm] string capability is the End double-wide mode. + ExitDoublewideMode + + // The ExitItalicsMode [exit_italics_mode, ritm] string capability is the End italic mode. + ExitItalicsMode + + // The ExitLeftwardMode [exit_leftward_mode, rlm] string capability is the End left-motion mode. + ExitLeftwardMode + + // The ExitMicroMode [exit_micro_mode, rmicm] string capability is the End micro-motion mode. + ExitMicroMode + + // The ExitShadowMode [exit_shadow_mode, rshm] string capability is the End shadow-print mode. 
+ ExitShadowMode + + // The ExitSubscriptMode [exit_subscript_mode, rsubm] string capability is the End subscript mode. + ExitSubscriptMode + + // The ExitSuperscriptMode [exit_superscript_mode, rsupm] string capability is the End superscript mode. + ExitSuperscriptMode + + // The ExitUpwardMode [exit_upward_mode, rum] string capability is the End reverse character motion. + ExitUpwardMode + + // The MicroColumnAddress [micro_column_address, mhpa] string capability is the Like column_address in micro mode. + MicroColumnAddress + + // The MicroDown [micro_down, mcud1] string capability is the Like cursor_down in micro mode. + MicroDown + + // The MicroLeft [micro_left, mcub1] string capability is the Like cursor_left in micro mode. + MicroLeft + + // The MicroRight [micro_right, mcuf1] string capability is the Like cursor_right in micro mode. + MicroRight + + // The MicroRowAddress [micro_row_address, mvpa] string capability is the Like row_address #1 in micro mode. + MicroRowAddress + + // The MicroUp [micro_up, mcuu1] string capability is the Like cursor_up in micro mode. + MicroUp + + // The OrderOfPins [order_of_pins, porder] string capability is the Match software bits to print-head pins. + OrderOfPins + + // The ParmDownMicro [parm_down_micro, mcud] string capability is the Like parm_down_cursor in micro mode. + ParmDownMicro + + // The ParmLeftMicro [parm_left_micro, mcub] string capability is the Like parm_left_cursor in micro mode. + ParmLeftMicro + + // The ParmRightMicro [parm_right_micro, mcuf] string capability is the Like parm_right_cursor in micro mode. + ParmRightMicro + + // The ParmUpMicro [parm_up_micro, mcuu] string capability is the Like parm_up_cursor in micro mode. + ParmUpMicro + + // The SelectCharSet [select_char_set, scs] string capability is the Select character set, #1. + SelectCharSet + + // The SetBottomMargin [set_bottom_margin, smgb] string capability is the Set bottom margin at current line. 
+ SetBottomMargin + + // The SetBottomMarginParm [set_bottom_margin_parm, smgbp] string capability is the Set bottom margin at line #1 or (if smgtp is not given) #2 lines from bottom. + SetBottomMarginParm + + // The SetLeftMarginParm [set_left_margin_parm, smglp] string capability is the Set left (right) margin at column #1. + SetLeftMarginParm + + // The SetRightMarginParm [set_right_margin_parm, smgrp] string capability is the Set right margin at column #1. + SetRightMarginParm + + // The SetTopMargin [set_top_margin, smgt] string capability is the Set top margin at current line. + SetTopMargin + + // The SetTopMarginParm [set_top_margin_parm, smgtp] string capability is the Set top (bottom) margin at row #1. + SetTopMarginParm + + // The StartBitImage [start_bit_image, sbim] string capability is the Start printing bit image graphics. + StartBitImage + + // The StartCharSetDef [start_char_set_def, scsd] string capability is the Start character set definition #1, with #2 characters in the set. + StartCharSetDef + + // The StopBitImage [stop_bit_image, rbim] string capability is the Stop printing bit image graphics. + StopBitImage + + // The StopCharSetDef [stop_char_set_def, rcsd] string capability is the End definition of character set #1. + StopCharSetDef + + // The SubscriptCharacters [subscript_characters, subcs] string capability is the List of subscriptable characters. + SubscriptCharacters + + // The SuperscriptCharacters [superscript_characters, supcs] string capability is the List of superscriptable characters. + SuperscriptCharacters + + // The TheseCauseCr [these_cause_cr, docr] string capability is the Printing any of these characters causes CR. + TheseCauseCr + + // The ZeroMotion [zero_motion, zerom] string capability is the No motion for subsequent character. + ZeroMotion + + // The CharSetNames [char_set_names, csnm] string capability is the Produce #1'th item from list of character set names. 
+ CharSetNames + + // The KeyMouse [key_mouse, kmous] string capability is the Mouse event has occurred. + KeyMouse + + // The MouseInfo [mouse_info, minfo] string capability is the Mouse status information. + MouseInfo + + // The ReqMousePos [req_mouse_pos, reqmp] string capability is the Request mouse position. + ReqMousePos + + // The GetMouse [get_mouse, getm] string capability is the Curses should get button events, parameter #1 not documented. + GetMouse + + // The SetAForeground [set_a_foreground, setaf] string capability is the Set foreground color to #1, using ANSI escape. + SetAForeground + + // The SetABackground [set_a_background, setab] string capability is the Set background color to #1, using ANSI escape. + SetABackground + + // The PkeyPlab [pkey_plab, pfxl] string capability is the Program function key #1 to type string #2 and show string #3. + PkeyPlab + + // The DeviceType [device_type, devt] string capability is the Indicate language/codeset support. + DeviceType + + // The CodeSetInit [code_set_init, csin] string capability is the Init sequence for multiple codesets. + CodeSetInit + + // The Set0DesSeq [set0_des_seq, s0ds] string capability is the Shift to codeset 0 (EUC set 0, ASCII). + Set0DesSeq + + // The Set1DesSeq [set1_des_seq, s1ds] string capability is the Shift to codeset 1. + Set1DesSeq + + // The Set2DesSeq [set2_des_seq, s2ds] string capability is the Shift to codeset 2. + Set2DesSeq + + // The Set3DesSeq [set3_des_seq, s3ds] string capability is the Shift to codeset 3. + Set3DesSeq + + // The SetLrMargin [set_lr_margin, smglr] string capability is the Set both left and right margins to #1, #2. (ML is not in BSD termcap). + SetLrMargin + + // The SetTbMargin [set_tb_margin, smgtb] string capability is the Sets both top and bottom margins to #1, #2. + SetTbMargin + + // The BitImageRepeat [bit_image_repeat, birep] string capability is the Repeat bit image cell #1 #2 times. 
+ BitImageRepeat + + // The BitImageNewline [bit_image_newline, binel] string capability is the Move to next row of the bit image. + BitImageNewline + + // The BitImageCarriageReturn [bit_image_carriage_return, bicr] string capability is the Move to beginning of same row. + BitImageCarriageReturn + + // The ColorNames [color_names, colornm] string capability is the Give name for color #1. + ColorNames + + // The DefineBitImageRegion [define_bit_image_region, defbi] string capability is the Define rectangular bit image region. + DefineBitImageRegion + + // The EndBitImageRegion [end_bit_image_region, endbi] string capability is the End a bit-image region. + EndBitImageRegion + + // The SetColorBand [set_color_band, setcolor] string capability is the Change to ribbon color #1. + SetColorBand + + // The SetPageLength [set_page_length, slines] string capability is the Set page length to #1 lines. + SetPageLength + + // The DisplayPcChar [display_pc_char, dispc] string capability is the Display PC character #1. + DisplayPcChar + + // The EnterPcCharsetMode [enter_pc_charset_mode, smpch] string capability is the Enter PC character display mode. + EnterPcCharsetMode + + // The ExitPcCharsetMode [exit_pc_charset_mode, rmpch] string capability is the Exit PC character display mode. + ExitPcCharsetMode + + // The EnterScancodeMode [enter_scancode_mode, smsc] string capability is the Enter PC scancode mode. + EnterScancodeMode + + // The ExitScancodeMode [exit_scancode_mode, rmsc] string capability is the Exit PC scancode mode. + ExitScancodeMode + + // The PcTermOptions [pc_term_options, pctrm] string capability is the PC terminal options. + PcTermOptions + + // The ScancodeEscape [scancode_escape, scesc] string capability is the Escape for scancode emulation. + ScancodeEscape + + // The AltScancodeEsc [alt_scancode_esc, scesa] string capability is the Alternate escape for scancode emulation. 
+ AltScancodeEsc + + // The EnterHorizontalHlMode [enter_horizontal_hl_mode, ehhlm] string capability is the Enter horizontal highlight mode. + EnterHorizontalHlMode + + // The EnterLeftHlMode [enter_left_hl_mode, elhlm] string capability is the Enter left highlight mode. + EnterLeftHlMode + + // The EnterLowHlMode [enter_low_hl_mode, elohlm] string capability is the Enter low highlight mode. + EnterLowHlMode + + // The EnterRightHlMode [enter_right_hl_mode, erhlm] string capability is the Enter right highlight mode. + EnterRightHlMode + + // The EnterTopHlMode [enter_top_hl_mode, ethlm] string capability is the Enter top highlight mode. + EnterTopHlMode + + // The EnterVerticalHlMode [enter_vertical_hl_mode, evhlm] string capability is the Enter vertical highlight mode. + EnterVerticalHlMode + + // The SetAAttributes [set_a_attributes, sgr1] string capability is the Define second set of video attributes #1-#6. + SetAAttributes + + // The SetPglenInch [set_pglen_inch, slength] string capability is the Set page length to #1 hundredth of an inch (some implementations use sL for termcap). + SetPglenInch + + // The TermcapInit2 [termcap_init2, OTi2] string capability is the secondary initialization string. + TermcapInit2 + + // The TermcapReset [termcap_reset, OTrs] string capability is the terminal reset string. + TermcapReset + + // The LinefeedIfNotLf [linefeed_if_not_lf, OTnl] string capability is the use to move down. + LinefeedIfNotLf + + // The BackspaceIfNotBs [backspace_if_not_bs, OTbc] string capability is the move left, if not ^H. + BackspaceIfNotBs + + // The OtherNonFunctionKeys [other_non_function_keys, OTko] string capability is the list of self-mapped keycaps. + OtherNonFunctionKeys + + // The ArrowKeyMap [arrow_key_map, OTma] string capability is the map motion-keys for vi version 2. + ArrowKeyMap + + // The AcsUlcorner [acs_ulcorner, OTG2] string capability is the single upper left. 
+ AcsUlcorner + + // The AcsLlcorner [acs_llcorner, OTG3] string capability is the single lower left. + AcsLlcorner + + // The AcsUrcorner [acs_urcorner, OTG1] string capability is the single upper right. + AcsUrcorner + + // The AcsLrcorner [acs_lrcorner, OTG4] string capability is the single lower right. + AcsLrcorner + + // The AcsLtee [acs_ltee, OTGR] string capability is the tee pointing right. + AcsLtee + + // The AcsRtee [acs_rtee, OTGL] string capability is the tee pointing left. + AcsRtee + + // The AcsBtee [acs_btee, OTGU] string capability is the tee pointing up. + AcsBtee + + // The AcsTtee [acs_ttee, OTGD] string capability is the tee pointing down. + AcsTtee + + // The AcsHline [acs_hline, OTGH] string capability is the single horizontal line. + AcsHline + + // The AcsVline [acs_vline, OTGV] string capability is the single vertical line. + AcsVline + + // The AcsPlus [acs_plus, OTGC] string capability is the single intersection. + AcsPlus + + // The MemoryLock [memory_lock, meml] string capability is the lock memory above cursor. + MemoryLock + + // The MemoryUnlock [memory_unlock, memu] string capability is the unlock memory. + MemoryUnlock + + // The BoxChars1 [box_chars_1, box1] string capability is the box characters primary set. + BoxChars1 +) + +const ( + // CapCountBool is the count of bool capabilities. + CapCountBool = ReturnDoesClrEol + 1 + + // CapCountNum is the count of num capabilities. + CapCountNum = NumberOfFunctionKeys + 1 + + // CapCountString is the count of string capabilities. + CapCountString = BoxChars1 + 1 +) + +// boolCapNames are the bool term cap names. 
// boolCapNames are the bool term cap names.
//
// Entries are laid out pairwise: each capability occupies two consecutive
// slots — the long (terminfo) name followed by its short (termcap/tic)
// alias — so bool capability i maps to indices 2*i and 2*i+1. The "OT"
// prefixed short names are obsolete termcap-only capabilities.
var boolCapNames = [...]string{
	"auto_left_margin", "bw",
	"auto_right_margin", "am",
	"no_esc_ctlc", "xsb",
	"ceol_standout_glitch", "xhp",
	"eat_newline_glitch", "xenl",
	"erase_overstrike", "eo",
	"generic_type", "gn",
	"hard_copy", "hc",
	"has_meta_key", "km",
	"has_status_line", "hs",
	"insert_null_glitch", "in",
	"memory_above", "da",
	"memory_below", "db",
	"move_insert_mode", "mir",
	"move_standout_mode", "msgr",
	"over_strike", "os",
	"status_line_esc_ok", "eslok",
	"dest_tabs_magic_smso", "xt",
	"tilde_glitch", "hz",
	"transparent_underline", "ul",
	"xon_xoff", "xon",
	"needs_xon_xoff", "nxon",
	"prtr_silent", "mc5i",
	"hard_cursor", "chts",
	"non_rev_rmcup", "nrrmc",
	"no_pad_char", "npc",
	"non_dest_scroll_region", "ndscr",
	"can_change", "ccc",
	"back_color_erase", "bce",
	"hue_lightness_saturation", "hls",
	"col_addr_glitch", "xhpa",
	"cr_cancels_micro_mode", "crxm",
	"has_print_wheel", "daisy",
	"row_addr_glitch", "xvpa",
	"semi_auto_right_margin", "sam",
	"cpi_changes_res", "cpix",
	"lpi_changes_res", "lpix",
	"backspaces_with_bs", "OTbs",
	"crt_no_scrolling", "OTns",
	"no_correctly_working_cr", "OTnc",
	"gnu_has_meta_key", "OTMT",
	"linefeed_is_newline", "OTNL",
	"has_hardware_tabs", "OTpt",
	"return_does_clr_eol", "OTxr",
}

// numCapNames are the num term cap names.
// numCapNames are the num term cap names.
//
// Entries are laid out pairwise: each capability occupies two consecutive
// slots — the long (terminfo) name followed by its short (termcap/tic)
// alias — so num capability i maps to indices 2*i and 2*i+1. The "OT"
// prefixed short names are obsolete termcap-only capabilities.
var numCapNames = [...]string{
	"columns", "cols",
	"init_tabs", "it",
	"lines", "lines",
	"lines_of_memory", "lm",
	"magic_cookie_glitch", "xmc",
	"padding_baud_rate", "pb",
	"virtual_terminal", "vt",
	"width_status_line", "wsl",
	"num_labels", "nlab",
	"label_height", "lh",
	"label_width", "lw",
	"max_attributes", "ma",
	"maximum_windows", "wnum",
	"max_colors", "colors",
	"max_pairs", "pairs",
	"no_color_video", "ncv",
	"buffer_capacity", "bufsz",
	"dot_vert_spacing", "spinv",
	"dot_horz_spacing", "spinh",
	"max_micro_address", "maddr",
	"max_micro_jump", "mjump",
	"micro_col_size", "mcs",
	"micro_line_size", "mls",
	"number_of_pins", "npins",
	"output_res_char", "orc",
	"output_res_line", "orl",
	"output_res_horz_inch", "orhi",
	"output_res_vert_inch", "orvi",
	"print_rate", "cps",
	"wide_char_size", "widcs",
	"buttons", "btns",
	"bit_image_entwining", "bitwin",
	"bit_image_type", "bitype",
	"magic_cookie_glitch_ul", "OTug",
	"carriage_return_delay", "OTdC",
	"new_line_delay", "OTdN",
	"backspace_delay", "OTdB",
	"horizontal_tab_delay", "OTdT",
	"number_of_function_keys", "OTkn",
}

// stringCapNames are the string term cap names.
+var stringCapNames = [...]string{ + "back_tab", "cbt", + "bell", "bel", + "carriage_return", "cr", + "change_scroll_region", "csr", + "clear_all_tabs", "tbc", + "clear_screen", "clear", + "clr_eol", "el", + "clr_eos", "ed", + "column_address", "hpa", + "command_character", "cmdch", + "cursor_address", "cup", + "cursor_down", "cud1", + "cursor_home", "home", + "cursor_invisible", "civis", + "cursor_left", "cub1", + "cursor_mem_address", "mrcup", + "cursor_normal", "cnorm", + "cursor_right", "cuf1", + "cursor_to_ll", "ll", + "cursor_up", "cuu1", + "cursor_visible", "cvvis", + "delete_character", "dch1", + "delete_line", "dl1", + "dis_status_line", "dsl", + "down_half_line", "hd", + "enter_alt_charset_mode", "smacs", + "enter_blink_mode", "blink", + "enter_bold_mode", "bold", + "enter_ca_mode", "smcup", + "enter_delete_mode", "smdc", + "enter_dim_mode", "dim", + "enter_insert_mode", "smir", + "enter_secure_mode", "invis", + "enter_protected_mode", "prot", + "enter_reverse_mode", "rev", + "enter_standout_mode", "smso", + "enter_underline_mode", "smul", + "erase_chars", "ech", + "exit_alt_charset_mode", "rmacs", + "exit_attribute_mode", "sgr0", + "exit_ca_mode", "rmcup", + "exit_delete_mode", "rmdc", + "exit_insert_mode", "rmir", + "exit_standout_mode", "rmso", + "exit_underline_mode", "rmul", + "flash_screen", "flash", + "form_feed", "ff", + "from_status_line", "fsl", + "init_1string", "is1", + "init_2string", "is2", + "init_3string", "is3", + "init_file", "if", + "insert_character", "ich1", + "insert_line", "il1", + "insert_padding", "ip", + "key_backspace", "kbs", + "key_catab", "ktbc", + "key_clear", "kclr", + "key_ctab", "kctab", + "key_dc", "kdch1", + "key_dl", "kdl1", + "key_down", "kcud1", + "key_eic", "krmir", + "key_eol", "kel", + "key_eos", "ked", + "key_f0", "kf0", + "key_f1", "kf1", + "key_f10", "kf10", + "key_f2", "kf2", + "key_f3", "kf3", + "key_f4", "kf4", + "key_f5", "kf5", + "key_f6", "kf6", + "key_f7", "kf7", + "key_f8", "kf8", + "key_f9", "kf9", + 
"key_home", "khome", + "key_ic", "kich1", + "key_il", "kil1", + "key_left", "kcub1", + "key_ll", "kll", + "key_npage", "knp", + "key_ppage", "kpp", + "key_right", "kcuf1", + "key_sf", "kind", + "key_sr", "kri", + "key_stab", "khts", + "key_up", "kcuu1", + "keypad_local", "rmkx", + "keypad_xmit", "smkx", + "lab_f0", "lf0", + "lab_f1", "lf1", + "lab_f10", "lf10", + "lab_f2", "lf2", + "lab_f3", "lf3", + "lab_f4", "lf4", + "lab_f5", "lf5", + "lab_f6", "lf6", + "lab_f7", "lf7", + "lab_f8", "lf8", + "lab_f9", "lf9", + "meta_off", "rmm", + "meta_on", "smm", + "newline", "nel", + "pad_char", "pad", + "parm_dch", "dch", + "parm_delete_line", "dl", + "parm_down_cursor", "cud", + "parm_ich", "ich", + "parm_index", "indn", + "parm_insert_line", "il", + "parm_left_cursor", "cub", + "parm_right_cursor", "cuf", + "parm_rindex", "rin", + "parm_up_cursor", "cuu", + "pkey_key", "pfkey", + "pkey_local", "pfloc", + "pkey_xmit", "pfx", + "print_screen", "mc0", + "prtr_off", "mc4", + "prtr_on", "mc5", + "repeat_char", "rep", + "reset_1string", "rs1", + "reset_2string", "rs2", + "reset_3string", "rs3", + "reset_file", "rf", + "restore_cursor", "rc", + "row_address", "vpa", + "save_cursor", "sc", + "scroll_forward", "ind", + "scroll_reverse", "ri", + "set_attributes", "sgr", + "set_tab", "hts", + "set_window", "wind", + "tab", "ht", + "to_status_line", "tsl", + "underline_char", "uc", + "up_half_line", "hu", + "init_prog", "iprog", + "key_a1", "ka1", + "key_a3", "ka3", + "key_b2", "kb2", + "key_c1", "kc1", + "key_c3", "kc3", + "prtr_non", "mc5p", + "char_padding", "rmp", + "acs_chars", "acsc", + "plab_norm", "pln", + "key_btab", "kcbt", + "enter_xon_mode", "smxon", + "exit_xon_mode", "rmxon", + "enter_am_mode", "smam", + "exit_am_mode", "rmam", + "xon_character", "xonc", + "xoff_character", "xoffc", + "ena_acs", "enacs", + "label_on", "smln", + "label_off", "rmln", + "key_beg", "kbeg", + "key_cancel", "kcan", + "key_close", "kclo", + "key_command", "kcmd", + "key_copy", "kcpy", + 
"key_create", "kcrt", + "key_end", "kend", + "key_enter", "kent", + "key_exit", "kext", + "key_find", "kfnd", + "key_help", "khlp", + "key_mark", "kmrk", + "key_message", "kmsg", + "key_move", "kmov", + "key_next", "knxt", + "key_open", "kopn", + "key_options", "kopt", + "key_previous", "kprv", + "key_print", "kprt", + "key_redo", "krdo", + "key_reference", "kref", + "key_refresh", "krfr", + "key_replace", "krpl", + "key_restart", "krst", + "key_resume", "kres", + "key_save", "ksav", + "key_suspend", "kspd", + "key_undo", "kund", + "key_sbeg", "kBEG", + "key_scancel", "kCAN", + "key_scommand", "kCMD", + "key_scopy", "kCPY", + "key_screate", "kCRT", + "key_sdc", "kDC", + "key_sdl", "kDL", + "key_select", "kslt", + "key_send", "kEND", + "key_seol", "kEOL", + "key_sexit", "kEXT", + "key_sfind", "kFND", + "key_shelp", "kHLP", + "key_shome", "kHOM", + "key_sic", "kIC", + "key_sleft", "kLFT", + "key_smessage", "kMSG", + "key_smove", "kMOV", + "key_snext", "kNXT", + "key_soptions", "kOPT", + "key_sprevious", "kPRV", + "key_sprint", "kPRT", + "key_sredo", "kRDO", + "key_sreplace", "kRPL", + "key_sright", "kRIT", + "key_srsume", "kRES", + "key_ssave", "kSAV", + "key_ssuspend", "kSPD", + "key_sundo", "kUND", + "req_for_input", "rfi", + "key_f11", "kf11", + "key_f12", "kf12", + "key_f13", "kf13", + "key_f14", "kf14", + "key_f15", "kf15", + "key_f16", "kf16", + "key_f17", "kf17", + "key_f18", "kf18", + "key_f19", "kf19", + "key_f20", "kf20", + "key_f21", "kf21", + "key_f22", "kf22", + "key_f23", "kf23", + "key_f24", "kf24", + "key_f25", "kf25", + "key_f26", "kf26", + "key_f27", "kf27", + "key_f28", "kf28", + "key_f29", "kf29", + "key_f30", "kf30", + "key_f31", "kf31", + "key_f32", "kf32", + "key_f33", "kf33", + "key_f34", "kf34", + "key_f35", "kf35", + "key_f36", "kf36", + "key_f37", "kf37", + "key_f38", "kf38", + "key_f39", "kf39", + "key_f40", "kf40", + "key_f41", "kf41", + "key_f42", "kf42", + "key_f43", "kf43", + "key_f44", "kf44", + "key_f45", "kf45", + "key_f46", "kf46", 
+ "key_f47", "kf47", + "key_f48", "kf48", + "key_f49", "kf49", + "key_f50", "kf50", + "key_f51", "kf51", + "key_f52", "kf52", + "key_f53", "kf53", + "key_f54", "kf54", + "key_f55", "kf55", + "key_f56", "kf56", + "key_f57", "kf57", + "key_f58", "kf58", + "key_f59", "kf59", + "key_f60", "kf60", + "key_f61", "kf61", + "key_f62", "kf62", + "key_f63", "kf63", + "clr_bol", "el1", + "clear_margins", "mgc", + "set_left_margin", "smgl", + "set_right_margin", "smgr", + "label_format", "fln", + "set_clock", "sclk", + "display_clock", "dclk", + "remove_clock", "rmclk", + "create_window", "cwin", + "goto_window", "wingo", + "hangup", "hup", + "dial_phone", "dial", + "quick_dial", "qdial", + "tone", "tone", + "pulse", "pulse", + "flash_hook", "hook", + "fixed_pause", "pause", + "wait_tone", "wait", + "user0", "u0", + "user1", "u1", + "user2", "u2", + "user3", "u3", + "user4", "u4", + "user5", "u5", + "user6", "u6", + "user7", "u7", + "user8", "u8", + "user9", "u9", + "orig_pair", "op", + "orig_colors", "oc", + "initialize_color", "initc", + "initialize_pair", "initp", + "set_color_pair", "scp", + "set_foreground", "setf", + "set_background", "setb", + "change_char_pitch", "cpi", + "change_line_pitch", "lpi", + "change_res_horz", "chr", + "change_res_vert", "cvr", + "define_char", "defc", + "enter_doublewide_mode", "swidm", + "enter_draft_quality", "sdrfq", + "enter_italics_mode", "sitm", + "enter_leftward_mode", "slm", + "enter_micro_mode", "smicm", + "enter_near_letter_quality", "snlq", + "enter_normal_quality", "snrmq", + "enter_shadow_mode", "sshm", + "enter_subscript_mode", "ssubm", + "enter_superscript_mode", "ssupm", + "enter_upward_mode", "sum", + "exit_doublewide_mode", "rwidm", + "exit_italics_mode", "ritm", + "exit_leftward_mode", "rlm", + "exit_micro_mode", "rmicm", + "exit_shadow_mode", "rshm", + "exit_subscript_mode", "rsubm", + "exit_superscript_mode", "rsupm", + "exit_upward_mode", "rum", + "micro_column_address", "mhpa", + "micro_down", "mcud1", + "micro_left", 
"mcub1", + "micro_right", "mcuf1", + "micro_row_address", "mvpa", + "micro_up", "mcuu1", + "order_of_pins", "porder", + "parm_down_micro", "mcud", + "parm_left_micro", "mcub", + "parm_right_micro", "mcuf", + "parm_up_micro", "mcuu", + "select_char_set", "scs", + "set_bottom_margin", "smgb", + "set_bottom_margin_parm", "smgbp", + "set_left_margin_parm", "smglp", + "set_right_margin_parm", "smgrp", + "set_top_margin", "smgt", + "set_top_margin_parm", "smgtp", + "start_bit_image", "sbim", + "start_char_set_def", "scsd", + "stop_bit_image", "rbim", + "stop_char_set_def", "rcsd", + "subscript_characters", "subcs", + "superscript_characters", "supcs", + "these_cause_cr", "docr", + "zero_motion", "zerom", + "char_set_names", "csnm", + "key_mouse", "kmous", + "mouse_info", "minfo", + "req_mouse_pos", "reqmp", + "get_mouse", "getm", + "set_a_foreground", "setaf", + "set_a_background", "setab", + "pkey_plab", "pfxl", + "device_type", "devt", + "code_set_init", "csin", + "set0_des_seq", "s0ds", + "set1_des_seq", "s1ds", + "set2_des_seq", "s2ds", + "set3_des_seq", "s3ds", + "set_lr_margin", "smglr", + "set_tb_margin", "smgtb", + "bit_image_repeat", "birep", + "bit_image_newline", "binel", + "bit_image_carriage_return", "bicr", + "color_names", "colornm", + "define_bit_image_region", "defbi", + "end_bit_image_region", "endbi", + "set_color_band", "setcolor", + "set_page_length", "slines", + "display_pc_char", "dispc", + "enter_pc_charset_mode", "smpch", + "exit_pc_charset_mode", "rmpch", + "enter_scancode_mode", "smsc", + "exit_scancode_mode", "rmsc", + "pc_term_options", "pctrm", + "scancode_escape", "scesc", + "alt_scancode_esc", "scesa", + "enter_horizontal_hl_mode", "ehhlm", + "enter_left_hl_mode", "elhlm", + "enter_low_hl_mode", "elohlm", + "enter_right_hl_mode", "erhlm", + "enter_top_hl_mode", "ethlm", + "enter_vertical_hl_mode", "evhlm", + "set_a_attributes", "sgr1", + "set_pglen_inch", "slength", + "termcap_init2", "OTi2", + "termcap_reset", "OTrs", + 
"linefeed_if_not_lf", "OTnl", + "backspace_if_not_bs", "OTbc", + "other_non_function_keys", "OTko", + "arrow_key_map", "OTma", + "acs_ulcorner", "OTG2", + "acs_llcorner", "OTG3", + "acs_urcorner", "OTG1", + "acs_lrcorner", "OTG4", + "acs_ltee", "OTGR", + "acs_rtee", "OTGL", + "acs_btee", "OTGU", + "acs_ttee", "OTGD", + "acs_hline", "OTGH", + "acs_vline", "OTGV", + "acs_plus", "OTGC", + "memory_lock", "meml", + "memory_unlock", "memu", + "box_chars_1", "box1", +} diff --git a/vendor/github.com/xo/terminfo/color.go b/vendor/github.com/xo/terminfo/color.go new file mode 100644 index 0000000000..453c29c24e --- /dev/null +++ b/vendor/github.com/xo/terminfo/color.go @@ -0,0 +1,91 @@ +package terminfo + +import ( + "os" + "strconv" + "strings" +) + +// ColorLevel is the color level supported by a terminal. +type ColorLevel uint + +// ColorLevel values. +const ( + ColorLevelNone ColorLevel = iota + ColorLevelBasic + ColorLevelHundreds + ColorLevelMillions +) + +// String satisfies the Stringer interface. +func (c ColorLevel) String() string { + switch c { + case ColorLevelBasic: + return "basic" + case ColorLevelHundreds: + return "hundreds" + case ColorLevelMillions: + return "millions" + } + return "none" +} + +// ChromaFormatterName returns the github.com/alecthomas/chroma compatible +// formatter name for the color level. +func (c ColorLevel) ChromaFormatterName() string { + switch c { + case ColorLevelBasic: + return "terminal" + case ColorLevelHundreds: + return "terminal256" + case ColorLevelMillions: + return "terminal16m" + } + return "noop" +} + +// ColorLevelFromEnv returns the color level COLORTERM, FORCE_COLOR, +// TERM_PROGRAM, or determined from the TERM environment variable. 
func ColorLevelFromEnv() (ColorLevel, error) {
	// Overriding environment variables take precedence over the terminfo
	// database: COLORTERM/TERM_PROGRAM/FORCE_COLOR are conventions set by
	// terminal emulators, not described by terminfo.
	colorTerm, termProg, forceColor := os.Getenv("COLORTERM"), os.Getenv("TERM_PROGRAM"), os.Getenv("FORCE_COLOR")
	switch {
	case strings.Contains(colorTerm, "truecolor") || strings.Contains(colorTerm, "24bit") || termProg == "Hyper":
		// explicit 24-bit / truecolor advertisement
		return ColorLevelMillions, nil
	case colorTerm != "" || forceColor != "":
		// any other COLORTERM value, or FORCE_COLOR set, implies at least
		// basic color support
		return ColorLevelBasic, nil
	case termProg == "Apple_Terminal":
		return ColorLevelHundreds, nil
	case termProg == "iTerm.app":
		// iTerm reports its major version in TERM_PROGRAM_VERSION; when the
		// version is absent, assume 256-color support.
		ver := os.Getenv("TERM_PROGRAM_VERSION")
		if ver == "" {
			return ColorLevelHundreds, nil
		}
		i, err := strconv.Atoi(strings.Split(ver, ".")[0])
		if err != nil {
			return ColorLevelNone, ErrInvalidTermProgramVersion
		}
		if i == 3 {
			// NOTE(review): only major version exactly 3 is treated as
			// truecolor; later majors (4, 5, ...) would fall back to
			// 256-color below — presumably `i >= 3` was intended. Confirm
			// against upstream before changing.
			return ColorLevelMillions, nil
		}
		return ColorLevelHundreds, nil
	}

	// otherwise determine from TERM's max_colors capability
	if term := os.Getenv("TERM"); term != "" {
		ti, err := Load(term)
		if err != nil {
			return ColorLevelNone, err
		}

		v, ok := ti.Nums[MaxColors]
		switch {
		case !ok || v <= 16:
			// NOTE(review): terminals whose database reports 8/16 colors are
			// mapped to "none" here rather than "basic" — verify this is the
			// intended policy. (The `ok &&` in the next case is redundant:
			// !ok was already handled.)
			return ColorLevelNone, nil
		case ok && v >= 256:
			return ColorLevelHundreds, nil
		}
	}

	// 16 < max_colors < 256, or TERM unset: assume basic color.
	return ColorLevelBasic, nil
}

// ---- file: vendor/github.com/xo/terminfo/load.go (new file in this patch) ----

package terminfo

import (
	"os"
	"os/user"
	"path"
	"strings"
	"sync"
)

// termCache is the terminfo cache, keyed by terminal name. The embedded
// RWMutex guards db so concurrent Load calls are safe.
var termCache = struct {
	db map[string]*Terminfo
	sync.RWMutex
}{
	db: make(map[string]*Terminfo),
}

// Load follows the behavior described in terminfo(5) to find the correct
// terminfo file using the name, reads the file and then returns a Terminfo
// struct that describes the file.
+func Load(name string) (*Terminfo, error) { + if name == "" { + return nil, ErrEmptyTermName + } + + termCache.RLock() + ti, ok := termCache.db[name] + termCache.RUnlock() + + if ok { + return ti, nil + } + + var checkDirs []string + + // check $TERMINFO + if dir := os.Getenv("TERMINFO"); dir != "" { + checkDirs = append(checkDirs, dir) + } + + // check $HOME/.terminfo + u, err := user.Current() + if err != nil { + return nil, err + } + checkDirs = append(checkDirs, path.Join(u.HomeDir, ".terminfo")) + + // check $TERMINFO_DIRS + if dirs := os.Getenv("TERMINFO_DIRS"); dirs != "" { + checkDirs = append(checkDirs, strings.Split(dirs, ":")...) + } + + // check fallback directories + checkDirs = append(checkDirs, "/etc/terminfo", "/lib/terminfo", "/usr/share/terminfo") + for _, dir := range checkDirs { + ti, err = Open(dir, name) + if err != nil && err != ErrFileNotFound && !os.IsNotExist(err) { + return nil, err + } else if err == nil { + return ti, nil + } + } + + return nil, ErrDatabaseDirectoryNotFound +} + +// LoadFromEnv loads the terminal info based on the name contained in +// environment variable TERM. +func LoadFromEnv() (*Terminfo, error) { + return Load(os.Getenv("TERM")) +} diff --git a/vendor/github.com/xo/terminfo/param.go b/vendor/github.com/xo/terminfo/param.go new file mode 100644 index 0000000000..e6b8a1bc03 --- /dev/null +++ b/vendor/github.com/xo/terminfo/param.go @@ -0,0 +1,490 @@ +package terminfo + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "sync" +) + +// parametizer represents the a scan state for a parameterized string. +type parametizer struct { + // z is the string to parameterize + z []byte + + // pos is the current position in s. + pos int + + // nest is the current nest level. + nest int + + // s is the variable stack. + s stack + + // skipElse keeps the state of skipping else. + skipElse bool + + // buf is the result buffer. + buf *bytes.Buffer + + // params are the parameters to interpolate. 
+ params [9]interface{} + + // vars are dynamic variables. + vars [26]interface{} +} + +// staticVars are the static, global variables. +var staticVars = struct { + vars [26]interface{} + sync.Mutex +}{} + +var parametizerPool = sync.Pool{ + New: func() interface{} { + p := new(parametizer) + p.buf = bytes.NewBuffer(make([]byte, 0, 45)) + return p + }, +} + +// newParametizer returns a new initialized parametizer from the pool. +func newParametizer(z []byte) *parametizer { + p := parametizerPool.Get().(*parametizer) + p.z = z + + return p +} + +// reset resets the parametizer. +func (p *parametizer) reset() { + p.pos, p.nest = 0, 0 + + p.s.reset() + p.buf.Reset() + + p.params, p.vars = [9]interface{}{}, [26]interface{}{} + + parametizerPool.Put(p) +} + +// stateFn represents the state of the scanner as a function that returns the +// next state. +type stateFn func() stateFn + +// exec executes the parameterizer, interpolating the supplied parameters. +func (p *parametizer) exec() string { + for state := p.scanTextFn; state != nil; { + state = state() + } + return p.buf.String() +} + +// peek returns the next byte. +func (p *parametizer) peek() (byte, error) { + if p.pos >= len(p.z) { + return 0, io.EOF + } + return p.z[p.pos], nil +} + +// writeFrom writes the characters from ppos to pos to the buffer. +func (p *parametizer) writeFrom(ppos int) { + if p.pos > ppos { + // append remaining characters. + p.buf.Write(p.z[ppos:p.pos]) + } +} + +func (p *parametizer) scanTextFn() stateFn { + ppos := p.pos + for { + ch, err := p.peek() + if err != nil { + p.writeFrom(ppos) + return nil + } + + if ch == '%' { + p.writeFrom(ppos) + p.pos++ + return p.scanCodeFn + } + + p.pos++ + } +} + +func (p *parametizer) scanCodeFn() stateFn { + ch, err := p.peek() + if err != nil { + return nil + } + + switch ch { + case '%': + p.buf.WriteByte('%') + + case ':': + // this character is used to avoid interpreting "%-" and "%+" as operators. 
+ // the next character is where the format really begins. + p.pos++ + _, err = p.peek() + if err != nil { + return nil + } + return p.scanFormatFn + + case '#', ' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': + return p.scanFormatFn + + case 'o': + p.buf.WriteString(strconv.FormatInt(int64(p.s.popInt()), 8)) + + case 'd': + p.buf.WriteString(strconv.Itoa(p.s.popInt())) + + case 'x': + p.buf.WriteString(strconv.FormatInt(int64(p.s.popInt()), 16)) + + case 'X': + p.buf.WriteString(strings.ToUpper(strconv.FormatInt(int64(p.s.popInt()), 16))) + + case 's': + p.buf.WriteString(p.s.popString()) + + case 'c': + p.buf.WriteByte(p.s.popByte()) + + case 'p': + p.pos++ + return p.pushParamFn + + case 'P': + p.pos++ + return p.setDsVarFn + + case 'g': + p.pos++ + return p.getDsVarFn + + case '\'': + p.pos++ + ch, err = p.peek() + if err != nil { + return nil + } + + p.s.push(ch) + + // skip the '\'' + p.pos++ + + case '{': + p.pos++ + return p.pushIntfn + + case 'l': + p.s.push(len(p.s.popString())) + + case '+': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai + bi) + + case '-': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai - bi) + + case '*': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai * bi) + + case '/': + bi, ai := p.s.popInt(), p.s.popInt() + if bi != 0 { + p.s.push(ai / bi) + } else { + p.s.push(0) + } + + case 'm': + bi, ai := p.s.popInt(), p.s.popInt() + if bi != 0 { + p.s.push(ai % bi) + } else { + p.s.push(0) + } + + case '&': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai & bi) + + case '|': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai | bi) + + case '^': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai ^ bi) + + case '=': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai == bi) + + case '>': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai > bi) + + case '<': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai < bi) + + case 'A': + bi, ai := p.s.popBool(), p.s.popBool() + p.s.push(ai && bi) + + case 'O': + 
bi, ai := p.s.popBool(), p.s.popBool() + p.s.push(ai || bi) + + case '!': + p.s.push(!p.s.popBool()) + + case '~': + p.s.push(^p.s.popInt()) + + case 'i': + for i := range p.params[:2] { + if n, ok := p.params[i].(int); ok { + p.params[i] = n + 1 + } + } + + case '?', ';': + + case 't': + return p.scanThenFn + + case 'e': + p.skipElse = true + return p.skipTextFn + } + + p.pos++ + + return p.scanTextFn +} + +func (p *parametizer) scanFormatFn() stateFn { + // the character was already read, so no need to check the error. + ch, _ := p.peek() + + // 6 should be the maximum length of a format string, for example "%:-9.9d". + f := []byte{'%', ch, 0, 0, 0, 0} + + var err error + + for { + p.pos++ + ch, err = p.peek() + if err != nil { + return nil + } + + f = append(f, ch) + switch ch { + case 'o', 'd', 'x', 'X': + fmt.Fprintf(p.buf, string(f), p.s.popInt()) + break + + case 's': + fmt.Fprintf(p.buf, string(f), p.s.popString()) + break + + case 'c': + fmt.Fprintf(p.buf, string(f), p.s.popByte()) + break + } + } + + p.pos++ + + return p.scanTextFn +} + +func (p *parametizer) pushParamFn() stateFn { + ch, err := p.peek() + if err != nil { + return nil + } + + if ai := int(ch - '1'); ai >= 0 && ai < len(p.params) { + p.s.push(p.params[ai]) + } else { + p.s.push(0) + } + + // skip the '}' + p.pos++ + + return p.scanTextFn +} + +func (p *parametizer) setDsVarFn() stateFn { + ch, err := p.peek() + if err != nil { + return nil + } + + if ch >= 'A' && ch <= 'Z' { + staticVars.Lock() + staticVars.vars[int(ch-'A')] = p.s.pop() + staticVars.Unlock() + } else if ch >= 'a' && ch <= 'z' { + p.vars[int(ch-'a')] = p.s.pop() + } + + p.pos++ + return p.scanTextFn +} + +func (p *parametizer) getDsVarFn() stateFn { + ch, err := p.peek() + if err != nil { + return nil + } + + var a byte + if ch >= 'A' && ch <= 'Z' { + a = 'A' + } else if ch >= 'a' && ch <= 'z' { + a = 'a' + } + + staticVars.Lock() + p.s.push(staticVars.vars[int(ch-a)]) + staticVars.Unlock() + + p.pos++ + + return 
p.scanTextFn +} + +func (p *parametizer) pushIntfn() stateFn { + var ai int + for { + ch, err := p.peek() + if err != nil { + return nil + } + + p.pos++ + if ch < '0' || ch > '9' { + p.s.push(ai) + return p.scanTextFn + } + + ai = (ai * 10) + int(ch-'0') + } +} + +func (p *parametizer) scanThenFn() stateFn { + p.pos++ + + if p.s.popBool() { + return p.scanTextFn + } + + p.skipElse = false + + return p.skipTextFn +} + +func (p *parametizer) skipTextFn() stateFn { + for { + ch, err := p.peek() + if err != nil { + return nil + } + + p.pos++ + if ch == '%' { + break + } + } + + if p.skipElse { + return p.skipElseFn + } + + return p.skipThenFn +} + +func (p *parametizer) skipThenFn() stateFn { + ch, err := p.peek() + if err != nil { + return nil + } + + p.pos++ + switch ch { + case ';': + if p.nest == 0 { + return p.scanTextFn + } + p.nest-- + + case '?': + p.nest++ + + case 'e': + if p.nest == 0 { + return p.scanTextFn + } + } + + return p.skipTextFn +} + +func (p *parametizer) skipElseFn() stateFn { + ch, err := p.peek() + if err != nil { + return nil + } + + p.pos++ + switch ch { + case ';': + if p.nest == 0 { + return p.scanTextFn + } + p.nest-- + + case '?': + p.nest++ + } + + return p.skipTextFn +} + +// Printf evaluates a parameterized terminfo value z, interpolating params. +func Printf(z []byte, params ...interface{}) string { + p := newParametizer(z) + defer p.reset() + + // make sure we always have 9 parameters -- makes it easier + // later to skip checks and its faster + for i := 0; i < len(p.params) && i < len(params); i++ { + p.params[i] = params[i] + } + + return p.exec() +} + +// Fprintf evaluates a parameterized terminfo value z, interpolating params and +// writing to w. 
+func Fprintf(w io.Writer, z []byte, params ...interface{}) { + w.Write([]byte(Printf(z, params...))) +} diff --git a/vendor/github.com/xo/terminfo/stack.go b/vendor/github.com/xo/terminfo/stack.go new file mode 100644 index 0000000000..a6de395035 --- /dev/null +++ b/vendor/github.com/xo/terminfo/stack.go @@ -0,0 +1,48 @@ +package terminfo + +type stack []interface{} + +func (s *stack) push(v interface{}) { + *s = append(*s, v) +} + +func (s *stack) pop() interface{} { + if len(*s) == 0 { + return nil + } + v := (*s)[len(*s)-1] + *s = (*s)[:len(*s)-1] + return v +} + +func (s *stack) popInt() int { + if i, ok := s.pop().(int); ok { + return i + } + return 0 +} + +func (s *stack) popBool() bool { + if b, ok := s.pop().(bool); ok { + return b + } + return false +} + +func (s *stack) popByte() byte { + if b, ok := s.pop().(byte); ok { + return b + } + return 0 +} + +func (s *stack) popString() string { + if a, ok := s.pop().(string); ok { + return a + } + return "" +} + +func (s *stack) reset() { + *s = (*s)[:0] +} diff --git a/vendor/github.com/xo/terminfo/terminfo.go b/vendor/github.com/xo/terminfo/terminfo.go new file mode 100644 index 0000000000..8ebbf95995 --- /dev/null +++ b/vendor/github.com/xo/terminfo/terminfo.go @@ -0,0 +1,538 @@ +// Package terminfo implements reading terminfo files in pure go. +package terminfo + +import ( + "io" + "io/ioutil" + "path" + "strconv" + "strings" +) + +// Error is a terminfo error. +type Error string + +// Error satisfies the error interface. +func (err Error) Error() string { + return string(err) +} + +const ( + // ErrInvalidFileSize is the invalid file size error. + ErrInvalidFileSize Error = "invalid file size" + + // ErrUnexpectedFileEnd is the unexpected file end error. + ErrUnexpectedFileEnd Error = "unexpected file end" + + // ErrInvalidStringTable is the invalid string table error. + ErrInvalidStringTable Error = "invalid string table" + + // ErrInvalidMagic is the invalid magic error. 
+ ErrInvalidMagic Error = "invalid magic" + + // ErrInvalidHeader is the invalid header error. + ErrInvalidHeader Error = "invalid header" + + // ErrInvalidNames is the invalid names error. + ErrInvalidNames Error = "invalid names" + + // ErrInvalidExtendedHeader is the invalid extended header error. + ErrInvalidExtendedHeader Error = "invalid extended header" + + // ErrEmptyTermName is the empty term name error. + ErrEmptyTermName Error = "empty term name" + + // ErrDatabaseDirectoryNotFound is the database directory not found error. + ErrDatabaseDirectoryNotFound Error = "database directory not found" + + // ErrFileNotFound is the file not found error. + ErrFileNotFound Error = "file not found" + + // ErrInvalidTermProgramVersion is the invalid TERM_PROGRAM_VERSION error. + ErrInvalidTermProgramVersion Error = "invalid TERM_PROGRAM_VERSION" +) + +// Terminfo describes a terminal's capabilities. +type Terminfo struct { + // File is the original source file. + File string + + // Names are the provided cap names. + Names []string + + // Bools are the bool capabilities. + Bools map[int]bool + + // BoolsM are the missing bool capabilities. + BoolsM map[int]bool + + // Nums are the num capabilities. + Nums map[int]int + + // NumsM are the missing num capabilities. + NumsM map[int]bool + + // Strings are the string capabilities. + Strings map[int][]byte + + // StringsM are the missing string capabilities. + StringsM map[int]bool + + // ExtBools are the extended bool capabilities. + ExtBools map[int]bool + + // ExtBoolsNames is the map of extended bool capabilities to their index. + ExtBoolNames map[int][]byte + + // ExtNums are the extended num capabilities. + ExtNums map[int]int + + // ExtNumsNames is the map of extended num capabilities to their index. + ExtNumNames map[int][]byte + + // ExtStrings are the extended string capabilities. + ExtStrings map[int][]byte + + // ExtStringsNames is the map of extended string capabilities to their index. 
+ ExtStringNames map[int][]byte +} + +// Decode decodes the terminfo data contained in buf. +func Decode(buf []byte) (*Terminfo, error) { + var err error + + // check max file length + if len(buf) >= maxFileLength { + return nil, ErrInvalidFileSize + } + + d := &decoder{ + buf: buf, + len: len(buf), + } + + // read header + h, err := d.readInts(6, 16) + if err != nil { + return nil, err + } + + var numWidth int + + // check magic + if h[fieldMagic] == magic { + numWidth = 16 + } else if h[fieldMagic] == magicExtended { + numWidth = 32 + } else { + return nil, ErrInvalidMagic + } + + // check header + if hasInvalidCaps(h) { + return nil, ErrInvalidHeader + } + + // check remaining length + if d.len-d.pos < capLength(h) { + return nil, ErrUnexpectedFileEnd + } + + // read names + names, err := d.readBytes(h[fieldNameSize]) + if err != nil { + return nil, err + } + + // check name is terminated properly + i := findNull(names, 0) + if i == -1 { + return nil, ErrInvalidNames + } + names = names[:i] + + // read bool caps + bools, boolsM, err := d.readBools(h[fieldBoolCount]) + if err != nil { + return nil, err + } + + // read num caps + nums, numsM, err := d.readNums(h[fieldNumCount], numWidth) + if err != nil { + return nil, err + } + + // read string caps + strs, strsM, err := d.readStrings(h[fieldStringCount], h[fieldTableSize]) + if err != nil { + return nil, err + } + + ti := &Terminfo{ + Names: strings.Split(string(names), "|"), + Bools: bools, + BoolsM: boolsM, + Nums: nums, + NumsM: numsM, + Strings: strs, + StringsM: strsM, + } + + // at the end of file, so no extended caps + if d.pos >= d.len { + return ti, nil + } + + // decode extended header + eh, err := d.readInts(5, 16) + if err != nil { + return nil, err + } + + // check extended offset field + if hasInvalidExtOffset(eh) { + return nil, ErrInvalidExtendedHeader + } + + // check extended cap lengths + if d.len-d.pos != extCapLength(eh, numWidth) { + return nil, ErrInvalidExtendedHeader + } + + // read 
extended bool caps + ti.ExtBools, _, err = d.readBools(eh[fieldExtBoolCount]) + if err != nil { + return nil, err + } + + // read extended num caps + ti.ExtNums, _, err = d.readNums(eh[fieldExtNumCount], numWidth) + if err != nil { + return nil, err + } + + // read extended string data table indexes + extIndexes, err := d.readInts(eh[fieldExtOffsetCount], 16) + if err != nil { + return nil, err + } + + // read string data table + extData, err := d.readBytes(eh[fieldExtTableSize]) + if err != nil { + return nil, err + } + + // precautionary check that exactly at end of file + if d.pos != d.len { + return nil, ErrUnexpectedFileEnd + } + + var last int + // read extended string caps + ti.ExtStrings, last, err = readStrings(extIndexes, extData, eh[fieldExtStringCount]) + if err != nil { + return nil, err + } + extIndexes, extData = extIndexes[eh[fieldExtStringCount]:], extData[last:] + + // read extended bool names + ti.ExtBoolNames, _, err = readStrings(extIndexes, extData, eh[fieldExtBoolCount]) + if err != nil { + return nil, err + } + extIndexes = extIndexes[eh[fieldExtBoolCount]:] + + // read extended num names + ti.ExtNumNames, _, err = readStrings(extIndexes, extData, eh[fieldExtNumCount]) + if err != nil { + return nil, err + } + extIndexes = extIndexes[eh[fieldExtNumCount]:] + + // read extended string names + ti.ExtStringNames, _, err = readStrings(extIndexes, extData, eh[fieldExtStringCount]) + if err != nil { + return nil, err + } + //extIndexes = extIndexes[eh[fieldExtStringCount]:] + + return ti, nil +} + +// Open reads the terminfo file name from the specified directory dir. 
+func Open(dir, name string) (*Terminfo, error) { + var err error + var buf []byte + var filename string + for _, f := range []string{ + path.Join(dir, name[0:1], name), + path.Join(dir, strconv.FormatUint(uint64(name[0]), 16), name), + } { + buf, err = ioutil.ReadFile(f) + if err == nil { + filename = f + break + } + } + if buf == nil { + return nil, ErrFileNotFound + } + + // decode + ti, err := Decode(buf) + if err != nil { + return nil, err + } + + // save original file name + ti.File = filename + + // add to cache + termCache.Lock() + for _, n := range ti.Names { + termCache.db[n] = ti + } + termCache.Unlock() + + return ti, nil +} + +// boolCaps returns all bool and extended capabilities using f to format the +// index key. +func (ti *Terminfo) boolCaps(f func(int) string, extended bool) map[string]bool { + m := make(map[string]bool, len(ti.Bools)+len(ti.ExtBools)) + if !extended { + for k, v := range ti.Bools { + m[f(k)] = v + } + } else { + for k, v := range ti.ExtBools { + m[string(ti.ExtBoolNames[k])] = v + } + } + return m +} + +// BoolCaps returns all bool capabilities. +func (ti *Terminfo) BoolCaps() map[string]bool { + return ti.boolCaps(BoolCapName, false) +} + +// BoolCapsShort returns all bool capabilities, using the short name as the +// index. +func (ti *Terminfo) BoolCapsShort() map[string]bool { + return ti.boolCaps(BoolCapNameShort, false) +} + +// ExtBoolCaps returns all extended bool capabilities. +func (ti *Terminfo) ExtBoolCaps() map[string]bool { + return ti.boolCaps(BoolCapName, true) +} + +// ExtBoolCapsShort returns all extended bool capabilities, using the short +// name as the index. +func (ti *Terminfo) ExtBoolCapsShort() map[string]bool { + return ti.boolCaps(BoolCapNameShort, true) +} + +// numCaps returns all num and extended capabilities using f to format the +// index key. 
+func (ti *Terminfo) numCaps(f func(int) string, extended bool) map[string]int { + m := make(map[string]int, len(ti.Nums)+len(ti.ExtNums)) + if !extended { + for k, v := range ti.Nums { + m[f(k)] = v + } + } else { + for k, v := range ti.ExtNums { + m[string(ti.ExtNumNames[k])] = v + } + } + return m +} + +// NumCaps returns all num capabilities. +func (ti *Terminfo) NumCaps() map[string]int { + return ti.numCaps(NumCapName, false) +} + +// NumCapsShort returns all num capabilities, using the short name as the +// index. +func (ti *Terminfo) NumCapsShort() map[string]int { + return ti.numCaps(NumCapNameShort, false) +} + +// ExtNumCaps returns all extended num capabilities. +func (ti *Terminfo) ExtNumCaps() map[string]int { + return ti.numCaps(NumCapName, true) +} + +// ExtNumCapsShort returns all extended num capabilities, using the short +// name as the index. +func (ti *Terminfo) ExtNumCapsShort() map[string]int { + return ti.numCaps(NumCapNameShort, true) +} + +// stringCaps returns all string and extended capabilities using f to format the +// index key. +func (ti *Terminfo) stringCaps(f func(int) string, extended bool) map[string][]byte { + m := make(map[string][]byte, len(ti.Strings)+len(ti.ExtStrings)) + if !extended { + for k, v := range ti.Strings { + m[f(k)] = v + } + } else { + for k, v := range ti.ExtStrings { + m[string(ti.ExtStringNames[k])] = v + } + } + return m +} + +// StringCaps returns all string capabilities. +func (ti *Terminfo) StringCaps() map[string][]byte { + return ti.stringCaps(StringCapName, false) +} + +// StringCapsShort returns all string capabilities, using the short name as the +// index. +func (ti *Terminfo) StringCapsShort() map[string][]byte { + return ti.stringCaps(StringCapNameShort, false) +} + +// ExtStringCaps returns all extended string capabilities. 
+func (ti *Terminfo) ExtStringCaps() map[string][]byte { + return ti.stringCaps(StringCapName, true) +} + +// ExtStringCapsShort returns all extended string capabilities, using the short +// name as the index. +func (ti *Terminfo) ExtStringCapsShort() map[string][]byte { + return ti.stringCaps(StringCapNameShort, true) +} + +// Has determines if the bool cap i is present. +func (ti *Terminfo) Has(i int) bool { + return ti.Bools[i] +} + +// Num returns the num cap i, or -1 if not present. +func (ti *Terminfo) Num(i int) int { + n, ok := ti.Nums[i] + if !ok { + return -1 + } + return n +} + +// Printf formats the string cap i, interpolating parameters v. +func (ti *Terminfo) Printf(i int, v ...interface{}) string { + return Printf(ti.Strings[i], v...) +} + +// Fprintf prints the string cap i to writer w, interpolating parameters v. +func (ti *Terminfo) Fprintf(w io.Writer, i int, v ...interface{}) { + Fprintf(w, ti.Strings[i], v...) +} + +// Color takes a foreground and background color and returns string that sets +// them for this terminal. +func (ti *Terminfo) Colorf(fg, bg int, str string) string { + maxColors := int(ti.Nums[MaxColors]) + + // map bright colors to lower versions if the color table only holds 8. + if maxColors == 8 { + if fg > 7 && fg < 16 { + fg -= 8 + } + if bg > 7 && bg < 16 { + bg -= 8 + } + } + + var s string + if maxColors > fg && fg >= 0 { + s += ti.Printf(SetAForeground, fg) + } + if maxColors > bg && bg >= 0 { + s += ti.Printf(SetABackground, bg) + } + return s + str + ti.Printf(ExitAttributeMode) +} + +// Goto returns a string suitable for addressing the cursor at the given +// row and column. The origin 0, 0 is in the upper left corner of the screen. 
+func (ti *Terminfo) Goto(row, col int) string { + return Printf(ti.Strings[CursorAddress], row, col) +} + +// Puts emits the string to the writer, but expands inline padding indications +// (of the form $<[delay]> where [delay] is msec) to a suitable number of +// padding characters (usually null bytes) based upon the supplied baud. At +// high baud rates, more padding characters will be inserted. +/*func (ti *Terminfo) Puts(w io.Writer, s string, lines, baud int) (int, error) { + var err error + for { + start := strings.Index(s, "$<") + if start == -1 { + // most strings don't need padding, which is good news! + return io.WriteString(w, s) + } + + end := strings.Index(s, ">") + if end == -1 { + // unterminated... just emit bytes unadulterated. + return io.WriteString(w, "$<"+s) + } + + var c int + c, err = io.WriteString(w, s[:start]) + if err != nil { + return n + c, err + } + n += c + + s = s[start+2:] + val := s[:end] + s = s[end+1:] + var ms int + var dot, mandatory, asterisk bool + unit := 1000 + for _, ch := range val { + switch { + case ch >= '0' && ch <= '9': + ms = (ms * 10) + int(ch-'0') + if dot { + unit *= 10 + } + case ch == '.' && !dot: + dot = true + case ch == '*' && !asterisk: + ms *= lines + asterisk = true + case ch == '/': + mandatory = true + default: + break + } + } + + z, pad := ((baud/8)/unit)*ms, ti.Strings[PadChar] + b := make([]byte, len(pad)*z) + for bp := copy(b, pad); bp < len(b); bp *= 2 { + copy(b[bp:], b[:bp]) + } + + if (!ti.Bools[XonXoff] && baud > int(ti.Nums[PaddingBaudRate])) || mandatory { + c, err = w.Write(b) + if err != nil { + return n + c, err + } + n += c + } + } + + return n, nil +}*/ diff --git a/vendor/github.com/xo/terminfo/util.go b/vendor/github.com/xo/terminfo/util.go new file mode 100644 index 0000000000..56f47e8110 --- /dev/null +++ b/vendor/github.com/xo/terminfo/util.go @@ -0,0 +1,266 @@ +package terminfo + +import ( + "sort" +) + +const ( + // maxFileLength is the max file length. 
+ maxFileLength = 4096 + + // magic is the file magic for terminfo files. + magic = 0432 + + // magicExtended is the file magic for terminfo files with the extended number format. + magicExtended = 01036 +) + +// header fields. +const ( + fieldMagic = iota + fieldNameSize + fieldBoolCount + fieldNumCount + fieldStringCount + fieldTableSize +) + +// header extended fields. +const ( + fieldExtBoolCount = iota + fieldExtNumCount + fieldExtStringCount + fieldExtOffsetCount + fieldExtTableSize +) + +// hasInvalidCaps determines if the capabilities in h are invalid. +func hasInvalidCaps(h []int) bool { + return h[fieldBoolCount] > CapCountBool || + h[fieldNumCount] > CapCountNum || + h[fieldStringCount] > CapCountString +} + +// capLength returns the total length of the capabilities in bytes. +func capLength(h []int) int { + return h[fieldNameSize] + + h[fieldBoolCount] + + (h[fieldNameSize]+h[fieldBoolCount])%2 + // account for word align + h[fieldNumCount]*2 + + h[fieldStringCount]*2 + + h[fieldTableSize] +} + +// hasInvalidExtOffset determines if the extended offset field is valid. +func hasInvalidExtOffset(h []int) bool { + return h[fieldExtBoolCount]+ + h[fieldExtNumCount]+ + h[fieldExtStringCount]*2 != h[fieldExtOffsetCount] +} + +// extCapLength returns the total length of extended capabilities in bytes. +func extCapLength(h []int, numWidth int) int { + return h[fieldExtBoolCount] + + h[fieldExtBoolCount]%2 + // account for word align + h[fieldExtNumCount]*(numWidth/8) + + h[fieldExtOffsetCount]*2 + + h[fieldExtTableSize] +} + +// findNull finds the position of null in buf. +func findNull(buf []byte, i int) int { + for ; i < len(buf); i++ { + if buf[i] == 0 { + return i + } + } + return -1 +} + +// readStrings decodes n strings from string data table buf using the indexes in idx. 
+func readStrings(idx []int, buf []byte, n int) (map[int][]byte, int, error) { + var last int + m := make(map[int][]byte) + for i := 0; i < n; i++ { + start := idx[i] + if start < 0 { + continue + } + if end := findNull(buf, start); end != -1 { + m[i], last = buf[start:end], end+1 + } else { + return nil, 0, ErrInvalidStringTable + } + } + return m, last, nil +} + +// decoder holds state info while decoding a terminfo file. +type decoder struct { + buf []byte + pos int + len int +} + +// readBytes reads the next n bytes of buf, incrementing pos by n. +func (d *decoder) readBytes(n int) ([]byte, error) { + if d.len < d.pos+n { + return nil, ErrUnexpectedFileEnd + } + n, d.pos = d.pos, d.pos+n + return d.buf[n:d.pos], nil +} + +// readInts reads n number of ints with width w. +func (d *decoder) readInts(n, w int) ([]int, error) { + w /= 8 + l := n * w + + buf, err := d.readBytes(l) + if err != nil { + return nil, err + } + + // align + d.pos += d.pos % 2 + + z := make([]int, n) + for i, j := 0, 0; i < l; i, j = i+w, j+1 { + switch w { + case 1: + z[i] = int(buf[i]) + case 2: + z[j] = int(int16(buf[i+1])<<8 | int16(buf[i])) + case 4: + z[j] = int(buf[i+3])<<24 | int(buf[i+2])<<16 | int(buf[i+1])<<8 | int(buf[i]) + } + } + + return z, nil +} + +// readBools reads the next n bools. +func (d *decoder) readBools(n int) (map[int]bool, map[int]bool, error) { + buf, err := d.readInts(n, 8) + if err != nil { + return nil, nil, err + } + + // process + bools, boolsM := make(map[int]bool), make(map[int]bool) + for i, b := range buf { + bools[i] = b == 1 + if int8(b) == -2 { + boolsM[i] = true + } + } + + return bools, boolsM, nil +} + +// readNums reads the next n nums. 
+func (d *decoder) readNums(n, w int) (map[int]int, map[int]bool, error) { + buf, err := d.readInts(n, w) + if err != nil { + return nil, nil, err + } + + // process + nums, numsM := make(map[int]int), make(map[int]bool) + for i := 0; i < n; i++ { + nums[i] = buf[i] + if buf[i] == -2 { + numsM[i] = true + } + } + + return nums, numsM, nil +} + +// readStringTable reads the string data for n strings and the accompanying data +// table of length sz. +func (d *decoder) readStringTable(n, sz int) ([][]byte, []int, error) { + buf, err := d.readInts(n, 16) + if err != nil { + return nil, nil, err + } + + // read string data table + data, err := d.readBytes(sz) + if err != nil { + return nil, nil, err + } + + // align + d.pos += d.pos % 2 + + // process + s := make([][]byte, n) + var m []int + for i := 0; i < n; i++ { + start := buf[i] + if start == -2 { + m = append(m, i) + } else if start >= 0 { + if end := findNull(data, start); end != -1 { + s[i] = data[start:end] + } else { + return nil, nil, ErrInvalidStringTable + } + } + } + + return s, m, nil +} + +// readStrings reads the next n strings and processes the string data table of +// length sz. +func (d *decoder) readStrings(n, sz int) (map[int][]byte, map[int]bool, error) { + s, m, err := d.readStringTable(n, sz) + if err != nil { + return nil, nil, err + } + + strs := make(map[int][]byte) + for k, v := range s { + if k == AcsChars { + v = canonicalizeAscChars(v) + } + strs[k] = v + } + + strsM := make(map[int]bool, len(m)) + for _, k := range m { + strsM[k] = true + } + + return strs, strsM, nil +} + +// canonicalizeAscChars reorders chars to be unique, in order. 
+// +// see repair_ascc in ncurses-6.0/progs/dump_entry.c +func canonicalizeAscChars(z []byte) []byte { + var c chars + enc := make(map[byte]byte, len(z)/2) + for i := 0; i < len(z); i += 2 { + if _, ok := enc[z[i]]; !ok { + a, b := z[i], z[i+1] + //log.Printf(">>> a: %d %c, b: %d %c", a, a, b, b) + c, enc[a] = append(c, b), b + } + } + sort.Sort(c) + + r := make([]byte, 2*len(c)) + for i := 0; i < len(c); i++ { + r[i*2], r[i*2+1] = c[i], enc[c[i]] + } + return r +} + +type chars []byte + +func (c chars) Len() int { return len(c) } +func (c chars) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c chars) Less(i, j int) bool { return c[i] < c[j] } diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/LICENSE similarity index 100% rename from vendor/github.com/uber/jaeger-lib/LICENSE rename to vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/LICENSE diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/config.go new file mode 100644 index 0000000000..369735ee6e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/config.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Based on https://github.com/go-kit/kit/blob/3796a6b25f5c6c545454d3ed7187c4ced258083d/tracing/opencensus/endpoint_options.go + +package otelkit // import "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// config holds the options for tracing an endpoint. +type config struct { + // TracerProvider provides access to instrumentation Tracers. + TracerProvider trace.TracerProvider + + // IgnoreBusinessError if set to true will not treat a business error + // identified through the endpoint.Failer interface as a span error. + IgnoreBusinessError bool + + // Operation identifies the current operation and serves as a span name. + Operation string + + // GetOperation is an optional function that can set the span name based on the existing operation + // for the endpoint and information in the context. + // + // If the function is nil, or the returned operation is empty, the existing operation for the endpoint is used. + GetOperation func(ctx context.Context, operation string) string + + // Attributes holds the default attributes for each span created by this middleware. + Attributes []attribute.KeyValue + + // GetAttributes is an optional function that can extract trace attributes + // from the context and add them to the span. + GetAttributes func(ctx context.Context) []attribute.KeyValue +} + +// Option configures an EndpointMiddleware. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. 
+func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(o *config) { + if provider != nil { + o.TracerProvider = provider + } + }) +} + +// WithIgnoreBusinessError if set to true will not treat a business error +// identified through the endpoint.Failer interface as a span error. +func WithIgnoreBusinessError(val bool) Option { + return optionFunc(func(o *config) { + o.IgnoreBusinessError = val + }) +} + +// WithOperation sets an operation name for an endpoint. +// Use this when you register a middleware for each endpoint. +func WithOperation(operation string) Option { + return optionFunc(func(o *config) { + o.Operation = operation + }) +} + +// WithOperationGetter sets an operation name getter function in config. +func WithOperationGetter(fn func(ctx context.Context, name string) string) Option { + return optionFunc(func(o *config) { + o.GetOperation = fn + }) +} + +// WithAttributes sets the default attributes for the spans created by the Endpoint tracer. +func WithAttributes(attrs ...attribute.KeyValue) Option { + return optionFunc(func(o *config) { + o.Attributes = attrs + }) +} + +// WithAttributeGetter extracts additional attributes from the context. +func WithAttributeGetter(fn func(ctx context.Context) []attribute.KeyValue) Option { + return optionFunc(func(o *config) { + o.GetAttributes = fn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/doc.go new file mode 100644 index 0000000000..62c437be93 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/doc.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otelkit instruments the github.com/go-kit/kit package. +// +// Compared to other instrumentation libraries provided by go-kit itself, +// this package only provides instrumentation for the endpoint layer. +// For instrumenting the transport layer, +// look at the instrumentation libraries provided by go.opentelemetry.io/contrib. +// Learn more about go-kit's layers at https://gokit.io/faq/#architecture-and-design. +package otelkit // import "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/endpoint.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/endpoint.go new file mode 100644 index 0000000000..6a7097ecf4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/endpoint.go @@ -0,0 +1,128 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Based on https://github.com/go-kit/kit/blob/3796a6b25f5c6c545454d3ed7187c4ced258083d/tracing/opencensus/endpoint.go + +package otelkit // import "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + +import ( + "context" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + + "github.com/go-kit/kit/endpoint" + "github.com/go-kit/kit/sd/lb" +) + +const ( + tracerName = "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + + // defaultSpanName is the default endpoint span name to use. + defaultSpanName = "gokit/endpoint" +) + +// EndpointMiddleware returns an Endpoint middleware, tracing a Go kit endpoint. +// This endpoint middleware should be used in combination with a Go kit Transport +// tracing middleware, generic OpenTelemetry transport middleware or custom before +// and after transport functions. +func EndpointMiddleware(options ...Option) endpoint.Middleware { + cfg := &config{} + + for _, o := range options { + o.apply(cfg) + } + + if cfg.TracerProvider == nil { + cfg.TracerProvider = otel.GetTracerProvider() + } + + tracer := cfg.TracerProvider.Tracer( + tracerName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + return func(next endpoint.Endpoint) endpoint.Endpoint { + return func(ctx context.Context, request interface{}) (response interface{}, err error) { + operation := cfg.Operation + if cfg.GetOperation != nil { + if newOperation := cfg.GetOperation(ctx, operation); newOperation != "" { + operation = newOperation + } + } + + spanName := operation + if spanName == "" { + spanName = defaultSpanName + } + + opts := []trace.SpanStartOption{ + trace.WithAttributes(cfg.Attributes...), + trace.WithSpanKind(trace.SpanKindServer), + } + + if cfg.GetAttributes != nil { + opts = append(opts, trace.WithAttributes(cfg.GetAttributes(ctx)...)) + } + + ctx, span := tracer.Start(ctx, spanName, opts...) 
+ defer span.End() + + defer func() { + if err != nil { + if lberr, ok := err.(lb.RetryError); ok { + // Handle errors originating from lb.Retry. + for idx, rawErr := range lberr.RawErrors { + span.RecordError(rawErr, trace.WithAttributes( + attribute.Int("gokit.lb.retry.count", idx+1), + )) + } + + span.RecordError(lberr.Final) + span.SetStatus(codes.Error, lberr.Error()) + + return + } + + // generic error + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + + return + } + + // Test for business error. Business errors are often + // successful requests carrying a business failure that + // the client can act upon and therefore do not count + // as failed requests. + if res, ok := response.(endpoint.Failer); ok && res.Failed() != nil { + span.RecordError(res.Failed()) + + if !cfg.IgnoreBusinessError { + span.SetStatus(codes.Error, res.Failed().Error()) + } + + return + } + // no errors identified + }() + + response, err = next(ctx, request) + + return + } + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/version.go new file mode 100644 index 0000000000..ecd3b4773a --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelkit // import "go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit" + +// Version is the current release version of the go-kit instrumentation. +func Version() string { + return "0.38.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go new file mode 100644 index 0000000000..2ae8620fb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + // instrumentationName is the name of this instrumentation package. + instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. + GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +// Filter is a predicate used to determine whether a given request in +// interceptor info should be traced. A Filter must return true if +// the request should be traced. +type Filter func(*InterceptorInfo) bool + +// config is a group of options for this instrumentation. +type config struct { + Filter Filter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + + meter metric.Meter + rpcServerDuration instrument.Int64Histogram +} + +// Option applies an option value for a config. +type Option interface { + apply(*config) +} + +// newConfig returns a config configured with all the passed Options. 
+func newConfig(opts []Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + TracerProvider: otel.GetTracerProvider(), + MeterProvider: global.MeterProvider(), + } + for _, o := range opts { + o.apply(c) + } + + c.meter = c.MeterProvider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(SemVersion()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + var err error + if c.rpcServerDuration, err = c.meter.Int64Histogram("rpc.server.duration", instrument.WithUnit("ms")); err != nil { + otel.Handle(err) + } + + return c +} + +type propagatorsOption struct{ p propagation.TextMapPropagator } + +func (o propagatorsOption) apply(c *config) { + if o.p != nil { + c.Propagators = o.p + } +} + +// WithPropagators returns an Option to use the Propagators when extracting +// and injecting trace context from requests. +func WithPropagators(p propagation.TextMapPropagator) Option { + return propagatorsOption{p: p} +} + +type tracerProviderOption struct{ tp trace.TracerProvider } + +func (o tracerProviderOption) apply(c *config) { + if o.tp != nil { + c.TracerProvider = o.tp + } +} + +// WithInterceptorFilter returns an Option to use the request filter. +func WithInterceptorFilter(f Filter) Option { + return interceptorFilterOption{f: f} +} + +type interceptorFilterOption struct { + f Filter +} + +func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.Filter = o.f + } +} + +// WithTracerProvider returns an Option to use the TracerProvider when +// creating a Tracer. +func WithTracerProvider(tp trace.TracerProvider) Option { + return tracerProviderOption{tp: tp} +} + +type meterProviderOption struct{ mp metric.MeterProvider } + +func (o meterProviderOption) apply(c *config) { + if o.mp != nil { + c.MeterProvider = o.mp + } +} + +// WithMeterProvider returns an Option to use the MeterProvider when +// creating a Meter. If this option is not provide the global MeterProvider will be used. 
+func WithMeterProvider(mp metric.MeterProvider) Option { + return meterProviderOption{mp: mp} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go new file mode 100644 index 0000000000..b74d558e37 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -0,0 +1,501 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +// gRPC tracing middleware +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md +import ( + "context" + "io" + "net" + "strconv" + "time" + + "google.golang.org/grpc" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +type messageType attribute.KeyValue + +// Event adds an event of the messageType to the span associated with the +// passed context with a message id. +func (m messageType) Event(ctx context.Context, id int, _ interface{}) { + span := trace.SpanFromContext(ctx) + if !span.IsRecording() { + return + } + span.AddEvent("message", trace.WithAttributes( + attribute.KeyValue(m), + RPCMessageIDKey.Int(id), + )) +} + +var ( + messageSent = messageType(RPCMessageTypeSent) + messageReceived = messageType(RPCMessageTypeReceived) +) + +// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable +// for use in a grpc.Dial call. +func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + callOpts ...grpc.CallOption, + ) error { + i := &InterceptorInfo{ + Method: method, + Type: UnaryClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return invoker(ctx, method, req, reply, cc, callOpts...) 
+ } + + name, attr := spanInfo(method, cc.Target()) + var span trace.Span + ctx, span = tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + ) + defer span.End() + + ctx = inject(ctx, cfg.Propagators) + + messageSent.Event(ctx, 1, req) + + err := invoker(ctx, method, req, reply, cc, callOpts...) + + messageReceived.Event(ctx, 1, reply) + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +type streamEventType int + +type streamEvent struct { + Type streamEventType + Err error +} + +const ( + receiveEndEvent streamEventType = iota + errorEvent +) + +// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and +// SendMsg method call. +type clientStream struct { + grpc.ClientStream + + desc *grpc.StreamDesc + events chan streamEvent + eventsDone chan struct{} + finished chan error + + receivedMessageID int + sentMessageID int +} + +var _ = proto.Marshal + +func (w *clientStream) RecvMsg(m interface{}) error { + err := w.ClientStream.RecvMsg(m) + + if err == nil && !w.desc.ServerStreams { + w.sendStreamEvent(receiveEndEvent, nil) + } else if err == io.EOF { + w.sendStreamEvent(receiveEndEvent, nil) + } else if err != nil { + w.sendStreamEvent(errorEvent, err) + } else { + w.receivedMessageID++ + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + + return err +} + +func (w *clientStream) SendMsg(m interface{}) error { + err := w.ClientStream.SendMsg(m) + + w.sentMessageID++ + messageSent.Event(w.Context(), w.sentMessageID, m) + + if err != nil { + w.sendStreamEvent(errorEvent, err) + } + + return err +} + +func (w *clientStream) Header() (metadata.MD, error) { + md, err := w.ClientStream.Header() + + if err != nil { + w.sendStreamEvent(errorEvent, err) + } + + return md, err +} + +func (w 
*clientStream) CloseSend() error { + err := w.ClientStream.CloseSend() + + if err != nil { + w.sendStreamEvent(errorEvent, err) + } + + return err +} + +func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc) *clientStream { + events := make(chan streamEvent) + eventsDone := make(chan struct{}) + finished := make(chan error) + + go func() { + defer close(eventsDone) + + for { + select { + case event := <-events: + switch event.Type { + case receiveEndEvent: + finished <- nil + return + case errorEvent: + finished <- event.Err + return + } + case <-ctx.Done(): + finished <- ctx.Err() + return + } + } + }() + + return &clientStream{ + ClientStream: s, + desc: desc, + events: events, + eventsDone: eventsDone, + finished: finished, + } +} + +func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { + select { + case <-w.eventsDone: + case w.events <- streamEvent{Type: eventType, Err: err}: + } +} + +// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. +func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + callOpts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + i := &InterceptorInfo{ + Method: method, + Type: StreamClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return streamer(ctx, desc, cc, method, callOpts...) + } + + name, attr := spanInfo(method, cc.Target()) + var span trace.Span + ctx, span = tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + ) + + ctx = inject(ctx, cfg.Propagators) + + s, err := streamer(ctx, desc, cc, method, callOpts...) 
+ if err != nil { + grpcStatus, _ := status.FromError(err) + span.SetStatus(codes.Error, grpcStatus.Message()) + span.SetAttributes(statusCodeAttr(grpcStatus.Code())) + span.End() + return s, err + } + stream := wrapClientStream(ctx, s, desc) + + go func() { + err := <-stream.finished + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + span.End() + }() + + return stream, nil + } +} + +// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable +// for use in a grpc.NewServer call. +func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + i := &InterceptorInfo{ + UnaryServerInfo: info, + Type: UnaryServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(ctx, req) + } + + ctx = extract(ctx, cfg.Propagators) + + name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + ) + defer span.End() + + messageReceived.Event(ctx, 1, req) + + var statusCode grpc_codes.Code + defer func(t time.Time) { + elapsedTime := time.Since(t) / time.Millisecond + attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode))) + cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), attr...) 
+ }(time.Now()) + + resp, err := handler(ctx, req) + if err != nil { + s, _ := status.FromError(err) + statusCode = s.Code() + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + messageSent.Event(ctx, 1, s.Proto()) + } else { + statusCode = grpc_codes.OK + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + messageSent.Event(ctx, 1, resp) + } + + return resp, err + } +} + +// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and +// SendMsg method call. +type serverStream struct { + grpc.ServerStream + ctx context.Context + + receivedMessageID int + sentMessageID int +} + +func (w *serverStream) Context() context.Context { + return w.ctx +} + +func (w *serverStream) RecvMsg(m interface{}) error { + err := w.ServerStream.RecvMsg(m) + + if err == nil { + w.receivedMessageID++ + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + + return err +} + +func (w *serverStream) SendMsg(m interface{}) error { + err := w.ServerStream.SendMsg(m) + + w.sentMessageID++ + messageSent.Event(w.Context(), w.sentMessageID, m) + + return err +} + +func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream { + return &serverStream{ + ServerStream: ss, + ctx: ctx, + } +} + +// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. 
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + return func( + srv interface{}, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + ctx := ss.Context() + i := &InterceptorInfo{ + StreamServerInfo: info, + Type: StreamServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(srv, wrapServerStream(ctx, ss)) + } + + ctx = extract(ctx, cfg.Propagators) + + name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + ) + defer span.End() + + err := handler(srv, wrapServerStream(ctx, ss)) + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +// spanInfo returns a span name and all appropriate attributes from the gRPC +// method and peer address. +func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) { + attrs := []attribute.KeyValue{RPCSystemGRPC} + name, mAttrs := internal.ParseFullMethod(fullMethod) + attrs = append(attrs, mAttrs...) + attrs = append(attrs, peerAttr(peerAddress)...) + return name, attrs +} + +// peerAttr returns attributes about the peer address. 
+func peerAttr(addr string) []attribute.KeyValue { + host, p, err := net.SplitHostPort(addr) + if err != nil { + return []attribute.KeyValue(nil) + } + + if host == "" { + host = "127.0.0.1" + } + port, err := strconv.Atoi(p) + if err != nil { + return []attribute.KeyValue(nil) + } + + var attr []attribute.KeyValue + if ip := net.ParseIP(host); ip != nil { + attr = []attribute.KeyValue{ + semconv.NetSockPeerAddr(host), + semconv.NetSockPeerPort(port), + } + } else { + attr = []attribute.KeyValue{ + semconv.NetPeerName(host), + semconv.NetPeerPort(port), + } + } + + return attr +} + +// peerFromCtx returns a peer address from a context, if one exists. +func peerFromCtx(ctx context.Context) string { + p, ok := peer.FromContext(ctx) + if !ok { + return "" + } + return p.Addr.String() +} + +// statusCodeAttr returns status code attribute based on given gRPC code. +func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { + return GRPCStatusCodeKey.Int64(int64(c)) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go new file mode 100644 index 0000000000..f6116946bf --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "google.golang.org/grpc" +) + +// InterceptorType is the flag to define which gRPC interceptor +// the InterceptorInfo object is. +type InterceptorType uint8 + +const ( + // UndefinedInterceptor is the type for the interceptor information that is not + // well initialized or categorized to other types. + UndefinedInterceptor InterceptorType = iota + // UnaryClient is the type for grpc.UnaryClient interceptor. + UnaryClient + // StreamClient is the type for grpc.StreamClient interceptor. + StreamClient + // UnaryServer is the type for grpc.UnaryServer interceptor. + UnaryServer + // StreamServer is the type for grpc.StreamServer interceptor. + StreamServer +) + +// InterceptorInfo is the union of some arguments to four types of +// gRPC interceptors. +type InterceptorInfo struct { + // Method is method name registered to UnaryClient and StreamClient + Method string + // UnaryServerInfo is the metadata for UnaryServer + UnaryServerInfo *grpc.UnaryServerInfo + // StreamServerInfo if the metadata for StreamServer + StreamServerInfo *grpc.StreamServerInfo + // Type is the type for interceptor + Type InterceptorType +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go new file mode 100644 index 0000000000..c40f87c4f9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + +import ( + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// ParseFullMethod returns a span name following the OpenTelemetry semantic +// conventions as well as all applicable span attribute.KeyValue attributes based +// on a gRPC's FullMethod. +func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { + name := strings.TrimLeft(fullMethod, "/") + parts := strings.SplitN(name, "/", 2) + if len(parts) != 2 { + // Invalid format, does not follow `/package.service/method`. + return name, []attribute.KeyValue(nil) + } + + var attrs []attribute.KeyValue + if service := parts[0]; service != "" { + attrs = append(attrs, semconv.RPCService(service)) + } + if method := parts[1]; method != "" { + attrs = append(attrs, semconv.RPCMethod(method)) + } + return name, attrs +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go new file mode 100644 index 0000000000..d91c6df237 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type metadataSupplier struct { + metadata *metadata.MD +} + +// assert that metadataSupplier implements the TextMapCarrier interface. +var _ propagation.TextMapCarrier = &metadataSupplier{} + +func (s *metadataSupplier) Get(key string) string { + values := s.metadata.Get(key) + if len(values) == 0 { + return "" + } + return values[0] +} + +func (s *metadataSupplier) Set(key string, value string) { + s.metadata.Set(key, value) +} + +func (s *metadataSupplier) Keys() []string { + out := make([]string, 0, len(*s.metadata)) + for key := range *s.metadata { + out = append(out, key) + } + return out +} + +// Inject injects correlation context and span context into the gRPC +// metadata object. This function is meant to be used on outgoing +// requests. +// Deprecated: Unnecessary public func. 
+func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { + c := newConfig(opts) + c.Propagators.Inject(ctx, &metadataSupplier{ + metadata: md, + }) +} + +func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } + propagators.Inject(ctx, &metadataSupplier{ + metadata: &md, + }) + return metadata.NewOutgoingContext(ctx, md) +} + +// Extract returns the correlation context and span context that +// another service encoded in the gRPC metadata object with Inject. +// This function is meant to be used on incoming requests. +// Deprecated: Unnecessary public func. +func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { + c := newConfig(opts) + ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + metadata: md, + }) + + return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} + +func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + return propagators.Extract(ctx, &metadataSupplier{ + metadata: &md, + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go new file mode 100644 index 0000000000..b65fab308f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// Semantic conventions for attribute keys for gRPC. +const ( + // Name of message transmitted or received. + RPCNameKey = attribute.Key("name") + + // Type of message transmitted or received. + RPCMessageTypeKey = attribute.Key("message.type") + + // Identifier of message transmitted or received. + RPCMessageIDKey = attribute.Key("message.id") + + // The compressed size of the message transmitted or received in bytes. + RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // The uncompressed size of the message transmitted or received in + // bytes. + RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +// Semantic conventions for common RPC attributes. +var ( + // Semantic convention for gRPC as the remoting system. + RPCSystemGRPC = semconv.RPCSystemGRPC + + // Semantic convention for a message named message. + RPCNameMessage = RPCNameKey.String("message") + + // Semantic conventions for RPC message types. 
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go new file mode 100644 index 0000000000..78cac03ed1 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +// Version is the current release version of the gRPC instrumentation. +func Version() string { + return "0.40.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/context.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/context.go new file mode 100644 index 0000000000..8a62451f74 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/context.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" + +import "context" + +type jaegerKeyType int + +const ( + debugKey jaegerKeyType = iota +) + +// withDebug returns a copy of parent with debug set as the debug flag value . +func withDebug(parent context.Context, debug bool) context.Context { + return context.WithValue(parent, debugKey, debug) +} + +// debugFromContext returns the debug value stored in ctx. +// +// If no debug value is stored in ctx false is returned. +func debugFromContext(ctx context.Context) bool { + if ctx == nil { + return false + } + if debug, ok := ctx.Value(debugKey).(bool); ok { + return debug + } + return false +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/doc.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/doc.go new file mode 100644 index 0000000000..1d3fb9f89f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package jaeger implements the Jaeger propagator specification as defined at +// https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go new file mode 100644 index 0000000000..08b5faa5d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + jaegerHeader = "uber-trace-id" + separator = ":" + traceID128bitsWidth = 128 / 4 + spanIDWidth = 64 / 4 + + idPaddingChar = "0" + + flagsDebug = 0x02 + flagsSampled = 0x01 + flagsNotSampled = 0x00 + + deprecatedParentSpanID = "0" +) + +var ( + empty = trace.SpanContext{} + + errMalformedTraceContextVal = errors.New("header value of uber-trace-id should contain four different part separated by : ") + errInvalidTraceIDLength = errors.New("invalid trace id length, must be either 16 or 32") + errMalformedTraceID = errors.New("cannot decode trace id from header, should be a string of hex, lowercase trace id can't be all zero") + errInvalidSpanIDLength = errors.New("invalid span id length, must be 16") + errMalformedSpanID = errors.New("cannot decode span id from header, should be a string of hex, lowercase span id can't be all zero") + errMalformedFlag = errors.New("cannot decode flag") +) + +// Jaeger propagator serializes SpanContext to/from Jaeger Headers +// +// Jaeger format: +// +// uber-trace-id: {trace-id}:{span-id}:{parent-span-id}:{flags}. +type Jaeger struct{} + +var _ propagation.TextMapPropagator = &Jaeger{} + +// Inject injects a context to the carrier following jaeger format. +// The parent span ID is set to an dummy parent span id as the most implementations do. 
+func (jaeger Jaeger) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + sc := trace.SpanFromContext(ctx).SpanContext() + headers := []string{} + if !sc.TraceID().IsValid() || !sc.SpanID().IsValid() { + return + } + headers = append(headers, sc.TraceID().String(), sc.SpanID().String(), deprecatedParentSpanID) + if debugFromContext(ctx) { + headers = append(headers, fmt.Sprintf("%x", flagsDebug|flagsSampled)) + } else if sc.IsSampled() { + headers = append(headers, fmt.Sprintf("%x", flagsSampled)) + } else { + headers = append(headers, fmt.Sprintf("%x", flagsNotSampled)) + } + + carrier.Set(jaegerHeader, strings.Join(headers, separator)) +} + +// Extract extracts a context from the carrier if it contains Jaeger headers. +func (jaeger Jaeger) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + // extract tracing information + if h := carrier.Get(jaegerHeader); h != "" { + ctx, sc, err := extract(ctx, h) + if err == nil && sc.IsValid() { + return trace.ContextWithRemoteSpanContext(ctx, sc) + } + } + + return ctx +} + +func extract(ctx context.Context, headerVal string) (context.Context, trace.SpanContext, error) { + var ( + scc = trace.SpanContextConfig{} + err error + ) + + parts := strings.Split(headerVal, separator) + if len(parts) != 4 { + return ctx, empty, errMalformedTraceContextVal + } + + // extract trace ID + if parts[0] != "" { + id := parts[0] + if len(id) > traceID128bitsWidth { + return ctx, empty, errInvalidTraceIDLength + } + // padding when length is less than 32 + if len(id) < traceID128bitsWidth { + padCharCount := traceID128bitsWidth - len(id) + id = strings.Repeat(idPaddingChar, padCharCount) + id + } + scc.TraceID, err = trace.TraceIDFromHex(id) + if err != nil { + return ctx, empty, errMalformedTraceID + } + } + + // extract span ID + if parts[1] != "" { + id := parts[1] + if len(id) > spanIDWidth { + return ctx, empty, errInvalidSpanIDLength + } + // padding when length is less than 16 + if 
len(id) < spanIDWidth { + padCharCount := spanIDWidth - len(id) + id = strings.Repeat(idPaddingChar, padCharCount) + id + } + scc.SpanID, err = trace.SpanIDFromHex(id) + if err != nil { + return ctx, empty, errMalformedSpanID + } + } + + // skip third part as it is deprecated + + // extract flag + if parts[3] != "" { + flagStr := parts[3] + flag, err := strconv.ParseInt(flagStr, 16, 64) + if err != nil { + return ctx, empty, errMalformedFlag + } + if flag&flagsSampled == flagsSampled { + // if sample bit is set, we check if debug bit is also set + if flag&flagsDebug == flagsDebug { + scc.TraceFlags |= trace.FlagsSampled + ctx = withDebug(ctx, true) + } else { + scc.TraceFlags |= trace.FlagsSampled + } + } + // ignore other bit, including firehose since we don't have corresponding flag in trace context. + } + return ctx, trace.NewSpanContext(scc), nil +} + +// Fields returns the Jaeger header key whose value is set with Inject. +func (jaeger Jaeger) Fields() []string { + return []string{jaegerHeader} +} diff --git a/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go new file mode 100644 index 0000000000..793c8bf42c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/propagators/jaeger/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger" + +// Version is the current release version of the Jaeger propagator. +func Version() string { + return "1.15.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 0000000000..314766e91b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 0000000000..0b605b3d67 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,21 @@ +.DS_Store +Thumbs.db + +.tools/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* + +gen/ + +/example/fib/fib +/example/fib/traces.txt +/example/jaeger/jaeger +/example/namedtracer/namedtracer +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin +/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules new file mode 100644 index 0000000000..38a1f56982 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitmodules @@ -0,0 +1,3 @@ +[submodule "opentelemetry-proto"] + path = exporters/otlp/internal/opentelemetry-proto + url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 0000000000..0f099f5759 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,244 @@ +# See 
https://github.com/golangci/golangci-lint#config-file +run: + issues-exit-code: 1 #Default + tests: true #Default + +linters: + # Disable everything by default so upgrades to not include new "default + # enabled" linters. + disable-all: true + # Specifically enable linters we want to use. + enable: + - depguard + - errcheck + - godot + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. + exclude-rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" + linters: + - revive + # Yes, they are, but it's okay in a test. + - path: _test\.go + text: "exported func.*returns unexported type.*which can be annoying to use" + linters: + - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 + +linters-settings: + depguard: + # Check the list against standard lib. + # Default: false + include-go-root: true + # A list of packages for the list type specified. 
+ # Default: [] + packages: + - "crypto/md5" + - "crypto/sha1" + - "crypto/**/pkix" + ignore-file-rules: + - "**/*_test.go" + additional-guards: + # Do not allow testing packages in non-test files. + - list-type: denylist + include-go-root: true + packages: + - testing + - github.com/stretchr/testify + ignore-file-rules: + - "**/*_test.go" + - "**/*test/*.go" + - "**/internal/matchers/*.go" + godot: + exclude: + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) reenable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - 
name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 0000000000..40d62fa2eb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 0000000000..3202496c35 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all 
rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 0000000000..1d9726f60b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2369 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. + The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. 
(#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. + +### Changed + +- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. 
(#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) + +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. 
(#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. 
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. +- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. 
+ These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. 
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. 
(#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter added. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) + +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. 
(#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. 
(#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. 
+ This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. 
(#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Reenabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. 
(#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. (#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. 
(#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352) + +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. 
(#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. (#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the sdk. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. 
+ Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. 
(#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shutdown even if one reports an error (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. 
(#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) +- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) + +### Fixed + +- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) + +## [1.8.0/0.31.0] - 2022-07-08 + +### Added + +- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods +of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) + +### Changed + +- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) +- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) +- Move metric no-op implementation form `nonrecording` to `metric` package. (#2866) + +### Removed + +- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) + +### Deprecated + +- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. + Use the equivalent `Scope` struct instead. (#2977) +- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. + Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) + +## [1.7.0/0.30.0] - 2022-04-28 + +### Added + +- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. 
+ The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) +- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. + The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) +- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. + The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) +- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) + +### Fixed + +- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) +- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) + +### Changed + +- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) +- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. + The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) +- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. + Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) + +### Deprecated + +- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.Attribute` method instead. (#2790) +- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) +- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `MergeIterator.Attribute` method instead. 
(#2790) + +### Removed + +- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) +- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) + +## [0.29.0] - 2022-04-11 + +### Added + +- The metrics global package was added back into several test files. (#2764) +- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. + This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) + +### Removed + +- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) + +### Changed + +- Don't panic anymore when setting a global MeterProvider to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) + +## [1.6.3] - 2022-04-07 + +### Fixed + +- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) + +## [1.6.2] - 2022-04-06 + +### Changed + +- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) + +## [1.6.1] - 2022-03-28 + +### Fixed + +- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. 
+ Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) + +### Security + +- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. + This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) + +## [1.6.0/0.28.0] - 2022-03-23 + +### ⚠️ Notice ⚠️ + +This update is a breaking change of the unstable Metrics API. +Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. + +### Added + +- Add metrics exponential histogram support. + New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) +- Add Go 1.18 to our compatibility tests. (#2679) +- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) +- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) + +### Changed + +- The metrics API has been significantly changed to match the revised OpenTelemetry specification. + High-level changes include: + + - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. + These `InstrumentProvider`s are managed with a `Meter`. + - Synchronous and asynchronous instruments are grouped into their own packages based on value types. + - Asynchronous callbacks can now be registered with a `Meter`. + + Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) + +### Fixed + +- Fallback to general attribute limits when span specific ones are not set in the environment. 
(#2675, #2677) + +## [1.5.0] - 2022-03-16 + +### Added + +- Log the Exporters configuration in the TracerProviders message. (#2578) +- Added support to configure the span limits with environment variables. + The following environment variables are supported. (#2606, #2637) + - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` + - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_EVENT_COUNT_LIMIT` + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` + + If the provided environment variables are invalid (negative), the default values would be used. +- Rename the `gc` runtime name to `go` (#2560) +- Add resource container ID detection. (#2418) +- Add span attribute value length limit. + The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. + The default limit for this resource is "unlimited". (#2637) +- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. + This option replaces the `WithSpanLimits` option. + Zero or negative values will not be changed to the default value like `WithSpanLimits` does. + Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. + Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) + +### Changed + +- Drop oldest tracestate `Member` when capacity is reached. (#2592) +- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601) +- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639) +- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) +- Introduce new internal `envconfig` package for OTLP exporters. 
(#2608) +- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) + +### Fixed + +- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) +- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) +- Unlimited span limits are now supported (negative values). (#2636, #2637) + +### Deprecated + +- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. + Use `WithRawSpanLimits` instead. + That option allows setting unlimited and zero limits, this option does not. + This option will be kept until the next major version incremented release. (#2637) + +## [1.4.1] - 2022-02-16 + +### Fixed + +- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) + +## [1.4.0] - 2022-02-11 + +### Added + +- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) +- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. + To enable use a logger with Verbosity (V level) `>=1`. (#2500) +- Added support to configure the batch span-processor with environment variables. + The following environment variables are used. (#2515) + - `OTEL_BSP_SCHEDULE_DELAY` + - `OTEL_BSP_EXPORT_TIMEOUT` + - `OTEL_BSP_MAX_QUEUE_SIZE`. + - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` + +### Changed + +- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) + +### Deprecated + +- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) +- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) + +### Fixed + +- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) +- Fix UDP packets overflowing with Jaeger payloads. 
(#2489, #2512) +- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) +- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) +- W3C baggage will now decode urlescaped values. (#2529) +- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522) +- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. + Instead of dropping the least-recently-used attribute, the last added attribute is dropped. + This drop order still only applies to attributes with unique keys not already contained in the span. + If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) + +### Removed + +- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) + - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) + - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) + - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) + +## [1.3.0] - 2021-12-10 + +### ⚠️ Notice ⚠️ + +We have updated the project minimum supported Go version to 1.16 + +### Added + +- Added an internal Logger. + This can be used by the SDK and API to provide users with feedback of the internal state. + To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. 
(#2343) +- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) +- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) + +### Changed + +- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) +- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) +- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) +- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) +- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371) + +### Fixed + +- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. + Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP__ENDPOINT` environment variable is now used without modification of the path. 
+ When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433) +- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) +- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) + +### Deprecated + +- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) +- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) + +### Removed + +- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) +- Remove the metric Bound Instruments interface and implementations. (#2399) +- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) +- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348) + +## [1.2.0] - 2021-11-12 + +### Changed + +- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) +- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) +- Metrics API cleanup. 
The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: + - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` + - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. + - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) +- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) + +### Added + +- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) +- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) +- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) + +## [1.1.0] - 2021-10-27 + +### Added + +- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) +- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. + The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) +- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. + The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) +- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. + The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. 
(#2322) + - When upgrading from the `semconv/v1.4.0` package note the following name changes: + - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` + - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` + - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` + - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` + - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` + - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` + +### Changed + +- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). + +### Fixed + +- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) +- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285) +- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) + +## [1.0.1] - 2021-10-01 + +### Fixed + +- json stdout exporter no longer crashes due to concurrency bug. (#2265) + +## [Metrics 0.24.0] - 2021-10-01 + +### Changed + +- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) +- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) + - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. + - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. + +## [1.0.0] - 2021-09-20 + +This is the first stable release for the project. 
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). + +### Added + +- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) + +### Fixed + +- Slice-valued attributes can correctly be used as map keys. (#2223) + +### Removed + +- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) +- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) +- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) +- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. + Use the typed functions and methods added to the package instead. (#2235) + - The `Key.Array` method is removed. + - The `Array` function is removed. + - The `Any` function is removed. + - The `ArrayValue` function is removed. + - The `AsArray` function is removed. + +## [1.0.0-RC3] - 2021-09-02 + +### Added + +- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) +- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) +- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) + - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. +- Added the `go.opentelemetry.io/otel/example/fib` example package. + Included is an example application that computes Fibonacci numbers. 
(#2203) + +### Changed + +- Metric instruments have been renamed to match the (feature-frozen) metric API specification: + - ValueRecorder becomes Histogram + - ValueObserver becomes Gauge + - SumObserver becomes CounterObserver + - UpDownSumObserver becomes UpDownCounterObserver + The API exported from this project is still considered experimental. (#2202) +- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) +- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120) +- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196) +- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212) + +### Deprecated + +- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. + All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. + The functions from that package should be used instead. (#2166) +- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. + Use the typed `*Slice` functions and types added to the package instead. 
(#2162) +- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. + Use the typed functions instead. (#2181) +- The `go.opentelemetry.io/otel/oteltest` package is deprecated. + The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188) + +### Removed + +- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105) + +### Fixed + +- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138) +- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) +- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) +- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) +- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195) +- Fixed typos in resources.go. (#2201) + +## [1.0.0-RC2] - 2021-07-26 + +### Added + +- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840) +- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840) +- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095) +- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. 
(#2115) +- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. + This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. + For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118) +- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/skd/trace/tracetest` package. + This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132) + +### Changed + +- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027) +- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095) + +### Deprecated + +- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114) +- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123) +- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. + Use the `trace.ParseTraceState` function instead. (#2122) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020) +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020) +- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. + The explicit `With*` options for every built-in detector should be used instead. 
(#2026 #2097) +- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. + The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095) +- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118) + +### Fixed + +- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032) +- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073) +- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092) +- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. + This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102) +- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108) +- Use `6831` as default Jaeger agent port instead of `6832`. (#2131) + +## [Experimental Metrics v0.22.0] - 2021-07-19 + +### Added + +- Adds HTTP support for OTLP metrics exporter. (#2022) + +### Removed + +- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020) + +## [1.0.0-RC1] / 0.21.0 - 2021-06-18 + +With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` +while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules +with major version 0. + +### Added + +- Adds `otlpgrpc.WithRetry`option for configuring the retry policy for transient errors on the otlp/gRPC exporter. 
(#1832) + - The following status codes are defined as transient errors: + | gRPC Status Code | Description | + | ---------------- | ----------- | + | 1 | Cancelled | + | 4 | Deadline Exceeded | + | 8 | Resource Exhausted | + | 10 | Aborted | + | 11 | Out of Range | + | 14 | Unavailable | + | 15 | Data Loss | +- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874) +- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. + This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873) +- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886) +- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889) +- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912) +- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. + It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937) +- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. + This method returns the number of list-members the `TraceState` holds. (#1937) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data. + Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1922) +- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. 
(#1967) +- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. + These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967) +- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963) +- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938) +- Several builtin resource detectors now correctly populate the schema URL. (#1938) +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data. +- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`.(#1991) +- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005) +- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005) +- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. 
(#2009) + +### Changed + +- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. + `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798) +- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810) +- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846) +- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865) +- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860) +- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855) +- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871) +- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. + This method returns the status of a span using the new `Status` type. (#1874) +- Updated `ExportSpans` method of the`SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. + This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873) +- Unembed `SpanContext` in `Link`. (#1877) +- Generate Semantic conventions from the specification YAML. (#1891) +- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901) +- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. 
(#1902) +- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903) +- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config.) (#1921) +- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921) +- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921) +- Refactored option types according to the contribution style guide. (#1882) +- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. + This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. + The new `ParseTraceState` function should be used to create a `TraceState`. (#1931) +- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931) +- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931) +- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. 
(#1931) +- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985) +- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985) +- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987) +- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988) + +### Deprecated + +- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993) +- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993) + +### Removed + +- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810) +- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. 
(#1810) +- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. + The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. + The `IsRecording` method returns if the span is recording or not. + A read-only span value does not need to know if updates to it will be recorded or not. + By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873) +- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. + The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. + When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873) +- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. + Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. + The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900) + - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009) +- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919) +- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931) +- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. 
+ Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967) +- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. + These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985) +- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990) +- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005) + +### Fixed + +- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851) +- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856) +- BatchSpanProcessor now drops span batches that failed to be exported. (#1860) +- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898) +- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) +- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) +- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) + +### Security + +## [0.20.0] - 2021-04-23 + +### Added + +- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) +- Adds semantic conventions for exceptions. 
(#1492) +- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` + These environment variables can be used to override Jaeger agent hostname and port (#1752) +- Option `ExportTimeout` was added to batch span processor. (#1755) +- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) +- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) +- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) +- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) +- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) +- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) +- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) + - `process.pid` + - `process.executable.name` + - `process.executable.path` + - `process.command_args` + - `process.owner` + - `process.runtime.name` + - `process.runtime.version` + - `process.runtime.description` +- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) +- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. 
(#1758, #1769 and #1811) + - `OTEL_EXPORTER_OTLP_ENDPOINT` + - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` + - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` + - `OTEL_EXPORTER_OTLP_HEADERS` + - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` + - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` + - `OTEL_EXPORTER_OTLP_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` + - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TIMEOUT` + - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` + - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` + - `OTEL_EXPORTER_OTLP_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` +- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) +- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) + +### Fixed + +- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) +- The Jaeger exporter now correctly sets tags for the Span status code and message. + This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) +- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. + Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) +- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) +- Fixed typo for default service name in Jaeger Exporter. (#1797) +- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814) +- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. + Instead, the exporter now splits the batch into smaller sendable batches. 
(#1828) + +### Changed + +- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) +- Jaeger exporter was updated to use thrift v0.14.1. (#1712) +- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) +- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) +- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. + The Span's SpanContext can now self-identify as being remote or not. + This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) +- Improve OTLP/gRPC exporter connection errors. (#1737) +- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. + The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) +- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. + This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) +- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` + to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) +- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) +- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. 
+ It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) +- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) +- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) +- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) +- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) +- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) +- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. + This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) +- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) +- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. + The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) +- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. 
(#1830) + +### Removed + +- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` + These environment variables will no longer be used to override values of the Jaeger exporter (#1752) +- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. + This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. + To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) +- Setting error status while recording error with Span from oteltest package. (#1729) +- The concept of a remote and local Span stored in a context is unified to just the current Span. + Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. + If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) +- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. + This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) +- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) +- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. + The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) +- Remove the `WithDisabled` option from the Jaeger exporter. + To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) +- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. 
+ These functions for retrieving specific environment variable values are redundant of other internal functions and + are not intended for end user use. (#1824) +- Removed the Jaeger exporter `WithSDKOptions` `Option`. + This option was used to set SDK options for the exporter creation convenience functions. + These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. + If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) +- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. + The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter `Option` type is removed. + The type is no longer used by the exporter to configure anything. + All the previous configurations these options provided were duplicates of SDK configuration. + They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) + +## [0.19.0] - 2021-03-18 + +### Added + +- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) +- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) +- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) +- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) +- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) + +### Changed + +- `trace.SpanContext` is now immutable and has no exported fields. 
(#1573) + - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. +- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) +- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608) +- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) +- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) +- Added non-empty string check for trace `Attribute` keys. (#1659) +- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) +- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) +- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) +- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) + +### Removed + +- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) +- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) +- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. + These are now returned as a SpanProcessor interface from their respective constructors. 
(#1638) +- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) +- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) +- Removed `jaeger.WithProcess` configuration option. (#1673) +- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) + +### Fixed + +- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626) +- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) +- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) +- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) +- Synchronization issues in global trace delegate implementation. (#1686) +- Reduced excess memory usage by global `TracerProvider`. (#1687) + +## [0.18.0] - 2021-03-03 + +### Added + +- Added `resource.Default()` for use with meter and tracer providers. (#1507) +- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) +- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) +- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) +- Compatibility testing suite in the CI system for the following systems. (#1567) + | OS | Go Version | Architecture | + | ------- | ---------- | ------------ | + | Ubuntu | 1.15 | amd64 | + | Ubuntu | 1.14 | amd64 | + | Ubuntu | 1.15 | 386 | + | Ubuntu | 1.14 | 386 | + | MacOS | 1.15 | amd64 | + | MacOS | 1.14 | amd64 | + | Windows | 1.15 | amd64 | + | Windows | 1.14 | amd64 | + | Windows | 1.15 | 386 | + | Windows | 1.14 | 386 | + +### Changed + +- Replaced interface `oteltest.SpanRecorder` with its existing implementation + `StandardSpanRecorder`. 
(#1542) +- Default span limit values to 128. (#1535) +- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) +- Renamed the `otel/label` package to `otel/attribute`. (#1541) +- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) +- Parallelize the CI linting and testing. (#1567) +- Stagger timestamps in exact aggregator tests. (#1569) +- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) +- Prevent end-users from implementing some interfaces (#1575) + + ``` + "otel/exporters/otlp/otlphttp".Option + "otel/exporters/stdout".Option + "otel/oteltest".Option + "otel/trace".TracerOption + "otel/trace".SpanOption + "otel/trace".EventOption + "otel/trace".LifeCycleOption + "otel/trace".InstrumentationOption + "otel/sdk/resource".Option + "otel/sdk/trace".ParentBasedSamplerOption + "otel/sdk/trace".ReadOnlySpan + "otel/sdk/trace".ReadWriteSpan + ``` + +### Removed + +- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) +- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) +- Removed the `test-386` make target. + This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) + +### Fixed + +- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572) +- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577) +- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579) +- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). 
(#1581) +- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) + +## [0.17.0] - 2021-02-12 + +### Changed + +- Rename project default branch from `master` to `main`. (#1505) +- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) +- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) +- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) +- Move metric-related public global APIs from otel to otel/metric/global. (#1528) + +## Fixed + +- Fixed otlpgrpc reconnection issue. +- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513) +- The otel-collector example now uses the default OTLP receiver port of the collector. + +## [0.16.0] - 2021-01-13 + +### Added + +- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) +- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) +- Added documentation about the project's versioning policy. (#1388) +- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) +- Added codeql workflow to GitHub Actions (#1428) +- Added Gosec workflow to GitHub Actions (#1429) +- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) +- Add an OpenCensus exporter bridge. (#1444) + +### Changed + +- Rename `internal/testing` to `internal/internaltest`. (#1449) +- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) +- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) +- Improve span duration accuracy. 
(#1360) +- Migrated CI/CD from CircleCI to GitHub Actions (#1382) +- Remove duplicate checkout from GitHub Actions workflow (#1407) +- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) +- Metric `exact` aggregator includes per-point timestamps (#1412) +- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) +- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) +- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) +- Unify endpoint API that related to OTel exporter. (#1401) +- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) +- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) +- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) +- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) +- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) +- Metric Push and Pull Controller components are combined into a single "basic" Controller: + - `WithExporter()` and `Start()` to configure Push behavior + - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior + - `Start()` and `Stop()` accept Context. (#1378) +- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) + +### Removed + +- Remove `errUninitializedSpan` as its only usage is now obsolete. 
(#1360) +- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) +- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) + +### Fixed + +- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) + +## [0.15.0] - 2020-12-10 + +### Added + +- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) + +### Changed + +- The Zipkin exporter now uses the Span status code to determine. (#1328) +- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) +- Move the OpenCensus example into `example` directory. (#1359) +- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) +- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) +- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) + +### Fixed + +- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) + +## [0.14.0] - 2020-11-19 + +### Added + +- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) +- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) +- `SpanContextFromContext` returns `SpanContext` from context. (#1255) +- `TraceState` has been added to `SpanContext`. (#1340) +- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. 
(#1323) +- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) +- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) +- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) + +### Changed + +- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) + - `ID` has been renamed to `TraceID`. + - `IDFromHex` has been renamed to `TraceIDFromHex`. + - `EmptySpanContext` is removed. +- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) +- OTLP Exporter updates: + - supports OTLP v0.6.0 (#1230, #1354) + - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) +- The Sampler is now called on local child spans. (#1233) +- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) +- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. + This matches the returned type and fixes misuse of the term metric. (#1240) +- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) +- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) +- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) +- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) +- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. 
(#1316) +- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) +- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) +- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) +- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) +- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) +- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) +- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and + `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) +- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) +- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) +- Updated span collection limits for attribute, event and link counts to 1000 (#1318) +- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) + +### Removed + +- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) +- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. + It is replaced by using the `AddEvent` method with a `WithTimestamp` option. 
(#1254) +- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. + `Tracer` and `Span` from the same module should be used in their place instead. (#1306) +- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) + +### Fixed + +- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) +- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) +- Fix condition in `label.Any`. (#1299) +- Fix global `TracerProvider` to pass options to its configured provider. (#1329) +- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) + +## [0.13.0] - 2020-10-08 + +### Added + +- OTLP Metric exporter supports Histogram aggregation. (#1209) +- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) +- A Baggage API to implement the OpenTelemetry specification. (#1217) +- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) + +### Changed + +- Set default propagator to no-op propagator. (#1184) +- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) +- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. 
(#1212) +- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. + They now are `Unset`, `Error`, and `Ok`. + They no longer track the gRPC codes. (#1214) +- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) +- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) +- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) + +### Fixed + +- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) + +### Removed + +- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) +- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. + The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) +- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) +- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) +- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) +- Nested array/slice support has been removed. (#1226) + +## [0.12.0] - 2020-09-24 + +### Added + +- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. 
(#1108) +- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. + This addition was made to conform with our project option conventions. (#1155) +- Instrumentation library information was added to the Zipkin exporter. (#1119) +- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) +- More semantic conventions for k8s as resource attributes. (#1167) + +### Changed + +- Add reconnecting udp connection type to Jaeger exporter. + This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. + It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) +- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. + This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) +- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. + This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) +- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) +- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. + This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) +- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. 
(#1119) +- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) +- Move `tools` package under `internal`. (#1141) +- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) + The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. +- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) +- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) +- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) +- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to + recommend the use of `newConfig()` instead of `configure()`. (#1163) +- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) +- Ensure exported interface types include parameter names and update the + Style Guide to reflect this styling rule. (#1172) +- Don't consider unset environment variable for resource detection to be an error. (#1170) +- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and + `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. +- ValueObserver instruments use LastValue aggregator by default. (#1165) +- OTLP Metric exporter supports LastValue aggregation. (#1165) +- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. 
(#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) +- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) +- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. 
(#1192) +- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) +- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) +- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) + +### Removed + +- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the + `go.opentelemetry.io/contrib/propagators/` module. (#1191) +- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) + +### Fixed + +- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) +- Fix missing shutdown processor in otel-collector example. (#1186) +- Fix missing shutdown processor in basic and namedtracer examples. (#1197) + +## [0.11.0] - 2020-08-24 + +### Added + +- Support for exporting array-valued attributes via OTLP. (#992) +- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) +- Support for filtering metric label sets. (#1047) +- A dimensionality-reducing metric Processor. (#1057) +- Integration tests for more OTel Collector Attribute types. (#1062) +- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) + +### Changed + +- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) +- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) +- Rename `api/testharness` to `api/apitest`. (#1049) +- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) +- Change Metric Processor to merge multiple observations. (#1024) +- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. 
+ This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) +- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) +- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) +- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) +- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) +- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) +- Unify Callback Function Naming. + Rename `*Callback` with `*Func`. (#1061) +- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) +- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. + This interface still supports the export of `SpanData`, but only as a slice. + Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. + If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. 
+ This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) + +### Removed + +- Duplicate, unused API sampler interface. (#999) + Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. +- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. + This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) +- The `WithSpan` method of the `Tracer` interface. + The functionality this method provided was limited compared to what a user can provide themselves. + It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) +- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. + These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) +- The `oterror` package. (#1026) +- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) + +### Fixed + +- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) +- Correct instrumentation version tag in Jaeger exporter. (#1037) +- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) +- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) +- The `otel-collector` example referenced outdated collector processors. (#1006) + +## [0.10.0] - 2020-07-29 + +This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. 
+ +### Added + +- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. + These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) +- Add propagator option for gRPC instrumentation. (#986) +- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) + +### Changed + +- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. + This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) +- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. + This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) +- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) +- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) + - `value.Bool` was replaced with `kv.BoolValue`. + - `value.Int64` was replaced with `kv.Int64Value`. + - `value.Uint64` was replaced with `kv.Uint64Value`. + - `value.Float64` was replaced with `kv.Float64Value`. + - `value.Int32` was replaced with `kv.Int32Value`. + - `value.Uint32` was replaced with `kv.Uint32Value`. + - `value.Float32` was replaced with `kv.Float32Value`. + - `value.String` was replaced with `kv.StringValue`. + - `value.Int` was replaced with `kv.IntValue`. + - `value.Uint` was replaced with `kv.UintValue`. + - `value.Array` was replaced with `kv.ArrayValue`. 
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) +- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979) +- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) +- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) +- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) + +### Removed + +- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) + +### Fixed + +- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) +- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) +- Use `global.Handle` for span export errors in the OTLP exporter. (#946) +- Correct Go language formatting in the README documentation. (#961) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) +- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) + +## [0.9.0] - 2020-07-20 + +### Added + +- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) +- A Detector to automatically detect resources from an environment variable. (#939) +- GitHub action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) +- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. 
+ References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) + +### Changed + +- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) + +### Removed + +- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) + +## [0.8.0] - 2020-07-09 + +### Added + +- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. + A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) +- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) +- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) +- Add `peer.service` semantic attribute. (#898) +- Add database-specific semantic attributes. (#899) +- Add semantic convention for `faas.coldstart` and `container.id`. (#909) +- Add http content size semantic conventions. (#905) +- Include `http.request_content_length` in HTTP request basic attributes. (#905) +- Add semantic conventions for operating system process resource attribute keys. (#919) +- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) + +### Changed + +- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) +- Use lowercase header names for B3 Multiple Headers. (#881) +- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. + This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. + If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) +- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. 
+ Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. + This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) +- Extend semantic conventions for RPC. (#900) +- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) + - `"api/standard".FaaSName` -> `FaaSNameKey` + - `"api/standard".FaaSID` -> `FaaSIDKey` + - `"api/standard".FaaSVersion` -> `FaaSVersionKey` + - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` + +### Removed + +- The `FlagsUnused` trace flag is removed. + The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882) +- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. + If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) + +### Fixed + +- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) +- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) +- The B3 propagator now propagates the debug flag. + This removes the behavior of changing the debug flag into a set sampling bit. + Instead, this now follow the B3 specification and omits the `X-B3-Sampling` header. (#882) +- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) +- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. 
(#883) +- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) +- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) +- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) +- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) +- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) +- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) +- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) +- Update otel-colector example to use the v0.5.0 collector. (#915) +- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) +- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) +- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. + This is in accordance with OpenTelemetry semantic conventions. (#922) +- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) +- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) +- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) +- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) + +## [0.7.0] - 2020-06-26 + +This release implements the v0.5.0 version of the OpenTelemetry specification. + +### Added + +- The othttp instrumentation now includes default metrics. (#861) +- This CHANGELOG file to track all changes in the project going forward. +- Support for array type attributes. (#798) +- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. 
(#844) +- Timestamps are now passed to exporters for each export. (#835) +- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. + This replaces the prior `Record` `struct` use for this purpose. (#835) +- New dependabot integration to automate package upgrades. (#814) +- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument. + This instrumentation version is passed on to exporters. (#811) (#805) (#802) +- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) +- Environment variables for Jaeger exporter are supported. (#796) +- New `aggregation.Kind` in the export metric API. (#808) +- New example that uses OTLP and the collector. (#790) +- Handle errors in the span `SetName` during span initialization. (#791) +- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) +- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) +- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user defined `Handler`. + There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) +- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) +- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) +- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769) + +### Changed + +- Rename `Integrator` to `Processor` in the metric SDK. (#863) +- Rename `AggregationSelector` to `AggregatorSelector`. (#859) +- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) +- Rename `simple` integrator to `basic` integrator. 
(#857) +- Merge otlp collector examples. (#841) +- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. + With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) +- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) +- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. + All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. + Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) +- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) +- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) +- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 +- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) +- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) +- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) +- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781) + +### Removed + +- `Uint64NumberKind` and related functions from the API. (#864) +- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) +- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) + +### Fixed + +- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. 
(#866) +- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) +- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) +- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) +- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) +- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) +- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) +- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) +- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) +- Set span status from HTTP status code in the othttp instrumentation. (#832) +- Fixed typo in push controller comment. (#834) +- The `Aggregator` testing has been updated and cleaned. (#812) +- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) +- Fixed `global` `handler_test.go` test failure. 
#804 +- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) +- Fixed OTLP example's accidental early close of exporter. (#807) +- Ensure zipkin exporter reads and closes response body. (#788) +- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) +- Clean up tools and RELEASING documentation. (#762) + +## [0.6.0] - 2020-05-21 + +### Added + +- Support for `Resource`s in the prometheus exporter. (#757) +- New pull controller. (#751) +- New `UpDownSumObserver` instrument. (#750) +- OpenTelemetry collector demo. (#711) +- New `SumObserver` instrument. (#747) +- New `UpDownCounter` instrument. (#745) +- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) +- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) + +### Changed + +- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) +- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) +- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) +- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) +- The prometheus exporter now uses the new pull controller. (#751) +- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) +- Support use of synchronous instruments in asynchronous callbacks (#725) +- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) +- Rename `Observer` instrument to `ValueObserver`. (#734) +- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) +- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) +- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 
(#727) + +### Fixed + +- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) +- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) +- Fix `string` case in `kv` `Infer` function. (#746) +- Fix panic in grpctrace client interceptors. (#740) +- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) +- Rewrite span batch process queue batching logic. (#719) +- Remove the push controller named Meter map. (#738) +- Fix Histogram aggregator initial state (fix #735). (#736) +- Ensure golang alpine image is running `golang-1.14` for examples. (#733) +- Added test for grpctrace `UnaryInterceptorClient`. (#695) +- Rearrange `api/metric` code layout. (#724) + +## [0.5.0] - 2020-05-13 + +### Added + +- Batch `Observer` callback support. (#717) +- Alias `api` types to root package of project. (#696) +- Create basic `othttp.Transport` for simple client instrumentation. (#678) +- `SetAttribute(string, interface{})` to the trace API. (#674) +- Jaeger exporter option that allows user to specify custom http client. (#671) +- `Stringer` and `Infer` methods to `key`s. (#662) + +### Changed + +- Rename `NewKey` in the `kv` package to just `Key`. (#721) +- Move `core` and `key` to `kv` package. (#720) +- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) +- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) +- Move `Number` from `core` to `api/metric` package. (#706) +- Move `SpanContext` from `core` to `trace` package. (#692) +- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. 
(#681) + +### Fixed + +- Update tooling to run generators in all submodules. (#705) +- gRPC interceptor regexp to match methods without a service name. (#683) +- Use a `const` for padding 64-bit B3 trace IDs. (#701) +- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) +- Left-pad 64-bit B3 trace IDs with zero. (#698) +- Propagate at least the first W3C tracestate header. (#694) +- Remove internal `StateLocker` implementation. (#688) +- Increase instance size CI system uses. (#690) +- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) +- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) +- Reimplement histogram using mutex instead of `StateLocker`. (#669) +- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) +- Update documentation to not include any references to `WithKeys`. (#672) +- Correct misspelling. (#668) +- Fix clobbering of the span context if extraction fails. (#656) +- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) + +## [0.4.3] - 2020-04-24 + +### Added + +- `Dockerfile` and `docker-compose.yml` to run example code. (#635) +- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) +- New `api/label` package, providing common label set implementation. (#651) +- Support for JSON marshaling of `Resources`. (#654) +- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) +- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) +- `WithSpanFormatter` option to the othttp plugin. (#617) +- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) +- The prometheus exporter now supports exporting histograms. (#601) +- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. 
(#613) +- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) +- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) +- An iterable structure (`AttributeIterator`) for `Resource` attributes. + +### Changed + +- zipkin export's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) +- Pass `Resources` through the metrics export pipeline. (#659) + +### Removed + +- `WithKeys` option from the metric API. (#639) + +### Fixed + +- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) +- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) +- Use type names for return values in jaeger exporter. (#648) +- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) +- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) +- Do not cache `reflect.ValueOf()` in metric Labels. (#649) +- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) +- Add error wrapping to the prometheus exporter. (#631) +- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) +- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) +- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) +- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) +- Ensure spans created by httptrace client tracer reflect operation structure. (#618) +- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. (#610) +- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. 
(#611) + +## [0.4.2] - 2020-03-31 + +### Fixed + +- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) +- Fix time conversion from internal to OTLP in OTLP exporter. (#606) + +## [0.4.1] - 2020-03-31 + +### Fixed + +- Update `tag.sh` to create signed tags. (#604) + +## [0.4.0] - 2020-03-30 + +### Added + +- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) +- Script to verify examples after a new release. (#579) + +### Removed + +- The dogstatsd exporter due to lack of support. + This additionally removes support for statsd. (#591) +- `LabelSet` from the metric API. + This is replaced by a `[]core.KeyValue` slice. (#595) +- `Labels` from the metric API's `Meter` interface. (#595) + +### Changed + +- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) +- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) +- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) + +### Fixed + +- Corrected missing return in mock span. (#582) +- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) +- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) +- Update pre-release script to be compatible between GNU and BSD based systems. (#592) +- Add a `RecordBatch` benchmark. (#594) +- Moved span transforms of the OTLP exporter to the internal package. (#593) +- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) +- Removed unneeded allocation on empty labels in OLTP exporter. (#597) +- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) +- Update project documentation godoc.org links to pkg.go.dev. 
(#602) + +## [0.3.0] - 2020-03-21 + +This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. +There is still a possibility of breaking changes. + +### Added + +- Add `Observer` metric instrument. (#474) +- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) +- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) +- The zipkin trace exporter. (#495) +- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) +- Add `StatusMessage` field to the trace `Span`. (#524) +- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) +- The `Resource` type was added to the SDK. (#528) +- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) +- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. + Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) +- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) +- Scripts to better automate the release process. (#576) + +### Changed + +- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) +- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) +- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) +- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) +- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531) +- Rename metric API `Options` to `Config`. 
(#541) +- Rename metric `Counter` aggregator to be `Sum`. (#541) +- Unify metric options into `Option` from instrument specific options. (#541) +- The trace API's `TraceProvider` now supports `Resource`s. (#545) +- Correct error in zipkin module name. (#548) +- The jaeger trace exporter now supports `Resource`s. (#551) +- Metric SDK now supports `Resource`s. + The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) +- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) +- The stdout trace exporter now supports `Resource`s. (#558) +- The metric `Descriptor` is now included at the API instead of the SDK. (#560) +- Replace `Ordered` with an iterator in `export.Labels`. (#567) + +### Removed + +- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) +- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) +- `GetDescriptor` from the metric SDK. (#575) +- The `Gauge` instrument from the metric API. (#537) + +### Fixed + +- Make histogram aggregator checkpoint consistent. (#438) +- Update README with import instructions and how to build and test. (#505) +- The default label encoding was updated to be unique. (#508) +- Use `NewRoot` in the othttp plugin for public endpoints. (#513) +- Fix data race in `BatchedSpanProcessor`. (#518) +- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521) +- Use a variable-size array to represent ordered labels in maps. (#523) +- Update the OTLP protobuf and update changed import path. (#532) +- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) +- Eliminate goroutine leak in histogram stress test. (#547) +- Update OTLP exporter with latest protobuf. (#550) +- Add filters to the othttp plugin. 
(#556) +- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) +- Encode labels once during checkpoint. + The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. + This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) +- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) + +## [0.2.3] - 2020-03-04 + +### Added + +- `RecordError` method on `Span`s in the trace API to Simplify adding error events to spans. (#473) +- Configurable push frequency for exporters setup pipeline. (#504) + +### Changed + +- Rename the `exporter` directory to `exporters`. + The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. + This resulted in all subsequent releases not becoming the default latest. + A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. + Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. + Consequentially, this action also renames *all* exporter packages. (#502) + +### Removed + +- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) + +## [0.2.2] - 2020-02-27 + +### Added + +- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) +- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) +- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467) +- `Config` and configuring `Option` to the propagator API. 
(#467) +- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) +- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) +- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) +- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) +- Histogram aggregator. (#433) +- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) +- `AlwaysParentSample` sampler to the trace API. (#455) +- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) + +### Changed + +- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) +- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) +- Move correlation context propagation to correlation package. (#479) +- Do not default to putting remote span context into links. (#480) +- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) +- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) +- Renamed the `export` package to `metric` to match directory structure. (#432) +- Rename the `api/distributedcontext` package to `api/correlation`. (#444) +- Rename the `api/propagators` package to `api/propagation`. (#444) +- Move the propagators from the `propagators` package into the `trace` API package. (#444) +- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) +- Moved all dependencies of tools package to a tools directory. (#466) + +### Removed + +- Binary propagators. (#467) +- NOOP propagator. 
(#467) + +### Fixed + +- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) +- Fix a possible nil-dereference crash. (#478) +- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) +- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) +- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) +- Initialize `onError` based on `Config` in prometheus exporter. (#486) +- Correct module name in prometheus exporter README. (#475) +- Removed tracer name prefix from span names. (#430) +- Fix `aggregator_test.go` import package comment. (#431) +- Improved detail in stdout exporter. (#436) +- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) +- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) +- Reword function documentation in gRPC plugin. (#446) +- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) +- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) +- Upgraded to Go 1.13 in CI. (#465) +- Correct opentelemetry.io URL in trace SDK documentation. (#464) +- Refactored reference counting logic in SDK determination of stale records. (#468) +- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) + +## [0.2.1.1] - 2020-01-13 + +### Fixed + +- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + +## [0.2.1] - 2020-01-08 + +### Added + +- Global meter forwarding implementation. + This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) +- Global trace forwarding implementation. 
+ This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) +- Standardize export pipeline creation in all exporters. (#395) +- A testing, organization, and comments for 64-bit field alignment. (#418) +- Script to tag all modules in the project. (#414) + +### Changed + +- Renamed `propagation` package to `propagators`. (#362) +- Renamed `B3Propagator` propagator to `B3`. (#362) +- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) +- Renamed `BinaryPropagator` propagator to `Binary`. (#362) +- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) +- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) +- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) +- Renamed `SpanOption` to `StartOption` in the trace API. (#369) +- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) +- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) +- `Number` now has a pointer receiver for its methods. (#375) +- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) +- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) +- Renamed `Message` in Event to `Name` in the trace API. (#389) +- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) +- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) +- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. 
(#400) +- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) +- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) +- Renamed the `File` option in the stdout exporter to `Writer`. (#404) +- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. + +### Fixed + +- Aggregator import path corrected. (#421) +- Correct links in README. (#368) +- The README was updated to match latest code changes in its examples. (#374) +- Don't capitalize error statements. (#375) +- Fix ignored errors. (#375) +- Fix ambiguous variable naming. (#375) +- Removed unnecessary type casting. (#375) +- Use named parameters. (#375) +- Updated release schedule. (#378) +- Correct http-stackdriver example module name. (#394) +- Removed the `http.request` span in `httptrace` package. (#397) +- Add comments in the metrics SDK. (#399) +- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) +- Add documentation of compatible exporters in the README. (#405) +- Typo fix. (#408) +- Simplify span check logic in SDK tracer implementation. (#419) + +## [0.2.0] - 2019-12-03 + +### Added + +- Unary gRPC tracing example. (#351) +- Prometheus exporter. (#334) +- Dogstatsd metrics exporter. (#326) + +### Changed + +- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) +- Rename `GetMeter` to `Meter`. (#357) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Move `/global` package to `/api/global`. (#356) +- Rename `GetTracer` to `Tracer`. (#347) + +### Removed + +- `SetAttribute` from the `Span` interface in the trace API. (#361) +- `AddLink` from the `Span` interface in the trace API. 
(#349) +- `Link` from the `Span` interface in the trace API. (#349) + +### Fixed + +- Exclude example directories from coverage report. (#365) +- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) +- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359) +- Run the race checker for all tests. (#354) +- Redundant commands in the Makefile are removed. (#354) +- Split the `generate` and `lint` targets of the Makefile. (#354) +- Renames `circle-ci` target to more generic `ci` in Makefile. (#354) +- Add example Prometheus binary to gitignore. (#358) +- Support negative numbers with the `MaxSumCount`. (#335) +- Resolve race conditions in `push_test.go` identified in #339. (#340) +- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) +- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. + Previously it was testing `AlwaysSample` twice. (#325) +- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) +- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) +- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. + This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. + This was corrected. (#333) + +## [0.1.2] - 2019-11-18 + +### Fixed + +- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) +- Removed unnecessary unslicing of parameters that are already a slice. (#324) + +## [0.1.1] - 2019-11-18 + +This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. + +### Added + +- Metrics stdout export pipeline. 
(#265) +- Array aggregation for raw measure metrics. (#282) +- The core.Value now has a `MarshalJSON` method. (#281) + +### Removed + +- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) +- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) + +### Changed + +- Allocation in LabelSet construction to reduce GC overhead. (#318) +- `trace.WithAttributes` to append values instead of replacing. (#315) +- Use a formula for tolerance in sampling tests. (#298) +- Move export types into trace and metric-specific sub-directories. (#289) +- `SpanKind` back to being based on an `int` type. (#288) + +### Fixed + +- URL to OpenTelemetry website in README. (#323) +- Name of othttp default tracer. (#321) +- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) +- CI modules cache to correctly restore/save from/to the cache. (#316) +- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) +- README now reflects the new code structure introduced with these changes. (#291) +- Make the basic example work. (#279) + +## [0.1.0] - 2019-11-04 + +This is the first release of open-telemetry go library. +It contains api and sdk for trace and meter. + +### Added + +- Initial OpenTelemetry trace and metric API prototypes. +- Initial OpenTelemetry trace, metric, and export SDK packages. +- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project. 
+ +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 0000000000..c4012ed6ca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/community-membership.md +# +# +# Learn about CODEOWNERS file format: +# 
https://help.github.com/en/articles/about-code-owners +# + +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 0000000000..a6928bfdff --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,526 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets regularly. See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` to run the tests instead of `go test`. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean` then it means that everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). 
+ +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print some warning about "build constraints exclude all Go +files", just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You +can alternatively use `git` directly with: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go +``` + +(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - +that name is a kind of a redirector to GitHub that `go get` can +understand, but `git` does not.) + +This would put the project in the `opentelemetry-go` directory in +current working directory. + +Enter the newly created directory and add your fork as a new remote: + +```sh +git remote add git@github.com:/opentelemetry-go +``` + +Check out a new branch, make modifications, run linters and tests, update +`CHANGELOG.md`, and push the branch to your fork: + +```sh +git checkout -b +# edit files +# update changelog +make precommit +git add -p +git commit +git push +``` + +Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull +request ID to the entry you added to `CHANGELOG.md`. + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, + tag it as `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure CLA is signed and CI is clear. + +### How to Get PRs Merged + +A PR is considered to be **ready to merge** when: + +* It has received two approvals from Collaborators/Maintainers (at + different companies). This is not enforced through technical means + and a PR may be **ready to merge** with a single approval if the change + and its approach have been discussed and consensus reached. +* Feedback has been addressed. 
+* Any substantive changes to your PR will require that you clear any prior + Approval reviews, this includes changes resulting from other feedback. Unless + the approver explicitly stated that their approval will persist across + changes it should be assumed that the PR needs their review again. Other + project members (e.g. approvers, maintainers) can help with this if there are + any questions or if you forget to clear reviews. +* It has been open for review for at least one working day. This gives + people reasonable time to review. +* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for + one day and may be merged with a single Maintainer's approval. +* `CHANGELOG.md` has been updated to reflect what has been + added, changed, removed, or fixed. +* `README.md` has been updated if necessary. +* Urgent fix can take exception as long as it has been actively + communicated. + +Any Maintainer can merge the PR once it is **ready to merge**. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-go follows the +[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). + +It's especially valuable to read through the [library +guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the method to satisfy those uses cases are +not. + +As such, Contributions should provide functionality and behavior that +conforms to the specification, but the interface and structure is +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec. + +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). 
+ +## Documentation + +Each non-example Go Module should have its own `README.md` containing: + +- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). +- Brief description. +- Installation instructions (and requirements if applicable). +- Hyperlink to an example. Depending on the component the example can be: + - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). + - A sample Go application with its own `README.md`, like [here](example/zipkin). +- Additional documentation sections such as: + - Configuration, + - Contributing, + - References. + +[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. + +Moreover, it should be possible to navigate to any `README.md` from the +root `README.md`. + +## Style Guide + +One of the primary goals of this project is that it is actually used by +developers. With this goal in mind the project strives to build +user-friendly and idiomatic Go code adhering to the Go community's best +practices. + +For a non-comprehensive but foundational overview of these best practices +the [Effective Go](https://golang.org/doc/effective_go.html) documentation +is an excellent starting place. + +As a convenience for developers building this project the `make precommit` +will format, lint, validate, and in some cases fix the changes you plan to +submit. This check will need to pass for your changes to be able to be +merged. + +In addition to idiomatic Go, the project has adopted certain standards for +implementations of common patterns. These standards should be followed as a +default, and if they are not followed documentation needs to be included as +to the reasons why. + +### Configuration + +When creating an instantiation function for a complex `type T struct`, it is +useful to allow variable number of options to be applied. However, the strong +type system of Go restricts the function design options. 
There are a few ways +to solve this problem, but we have landed on the following design. + +#### `config` + +Configuration should be held in a `struct` named `config`, or prefixed with +specific type name this Configuration applies to if there are multiple +`config` in the package. This type must contain configuration options. + +```go +// config contains configuration options for a thing. +type config struct { + // options ... +} +``` + +In general the `config` type will not need to be used externally to the +package and should be unexported. If, however, it is expected that the user +will likely want to build custom options for the configuration, the `config` +should be exported. Please, include in the documentation for the `config` +how the user can extend the configuration. + +It is important that internal `config` are not shared across package boundaries. +Meaning a `config` from one package should not be directly used by another. The +one exception is the API packages. The configs from the base API, eg. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK therefore it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility, to achieve this no fields should be exported but should +instead be accessed by methods. + +Optionally, it is common to include a `newConfig` function (with the same +naming scheme). This function wraps any defaults setting and looping over +all options to create a configured `config`. + +```go +// newConfig returns an appropriately configured config. +func newConfig(options ...Option) config { + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + config = option.apply(config) + } + // Perform any validation here. 
+ return config +} +``` + +If validation of the `config` options is also performed this can return an +error as well that is expected to be handled by the instantiation function +or propagated to the user. + +Given the design goal of not having the user need to work with the `config`, +the `newConfig` function should also be unexported. + +#### `Option` + +To set the value of the options a `config` contains, a corresponding +`Option` interface type should be used. + +```go +type Option interface { + apply(config) config +} +``` + +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on its own. + +The `apply` method should return a modified version of the passed config. +This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. + +The name of the interface should be prefixed in the same way the +corresponding `config` is (if at all). + +#### Options + +All user configurable options for a `config` must have a related unexported +implementation of the `Option` interface and an exported configuration +function that wraps this implementation. + +The wrapping function name should be prefixed with `With*` (or in the +special case of a boolean options `Without*`) and should have the following +function signature. + +```go +func With*(…) Option { … } +``` + +##### `bool` Options + +```go +type defaultFalseOption bool + +func (o defaultFalseOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithOption sets a T to have an option included. +func WithOption() Option { + return defaultFalseOption(true) +} +``` + +```go +type defaultTrueOption bool + +func (o defaultTrueOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithoutOption sets a T to have Bool option excluded. 
+func WithoutOption() Option { + return defaultTrueOption(false) +} +``` + +##### Declared Type Options + +```go +type myTypeOption struct { + MyType MyType +} + +func (o myTypeOption) apply(c config) config { + c.MyType = o.MyType + return c +} + +// WithMyType sets T to have include MyType. +func WithMyType(t MyType) Option { + return myTypeOption{t} +} +``` + +##### Functional Options + +```go +type optionFunc func(config) config + +func (fn optionFunc) apply(c config) config { + return fn(c) +} + +// WithMyType sets t as MyType. +func WithMyType(t MyType) Option { + return optionFunc(func(c config) config { + c.MyType = t + return c + }) +} +``` + +#### Instantiation + +Using this configuration pattern to configure instantiation with a `NewT` +function. + +```go +func NewT(options ...Option) T {…} +``` + +Any required parameters can be declared before the variadic `options`. + +#### Dealing with Overlap + +Sometimes there are multiple complex `struct` that share common +configuration and also have distinct configuration. To avoid repeated +portions of `config`s, a common `config` can be used with the union of +options being handled with the `Option` interface. + +For example. + +```go +// config holds options for all animals. +type config struct { + Weight float64 + Color string + MaxAltitude float64 +} + +// DogOption apply Dog specific options. +type DogOption interface { + applyDog(config) config +} + +// BirdOption apply Bird specific options. +type BirdOption interface { + applyBird(config) config +} + +// Option apply options for all animals. 
+type Option interface { + BirdOption + DogOption +} + +type weightOption float64 + +func (o weightOption) applyDog(c config) config { + c.Weight = float64(o) + return c +} + +func (o weightOption) applyBird(c config) config { + c.Weight = float64(o) + return c +} + +func WithWeight(w float64) Option { return weightOption(w) } + +type furColorOption string + +func (o furColorOption) applyDog(c config) config { + c.Color = string(o) + return c +} + +func WithFurColor(c string) DogOption { return furColorOption(c) } + +type maxAltitudeOption float64 + +func (o maxAltitudeOption) applyBird(c config) config { + c.MaxAltitude = float64(o) + return c +} + +func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } + +func NewDog(name string, o ...DogOption) Dog {…} +func NewBird(name string, o ...BirdOption) Bird {…} +``` + +### Interfaces + +To allow other developers to better comprehend the code, it is important +to ensure it is sufficiently documented. One simple measure that contributes +to this aim is self-documenting by naming method parameters. Therefore, +where appropriate, methods of every exported interface type should have +their parameters appropriately named. + +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +Otherwise, stable interfaces MUST NOT be modified. + +If new functionality is needed for an interface that cannot be changed it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface. 
For example, if you wanted to add a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. E.g. + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +type ClosingExporter struct { + Exporter + Close() +} +``` + +This new type can be used similar to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called. + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need their own super-set +interfaces and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred. 
+ +## Approvers and Maintainers + +Approvers: + +- [Evan Torrie](https://github.com/evantorrie), Verizon Media +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [David Ashpole](https://github.com/dashpole), Google +- [Robert Pająk](https://github.com/pellared), Splunk +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Elastic + +Maintainers: + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +Emeritus: + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 0000000000..0e6ffa284e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,227 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default +ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +.PHONY: tools +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) + +# Build + +.PHONY: generate build + +generate: $(OTEL_GO_MOD_DIRS:%=generate/%) +generate/%: DIR=$* +generate/%: | $(STRINGER) $(PORTO) + @echo "$(GO) generate 
$(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . + +build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . 
-name coverage.out) > coverage.txt + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.18 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_CONFIG = .github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! 
git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) + [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: prerelease +prerelease: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 0000000000..878d87e58b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,114 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) 
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. + +## Project Status + +| Signal | Status | Project | +| ------- | ---------- | ------- | +| Traces | Stable | N/A | +| Metrics | Alpha | N/A | +| Logs | Frozen [1] | N/A | + +- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. + No Logs Pull Requests are currently being accepted. + +Progress and status specific to this repository is tracked in our local +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](./VERSIONING.md). + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. 
+- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +| ------- | ---------- | ------------ | +| Ubuntu | 1.20 | amd64 | +| Ubuntu | 1.19 | amd64 | +| Ubuntu | 1.18 | amd64 | +| Ubuntu | 1.20 | 386 | +| Ubuntu | 1.19 | 386 | +| Ubuntu | 1.18 | 386 | +| MacOS | 1.20 | amd64 | +| MacOS | 1.19 | amd64 | +| MacOS | 1.18 | amd64 | +| Windows | 1.20 | amd64 | +| Windows | 1.19 | amd64 | +| Windows | 1.18 | amd64 | +| Windows | 1.20 | 386 | +| Windows | 1.19 | 386 | +| Windows | 1.18 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). 
+ +If you need to extend the telemetry an instrumentation library provides or want +to build your own instrumentation for your application directly you will need +to use the +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) +package. The included [examples](./example/) are a good way to see some +practical uses of this process. + +### Export + +Now that your application is instrumented to collect telemetry, it needs an +export pipeline to send that telemetry to an observability platform. + +All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). + +| Exporter | Metrics | Traces | +| :-----------------------------------: | :-----: | :----: | +| [Jaeger](./exporters/jaeger/) | | ✓ | +| [OTLP](./exporters/otlp/) | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | ✓ | + +## Contributing + +See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md new file mode 100644 index 0000000000..77d56c9365 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -0,0 +1,127 @@ +# Release Process + +## Semantic Convention Generation + +New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. + +1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. +2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. + +For example, + +```sh +export TAG="v1.13.0" # Change to the release version you are generating. 
+export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" +docker pull otel/semconvgen:latest +make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. +``` + +This should create a new sub-package of [`semconv`](./semconv). +Ensure things look correct before submitting a pull request to include the addition. + +**Note**, the generation code was changed to generate versions >= 1.13. +To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). + +## Pre-Release + +First, decide which module sets will be released and update their versions +in `versions.yaml`. Commit this change to a new branch. + +Update go.mod for submodules to depend on the new release which will happen in the next step. + +1. Run the `prerelease` make target. It creates a branch + `prerelease__` that will contain all release changes. + + ``` + make prerelease MODSET= + ``` + +2. Verify the changes. + + ``` + git diff ...prerelease__ + ``` + + This should have changed the version for all modules to be ``. + If these changes look correct, merge them into your pre-release branch: + + ```go + git merge prerelease__ + ``` + +3. Update the [Changelog](./CHANGELOG.md). + - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + To verify this, you can look directly at the commits since the ``. + + ``` + git --no-pager log --pretty=oneline "..HEAD" + ``` + + - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Update all the appropriate links at the bottom. + +4. Push the changes to upstream and create a Pull Request on GitHub. + Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. 
+ +## Tag + +Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. + +***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! +Failure to do so will leave things in a broken state. As long as you do not +change `versions.yaml` between pre-release and this step, things should be fine. + +***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). +It is critical you make sure the version you push upstream is correct. +[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). + +1. For each module set that will be released, run the `add-tags` make target + using the `` of the commit on the main branch for the merged Pull Request. + + ``` + make add-tags MODSET= COMMIT= + ``` + + It should only be necessary to provide an explicit `COMMIT` value if the + current `HEAD` of your working directory is not the correct commit. + +2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). + Make sure you push all sub-modules as well. + + ``` + git push upstream + git push upstream + ... + ``` + +## Release + +Finally create a Release for the new `` on GitHub. +The release body should include all the release notes from the Changelog for this release. + +## Verify Examples + +After releasing verify that examples build outside of the repository. + +``` +./verify_examples.sh +``` + +The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. +This ensures they build with the published release, not the local copy. 
+ +## Post-Release + +### Contrib Repository + +Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. + +### Website Documentation + +Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). +Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md new file mode 100644 index 0000000000..412f1e362b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -0,0 +1,224 @@ +# Versioning + +This document describes the versioning policy for this repository. This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. 
+ + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. 
+ * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + go.opentelemetry.io/contrib/instrumentation/host/v2`, `require + go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the + package import path (e.g., `import + "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there + is both a `/v2` and a `@v2.0.1` in that example. One way to think about + it is that the module name now includes the `/v2`, so include `/v2` + whenever you are using the module name). + * If a module is version `v0` or `v1`, do not include the major version + in either the module path or the import path. + * In addition to public APIs, telemetry produced by stable instrumentation + will remain stable and backwards compatible. This is to avoid breaking + alerts and dashboard. + * Modules will be used to encapsulate instrumentation, detectors, exporters, + propagators, and any other independent sets of related components. 
+ * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API and telemetry will + be versioned with a major version greater than `v0`. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * Stable contrib modules cannot depend on experimental modules from this + project. + * All stable contrib modules of the same major version with this project + will use the same entire version as this project. + * Stable modules may be released with an incremented minor or patch + version even though that module's code has not been changed. Instead + the only change that will have been included is to have updated that + modules dependency on this project's stable APIs. + * When an experimental module in contrib becomes stable a new stable + module version will be released and will include this now stable + module. The new stable module version will be an increment of the minor + version number and will be applied to all existing stable contrib + modules, this project's modules, and the newly stable module being + released. + * Contrib modules will be kept up to date with this project's releases. + * Due to the dependency contrib modules will implicitly have on this + project's modules the release of stable contrib modules to match the + released version number will be staggered after this project's release. + There is no explicit time guarantee for how long after this projects + release the contrib release will be. 
Effort should be made to keep them + as close in time as possible. + * No additional stable release in this project can be made until the + contrib repository has a matching stable release. + * No release can be made in the contrib repository after this project's + stable release except for a stable release of the contrib repository. +* GitHub releases will be made for all releases. +* Go modules will be made available at Go package mirrors. + +## Example Versioning Lifecycle + +To better understand the implementation of the above policy the following +example is provided. This project is simplified to include only the following +modules and their versions: + +* `otel`: `v0.14.0` +* `otel/trace`: `v0.14.0` +* `otel/metric`: `v0.14.0` +* `otel/baggage`: `v0.14.0` +* `otel/sdk/trace`: `v0.14.0` +* `otel/sdk/metric`: `v0.14.0` + +These modules have been developed to a point where the `otel/trace`, +`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they +should be considered for a stable release. The `otel/metric` and +`otel/sdk/metric` are still under active development and the `otel` module +depends on both `otel/trace` and `otel/metric`. + +The `otel` package is refactored to remove its dependencies on `otel/metric` so +it can be released as stable as well. With that done the following release +candidates are made: + +* `otel`: `v1.0.0-RC1` +* `otel/trace`: `v1.0.0-RC1` +* `otel/baggage`: `v1.0.0-RC1` +* `otel/sdk/trace`: `v1.0.0-RC1` + +The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. + +A few minor issues are discovered in the `otel/trace` package. These issues are +resolved with some minor, but backwards incompatible, changes and are released +as a second release candidate: + +* `otel`: `v1.0.0-RC2` +* `otel/trace`: `v1.0.0-RC2` +* `otel/baggage`: `v1.0.0-RC2` +* `otel/sdk/trace`: `v1.0.0-RC2` + +Notice that all module version numbers are incremented to adhere to our +versioning policy. 
+ +After these release candidates have been evaluated to satisfaction, they are +released as version `v1.0.0`. + +* `otel`: `v1.0.0` +* `otel/trace`: `v1.0.0` +* `otel/baggage`: `v1.0.0` +* `otel/sdk/trace`: `v1.0.0` + +Since both the `go` utility and the Go module system support [the semantic +versioning definition of +precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release +will correctly be interpreted as the successor to the previous release +candidates. + +Active development of this project continues. The `otel/metric` module now has +backwards incompatible changes to its API that need to be released and the +`otel/baggage` module has a minor bug fix that needs to be released. The +following release is made: + +* `otel`: `v1.0.1` +* `otel/trace`: `v1.0.1` +* `otel/metric`: `v0.15.0` +* `otel/baggage`: `v1.0.1` +* `otel/sdk/trace`: `v1.0.1` +* `otel/sdk/metric`: `v0.15.0` + +Notice that, again, all stable module versions are incremented in unison and +the `otel/sdk/metric` package, which depends on the `otel/metric` package, also +bumped its version. This bump of the `otel/sdk/metric` package makes sense +given their coupling, though it is not explicitly required by our versioning +policy. + +As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a +point where they should be evaluated for stability. The `otel` module is +reintegrated with the `otel/metric` package and the following release is made: + +* `otel`: `v1.1.0-RC1` +* `otel/trace`: `v1.1.0-RC1` +* `otel/metric`: `v1.1.0-RC1` +* `otel/baggage`: `v1.1.0-RC1` +* `otel/sdk/trace`: `v1.1.0-RC1` +* `otel/sdk/metric`: `v1.1.0-RC1` + +All the modules are evaluated and determined to a viable stable release. They +are then released as version `v1.1.0` (the minor version is incremented to +indicate the addition of new signal). 
+ +* `otel`: `v1.1.0` +* `otel/trace`: `v1.1.0` +* `otel/metric`: `v1.1.0` +* `otel/baggage`: `v1.1.0` +* `otel/sdk/trace`: `v1.1.0` +* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go new file mode 100644 index 0000000000..dafe7424df --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package attribute provides key and value attributes. +package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go new file mode 100644 index 0000000000..fe2bc5766c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -0,0 +1,146 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "bytes" + "sync" + "sync/atomic" +) + +type ( + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. + Encoder interface { + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. + Encode(iterator Iterator) string + + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. + ID() EncoderID + } + + // EncoderID is used to identify distinct Encoder + // implementations, for caching encoded results. + EncoderID struct { + value uint64 + } + + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. + pool sync.Pool // *bytes.Buffer + } +) + +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. +const escapeChar = '\\' + +var ( + _ Encoder = &defaultAttrEncoder{} + + // encoderIDCounter is for generating IDs for other attribute encoders. 
+ encoderIDCounter uint64 + + defaultEncoderOnce sync.Once + defaultEncoderID = NewEncoderID() + defaultEncoderInstance *defaultAttrEncoder +) + +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. +func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. 
+func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 0000000000..841b271fb7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. 
+func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. 
+func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 0000000000..0656a04e43 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. 
+// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64(v float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64Value(v), + } +} + +// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). +func (k Key) Float64Slice(v []float64) KeyValue { + return KeyValue{ + Key: k, + Value: Float64SliceValue(v), + } +} + +// String creates a KeyValue instance with a STRING Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- String(name, value). +func (k Key) String(v string) KeyValue { + return KeyValue{ + Key: k, + Value: StringValue(v), + } +} + +// StringSlice creates a KeyValue instance with a STRINGSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. 
+func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 0000000000..1ddf3ce058 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. 
+func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. +func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 0000000000..26be598322 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,424 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "reflect" + "sort" +) + +type ( + // Set is the representation for a distinct attribute set. 
It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. + // + // This type supports the Equivalent method of comparison using values of + // type Distinct. + Set struct { + equivalent Distinct + } + + // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. + Distinct struct { + iface interface{} + } + + // Filter supports removing certain attributes from attribute sets. When + // the filter returns true, the attribute will be kept in the filtered + // attribute set. When the filter returns false, the attribute is excluded + // from the filtered attribute set, and the attribute instead appears in + // the removed list of excluded attributes. + Filter func(KeyValue) bool + + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). + Sortable []KeyValue +) + +var ( + // keyValueType is used in computeDistinctReflect. + keyValueType = reflect.TypeOf(KeyValue{}) + + // emptySet is returned for empty attribute sets. + emptySet = &Set{ + equivalent: Distinct{ + iface: [0]KeyValue{}, + }, + } +) + +// EmptySet returns a reference to a Set with no elements. +// +// This is a convenience provided for optimized calling utility. +func EmptySet() *Set { + return emptySet +} + +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { + return reflect.ValueOf(d.iface) +} + +// Valid returns true if this value refers to a valid Set. +func (d Distinct) Valid() bool { + return d.iface != nil +} + +// Len returns the number of attributes in this set. 
+func (l *Set) Len() int { + if l == nil || !l.equivalent.Valid() { + return 0 + } + return l.equivalent.reflectValue().Len() +} + +// Get returns the KeyValue at ordered position idx in this set. +func (l *Set) Get(idx int) (KeyValue, bool) { + if l == nil { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() + + if idx >= 0 && idx < value.Len() { + // Note: The Go compiler successfully avoids an allocation for + // the interface{} conversion here: + return value.Index(idx).Interface().(KeyValue), true + } + + return KeyValue{}, false +} + +// Value returns the value of a specified key in this set. +func (l *Set) Value(k Key) (Value, bool) { + if l == nil { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. 
+func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. +func NewSet(kvs ...KeyValue) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) + return s +} + +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + return NewSetWithSortableFiltered(kvs, new(Sortable), filter) +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. 
This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + *tmp = kvs + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + sort.Stable(tmp) + + *tmp = nil + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. + for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + if filter != nil { + return filterSet(kvs[position:], filter) + } + return Set{ + equivalent: computeDistinct(kvs[position:]), + }, nil +} + +// filterSet reorders kvs so that included keys are contiguous at the end of +// the slice, while excluded keys precede the included keys. 
+func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + var excluded []KeyValue + + // Move attributes that do not match the filter so they're adjacent before + // calling computeDistinct(). + distinctPosition := len(kvs) + + // Swap indistinct keys forward and distinct keys toward the + // end of the slice. + offset := len(kvs) - 1 + for ; offset >= 0; offset-- { + if filter(kvs[offset]) { + distinctPosition-- + kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] + continue + } + } + excluded = kvs[:distinctPosition] + + return Set{ + equivalent: computeDistinct(kvs[distinctPosition:]), + }, excluded +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return Set{ + equivalent: l.equivalent, + }, nil + } + + // Note: This could be refactored to avoid the temporary slice + // allocation, if it proves to be expensive. + return filterSet(l.ToSlice(), re) +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. 
+func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + ptr := new([1]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 2: + ptr := new([2]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 3: + ptr := new([3]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 4: + ptr := new([4]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 5: + ptr := new([5]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 6: + ptr := new([6]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 7: + ptr := new([7]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 8: + ptr := new([8]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 9: + ptr := new([9]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 10: + ptr := new([10]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. 
+func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 0000000000..e584b24776 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 0000000000..cb21dd5c09 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,270 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. 
+func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. +func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. 
Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
+func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + return fmt.Sprint(v.asInt64Slice()) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + return fmt.Sprint(v.asFloat64Slice()) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + return fmt.Sprint(v.asStringSlice()) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 0000000000..a36db8f8d8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,570 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strings" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" + + keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` + valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` + keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` +) + +var ( + keyRe = regexp.MustCompile(`^` + keyDef + `$`) + valueRe = regexp.MustCompile(`^` + valueDef + `$`) + propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool + + // hasData indicates whether the created property contains data or not. + // Properties that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewKeyProperty returns a new Property for key. +// +// If key is invalid, an error will be returned. 
+func NewKeyProperty(key string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + p := Property{key: key, hasData: true} + return p, nil +} + +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. +func NewKeyValueProperty(key, value string) (Property, error) { + if !keyRe.MatchString(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + p := Property{ + key: key, + value: value, + hasValue: true, + hasData: true, + } + return p, nil +} + +func newInvalidProperty() Property { + return Property{} +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + if property == "" { + return newInvalidProperty(), nil + } + + match := propertyRe.FindStringSubmatch(property) + if len(match) != 4 { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + p := Property{hasData: true} + if match[1] != "" { + p.key = match[1] + } else { + p.key = match[2] + p.value = match[3] + p.hasValue = true + } + + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. 
+func (p Property) validate() error { + errFunc := func(err error) error { + return fmt.Errorf("invalid property: %w", err) + } + + if !p.hasData { + return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) + } + + if !keyRe.MatchString(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } + if p.hasValue && !valueRe.MatchString(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } + if !p.hasValue && p.value != "" { + return errFunc(errors.New("inconsistent value")) + } + return nil +} + +// Key returns the Property key. +func (p Property) Key() string { + return p.key +} + +// Value returns the Property value. Additionally, a boolean value is returned +// indicating if the returned value is the empty if the Property has a value +// that is empty or if the value is not set. +func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a string compliant with the W3C Baggage +// specification. 
+func (p Property) String() string { + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, len(p)) + for i, prop := range p { + props[i] = prop.String() + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties + + // hasData indicates whether the created property contains data or not. + // Properties that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewMember returns a new Member from the passed arguments. The key will be +// used directly while the value will be url decoded after validation. 
An error +// is returned if the created Member would be invalid according to the W3C +// Baggage specification. +func NewMember(key, value string, props ...Property) (Member, error) { + m := Member{ + key: key, + value: value, + properties: properties(props).Copy(), + hasData: true, + } + if err := m.validate(); err != nil { + return newInvalidMember(), err + } + decodedValue, err := url.QueryUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + m.value = decodedValue + return m, nil +} + +func newInvalidMember() Member { + return Member{} +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var ( + key, value string + props properties + ) + + parts := strings.SplitN(member, propertyDelimiter, 2) + switch len(parts) { + case 2: + // Parse the member properties. + for _, pStr := range strings.Split(parts[1], propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + fallthrough + case 1: + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + kv := strings.SplitN(parts[0], keyValueDelimiter, 2) + if len(kv) != 2 { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." 
+ key = strings.TrimSpace(kv[0]) + var err error + value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", err, value) + } + if !keyRe.MatchString(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + default: + // This should never happen unless a developer has changed the string + // splitting somehow. Panic instead of failing silently and allowing + // the bug to slip past the CI checks. + panic("failed to parse baggage member") + } + + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// validate ensures m conforms to the W3C Baggage specification. +// A key is just an ASCII string, but a value must be URL encoded UTF-8, +// returning an error otherwise. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !keyRe.MatchString(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !valueRe.MatchString(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. +func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a string compliant with the W3C Baggage +// specification. +func (m Member) String() string { + // A key is just an ASCII string, but a value is URL encoded UTF-8. 
+ s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) + if len(m.properties) > 0 { + s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. 
+func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. +func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members does not have significance. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. 
+func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy the Baggage with the member included. If the +// baggage contains a Member with the same key the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a string compliant with the W3C Baggage +// specification. The returned string will be invalid if the Baggage contains +// any invalid list-members. 
+func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String()) + } + return strings.Join(members, listDelimiter) +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 0000000000..24b34b7564 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. 
+ return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 0000000000..4545100df6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go new file mode 100644 index 0000000000..587ebae4e3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package codes // import "go.opentelemetry.io/otel/codes" + +import ( + "encoding/json" + "fmt" + "strconv" +) + +const ( + // Unset is the default status code. + Unset Code = 0 + + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Error Code = 1 + + // Ok indicates operation has been validated by an Application developers + // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Ok Code = 2 + + maxCode = 3 +) + +// Code is an 32-bit representation of a status state. +type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. 
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 0000000000..df3e0f1b62 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. 
+ +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 0000000000..daa36c89dc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. 
+*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 0000000000..72fad85412 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. 
+func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md new file mode 100644 index 0000000000..598c569a51 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/README.md @@ -0,0 +1,50 @@ +# OpenTelemetry-Go Jaeger Exporter + +[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger) + +[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md) implementation. + +## Installation + +``` +go get -u go.opentelemetry.io/otel/exporters/jaeger +``` + +## Example + +See [../../example/jaeger](../../example/jaeger). + +## Configuration + +The exporter can be used to send spans to: + +- Jaeger agent using `jaeger.thrift` over compact thrift protocol via + [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option. +- Jaeger collector using `jaeger.thrift` over HTTP via + [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option. + +### Environment Variables + +The following environment variables can be used +(instead of options objects) to override the default configuration. 
+ +| Environment variable | Option | Default value | +| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- | +| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost` | +| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831` | +| `OTEL_EXPORTER_JAEGER_ENDPOINT` | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint) | `http://localhost:14268/api/traces` | +| `OTEL_EXPORTER_JAEGER_USER` | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername) | | +| `OTEL_EXPORTER_JAEGER_PASSWORD` | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword) | | + +Configuration using options have precedence over the environment variables. + +## Contributing + +This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path. +When re-generating Thrift code in the future, please adapt import paths as necessary. 
+ +## References + +- [Jaeger](https://www.jaegertracing.io/) +- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/jaeger.md) +- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go new file mode 100644 index 0000000000..a050020bb4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/agent.go @@ -0,0 +1,213 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "context" + "fmt" + "io" + "net" + "strings" + "time" + + "github.com/go-logr/logr" + + genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +const ( + // udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent. 
+ udpPacketMaxLength = 65000 + // emitBatchOverhead is the additional overhead bytes used for enveloping the datagram, + // synced with jaeger-agent https://github.com/jaegertracing/jaeger-client-go/blob/master/transport_udp.go#L37 + emitBatchOverhead = 70 +) + +// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. +type agentClientUDP struct { + genAgent.Agent + io.Closer + + connUDP udpConn + client *genAgent.AgentClient + maxPacketSize int // max size of datagram in bytes + thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span + thriftProtocol thrift.TProtocol +} + +type udpConn interface { + Write([]byte) (int, error) + SetWriteBuffer(int) error + Close() error +} + +type agentClientUDPParams struct { + Host string + Port string + MaxPacketSize int + Logger logr.Logger + AttemptReconnecting bool + AttemptReconnectInterval time.Duration +} + +// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. +func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) { + hostPort := net.JoinHostPort(params.Host, params.Port) + // validate hostport + if _, _, err := net.SplitHostPort(hostPort); err != nil { + return nil, err + } + + if params.MaxPacketSize <= 0 || params.MaxPacketSize > udpPacketMaxLength { + params.MaxPacketSize = udpPacketMaxLength + } + + if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 { + params.AttemptReconnectInterval = time.Second * 30 + } + + thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize) + protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{}) + thriftProtocol := protocolFactory.GetProtocol(thriftBuffer) + client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory) + + var connUDP udpConn + var err error + + if params.AttemptReconnecting { + // host is hostname, setup resolver loop in case host record changes during operation + connUDP, err = 
newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger) + if err != nil { + return nil, err + } + } else { + destAddr, err := net.ResolveUDPAddr("udp", hostPort) + if err != nil { + return nil, err + } + + connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + } + + if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil { + return nil, err + } + + return &agentClientUDP{ + connUDP: connUDP, + client: client, + maxPacketSize: params.MaxPacketSize, + thriftBuffer: thriftBuffer, + thriftProtocol: thriftProtocol, + }, nil +} + +// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent. +func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error { + var errs []error + processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process) + if err != nil { + // drop the batch if serialization of process fails. + return err + } + + maxPacketSize := a.maxPacketSize + if maxPacketSize > udpPacketMaxLength-emitBatchOverhead { + maxPacketSize = udpPacketMaxLength - emitBatchOverhead + } + totalSize := processSize + var spans []*gen.Span + for _, span := range batch.Spans { + spanSize, err := a.calcSizeOfSerializedThrift(ctx, span) + if err != nil { + errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span)) + continue + } + if spanSize+processSize >= maxPacketSize { + // drop the span that exceeds the limit. 
+ errs = append(errs, fmt.Errorf("span too large to send: %v", span)) + continue + } + if totalSize+spanSize >= maxPacketSize { + if err := a.flush(ctx, &gen.Batch{ + Process: batch.Process, + Spans: spans, + }); err != nil { + errs = append(errs, err) + } + spans = spans[:0] + totalSize = processSize + } + totalSize += spanSize + spans = append(spans, span) + } + + if len(spans) > 0 { + if err := a.flush(ctx, &gen.Batch{ + Process: batch.Process, + Spans: spans, + }); err != nil { + errs = append(errs, err) + } + } + + if len(errs) == 1 { + return errs[0] + } else if len(errs) > 1 { + joined := a.makeJoinedErrorString(errs) + return fmt.Errorf("multiple errors during transform: %s", joined) + } + return nil +} + +// makeJoinedErrorString join all the errors to one error message. +func (a *agentClientUDP) makeJoinedErrorString(errs []error) string { + var errMsgs []string + for _, err := range errs { + errMsgs = append(errMsgs, err.Error()) + } + return strings.Join(errMsgs, ", ") +} + +// flush will send the batch of spans to the agent. +func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error { + a.thriftBuffer.Reset() + if err := a.client.EmitBatch(ctx, batch); err != nil { + return err + } + if a.thriftBuffer.Len() > a.maxPacketSize { + return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d", + a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) + } + _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) + return err +} + +// calcSizeOfSerializedThrift calculate the serialized thrift packet size. +func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) { + a.thriftBuffer.Reset() + err := thriftStruct.Write(ctx, a.thriftProtocol) + return a.thriftBuffer.Len(), err +} + +// Close implements Close() of io.Closer and closes the underlying UDP connection. 
+func (a *agentClientUDP) Close() error { + return a.connUDP.Close() +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go new file mode 100644 index 0000000000..0d7ba86764 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/doc.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger. +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go new file mode 100644 index 0000000000..a7253e4831 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/env.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "os" +) + +// Environment variable names. +const ( + // Hostname for the Jaeger agent, part of address where exporter sends spans + // i.e. "localhost". + envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST" + // Port for the Jaeger agent, part of address where exporter sends spans + // i.e. 6831. + envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT" + // The HTTP endpoint for sending spans directly to a collector, + // i.e. http://jaeger-collector:14268/api/traces. + envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT" + // Username to send as part of "Basic" authentication to the collector endpoint. + envUser = "OTEL_EXPORTER_JAEGER_USER" + // Password to send as part of "Basic" authentication to the collector endpoint. + envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD" +) + +// envOr returns an env variable's value if it is exists or the default if not. +func envOr(key, defaultValue string) string { + if v, ok := os.LookupEnv(key); ok && v != "" { + return v + } + return defaultValue +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go new file mode 100644 index 0000000000..3b96e3222e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent-consts.go @@ -0,0 +1,27 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. 
+ +package agent + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +func init() { +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go new file mode 100644 index 0000000000..c7c8e9ca3e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent/agent.go @@ -0,0 +1,412 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package agent + +import ( + "bytes" + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +var _ = jaeger.GoUnusedProtection__ +var _ = zipkincore.GoUnusedProtection__ + +type Agent interface { + // Parameters: + // - Spans + EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) + // Parameters: + // - Batch + EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) +} + +type AgentClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { + return &AgentClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { + return &AgentClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAgentClient(c thrift.TClient) *AgentClient { + return &AgentClient{ + c: c, + } +} + +func (p *AgentClient) Client_() thrift.TClient { + return p.c +} + +func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) { + var _args0 AgentEmitZipkinBatchArgs + _args0.Spans = spans + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil { + return err + } + return nil +} + +// Parameters: +// - Batch +func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) { + var _args1 AgentEmitBatchArgs + _args1.Batch = batch + p.SetLastResponseMeta_(thrift.ResponseMeta{}) + if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil { + return err + } + return nil +} + +type AgentProcessor struct { + processorMap map[string]thrift.TProcessorFunction + 
handler Agent +} + +func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAgentProcessor(handler Agent) *AgentProcessor { + + self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler} + self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} + return self2 +} + +func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x3.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x3 + +} + +type agentProcessorEmitZipkinBatch struct { + handler Agent +} + +func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != 
nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil +} + +type agentProcessorEmitBatch struct { + handler Agent +} + +func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + _ = tickerCancel + + if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil { + tickerCancel() + return true, thrift.WrapTException(err2) + } + tickerCancel() + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type AgentEmitZipkinBatchArgs struct { + Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs { + return &AgentEmitZipkinBatchArgs{} +} + +func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span { + return p.Spans +} +func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*zipkincore.Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem4 := &zipkincore.Span{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Spans = append(p.Spans, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != 
nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *AgentEmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Batch +type AgentEmitBatchArgs struct { + Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"` +} + +func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { + return &AgentEmitBatchArgs{} +} + +var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch + +func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch { + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } + return p.Batch +} +func (p *AgentEmitBatchArgs) IsSetBatch() bool { + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + p.Batch = &jaeger.Batch{} + if err := 
p.Batch.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) + } + if err := p.Batch.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) + } + return err +} + +func (p *AgentEmitBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go 
b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go similarity index 76% rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go index b6ce85570b..10162857fb 100644 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go @@ -2,12 +2,13 @@ package jaeger -import( +import ( "bytes" "context" "fmt" "time" - "github.com/uber/jaeger-client-go/thrift" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) @@ -17,7 +18,5 @@ var _ = context.Background var _ = time.Now var _ = bytes.Equal - func init() { } - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go new file mode 100644 index 0000000000..b1fe26c57d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger/jaeger.go @@ -0,0 +1,3022 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package jaeger + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type TagType int64 + +const ( + TagType_STRING TagType = 0 + TagType_DOUBLE TagType = 1 + TagType_BOOL TagType = 2 + TagType_LONG TagType = 3 + TagType_BINARY TagType = 4 +) + +func (p TagType) String() string { + switch p { + case TagType_STRING: + return "STRING" + case TagType_DOUBLE: + return "DOUBLE" + case TagType_BOOL: + return "BOOL" + case TagType_LONG: + return "LONG" + case TagType_BINARY: + return "BINARY" + } + return "" +} + +func TagTypeFromString(s string) (TagType, error) { + switch s { + case "STRING": + return TagType_STRING, nil + case "DOUBLE": + return TagType_DOUBLE, nil + case "BOOL": + return TagType_BOOL, nil + case "LONG": + return TagType_LONG, nil + case "BINARY": + return TagType_BINARY, nil + } + return TagType(0), fmt.Errorf("not a valid TagType string") +} + +func TagTypePtr(v TagType) *TagType { return &v } + +func (p TagType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *TagType) UnmarshalText(text []byte) error { + q, err := TagTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *TagType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = TagType(v) + return nil +} + +func (p *TagType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type SpanRefType int64 + +const ( + SpanRefType_CHILD_OF SpanRefType = 0 + SpanRefType_FOLLOWS_FROM SpanRefType = 1 +) + +func (p SpanRefType) String() string { + switch p { + case SpanRefType_CHILD_OF: + return "CHILD_OF" + case SpanRefType_FOLLOWS_FROM: + return "FOLLOWS_FROM" + } + return "" +} + +func SpanRefTypeFromString(s string) (SpanRefType, error) { + switch s { + case "CHILD_OF": + return SpanRefType_CHILD_OF, nil + case "FOLLOWS_FROM": + return SpanRefType_FOLLOWS_FROM, nil + 
} + return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") +} + +func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } + +func (p SpanRefType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SpanRefType) UnmarshalText(text []byte) error { + q, err := SpanRefTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *SpanRefType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = SpanRefType(v) + return nil +} + +func (p *SpanRefType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Key +// - VType +// - VStr +// - VDouble +// - VBool +// - VLong +// - VBinary +type Tag struct { + Key string `thrift:"key,1,required" db:"key" json:"key"` + VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` + VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` + VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` + VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` + VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` + VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"` +} + +func NewTag() *Tag { + return &Tag{} +} + +func (p *Tag) GetKey() string { + return p.Key +} + +func (p *Tag) GetVType() TagType { + return p.VType +} + +var Tag_VStr_DEFAULT string + +func (p *Tag) GetVStr() string { + if !p.IsSetVStr() { + return Tag_VStr_DEFAULT + } + return *p.VStr +} + +var Tag_VDouble_DEFAULT float64 + +func (p *Tag) GetVDouble() float64 { + if !p.IsSetVDouble() { + return Tag_VDouble_DEFAULT + } + return *p.VDouble +} + +var Tag_VBool_DEFAULT bool + +func (p *Tag) GetVBool() bool { + if !p.IsSetVBool() { + return Tag_VBool_DEFAULT + } + return *p.VBool +} + +var Tag_VLong_DEFAULT int64 + +func (p *Tag) GetVLong() int64 { + if !p.IsSetVLong() { + return 
Tag_VLong_DEFAULT + } + return *p.VLong +} + +var Tag_VBinary_DEFAULT []byte + +func (p *Tag) GetVBinary() []byte { + return p.VBinary +} +func (p *Tag) IsSetVStr() bool { + return p.VStr != nil +} + +func (p *Tag) IsSetVDouble() bool { + return p.VDouble != nil +} + +func (p *Tag) IsSetVBool() bool { + return p.VBool != nil +} + +func (p *Tag) IsSetVLong() bool { + return p.VLong != nil +} + +func (p *Tag) IsSetVBinary() bool { + return p.VBinary != nil +} + +func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetKey bool = false + var issetVType bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetKey = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetVType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.DOUBLE { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err 
+ } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetKey { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) + } + if !issetVType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) + } + return nil +} + +func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := TagType(v) + p.VType = temp + } + return nil +} + +func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.VStr = &v + } + return nil +} + +func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadDouble(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.VDouble = &v + } + return nil +} + +func (p *Tag) ReadField5(ctx 
context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.VBool = &v + } + return nil +} + +func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.VLong = &v + } + return nil +} + +func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.VBinary = v + } + return nil +} + +func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField7(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) + } + return err +} + +func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVStr() { + if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) + } + if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) + } + } + return err +} + +func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVDouble() { + if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) + } + if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) + 
} + } + return err +} + +func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVBool() { + if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) + } + } + return err +} + +func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVLong() { + if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) + } + } + return err +} + +func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetVBinary() { + if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.VBinary); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) + } + } + return err +} + +func (p *Tag) Equals(other *Tag) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + 
} + if p.VType != other.VType { + return false + } + if p.VStr != other.VStr { + if p.VStr == nil || other.VStr == nil { + return false + } + if (*p.VStr) != (*other.VStr) { + return false + } + } + if p.VDouble != other.VDouble { + if p.VDouble == nil || other.VDouble == nil { + return false + } + if (*p.VDouble) != (*other.VDouble) { + return false + } + } + if p.VBool != other.VBool { + if p.VBool == nil || other.VBool == nil { + return false + } + if (*p.VBool) != (*other.VBool) { + return false + } + } + if p.VLong != other.VLong { + if p.VLong == nil || other.VLong == nil { + return false + } + if (*p.VLong) != (*other.VLong) { + return false + } + } + if bytes.Compare(p.VBinary, other.VBinary) != 0 { + return false + } + return true +} + +func (p *Tag) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Tag(%+v)", *p) +} + +// Attributes: +// - Timestamp +// - Fields +type Log struct { + Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` + Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` +} + +func NewLog() *Log { + return &Log{} +} + +func (p *Log) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Log) GetFields() []*Tag { + return p.Fields +} +func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTimestamp bool = false + var issetFields bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTimestamp = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if 
fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetFields = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTimestamp { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) + } + if !issetFields { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) + } + return nil +} + +func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Fields = tSlice + for i := 0; i < size; i++ { + _elem0 := &Tag{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Fields = append(p.Fields, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Log"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := 
p.writeField2(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Fields { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) + } + return err +} + +func (p *Log) Equals(other *Log) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if len(p.Fields) != len(other.Fields) { + return 
false + } + for i, _tgt := range p.Fields { + _src1 := other.Fields[i] + if !_tgt.Equals(_src1) { + return false + } + } + return true +} + +func (p *Log) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Log(%+v)", *p) +} + +// Attributes: +// - RefType +// - TraceIdLow +// - TraceIdHigh +// - SpanId +type SpanRef struct { + RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` + TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` +} + +func NewSpanRef() *SpanRef { + return &SpanRef{} +} + +func (p *SpanRef) GetRefType() SpanRefType { + return p.RefType +} + +func (p *SpanRef) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *SpanRef) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *SpanRef) GetSpanId() int64 { + return p.SpanId +} +func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetRefType bool = false + var issetTraceIdLow bool = false + var issetTraceIdHigh bool = false + var issetSpanId bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetRefType = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTraceIdLow = true + } else { + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetTraceIdHigh = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetRefType { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) + } + if !issetTraceIdLow { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) + } + if !issetTraceIdHigh { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) + } + if !issetSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) + } + return nil +} + +func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := SpanRefType(v) + p.RefType = temp + } + return nil +} + +func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TraceIdLow = v + } + return nil +} + +func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TraceIdHigh = v + } + return nil +} + +func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.SpanId = v + } + return nil +} + +func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) + } + return err +} + +func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 
2:traceIdLow: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) + } + return err +} + +func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) + } + return err +} + +func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) + } + return err +} + +func (p *SpanRef) Equals(other *SpanRef) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.RefType != other.RefType { + return false + } + if p.TraceIdLow != other.TraceIdLow { + return false + } + if p.TraceIdHigh != other.TraceIdHigh { + return false + } + if p.SpanId != other.SpanId { + return false + } + return true +} + +func (p *SpanRef) String() string { + if p == nil { + return "" + } + 
return fmt.Sprintf("SpanRef(%+v)", *p) +} + +// Attributes: +// - TraceIdLow +// - TraceIdHigh +// - SpanId +// - ParentSpanId +// - OperationName +// - References +// - Flags +// - StartTime +// - Duration +// - Tags +// - Logs +type Span struct { + TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` + TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` + SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"` + ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` + OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` + References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` + Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` + StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` + Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` + Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` + Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceIdLow() int64 { + return p.TraceIdLow +} + +func (p *Span) GetTraceIdHigh() int64 { + return p.TraceIdHigh +} + +func (p *Span) GetSpanId() int64 { + return p.SpanId +} + +func (p *Span) GetParentSpanId() int64 { + return p.ParentSpanId +} + +func (p *Span) GetOperationName() string { + return p.OperationName +} + +var Span_References_DEFAULT []*SpanRef + +func (p *Span) GetReferences() []*SpanRef { + return p.References +} + +func (p *Span) GetFlags() int32 { + return p.Flags +} + +func (p *Span) GetStartTime() int64 { + return p.StartTime +} + +func (p *Span) GetDuration() int64 { + return p.Duration +} + +var Span_Tags_DEFAULT []*Tag + +func (p *Span) GetTags() []*Tag { + return p.Tags +} + +var Span_Logs_DEFAULT []*Log + +func (p *Span) GetLogs() []*Log { + return p.Logs +} 
+func (p *Span) IsSetReferences() bool { + return p.References != nil +} + +func (p *Span) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Span) IsSetLogs() bool { + return p.Logs != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetTraceIdLow bool = false + var issetTraceIdHigh bool = false + var issetSpanId bool = false + var issetParentSpanId bool = false + var issetOperationName bool = false + var issetFlags bool = false + var issetStartTime bool = false + var issetDuration bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetTraceIdLow = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTraceIdHigh = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + issetParentSpanId = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + issetOperationName = true + } else { + if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(ctx, iprot); err != nil { + return err + } + issetFlags = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I64 { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + issetStartTime = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I64 { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + issetDuration = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.LIST { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.LIST { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetTraceIdLow { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) + } + if !issetTraceIdHigh { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) + } + if !issetSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, 
fmt.Errorf("Required field SpanId is not set")) + } + if !issetParentSpanId { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set")) + } + if !issetOperationName { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) + } + if !issetFlags { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) + } + if !issetStartTime { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) + } + if !issetDuration { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceIdLow = v + } + return nil +} + +func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TraceIdHigh = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SpanId = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ParentSpanId = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.OperationName = v + } + return 
nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*SpanRef, 0, size) + p.References = tSlice + for i := 0; i < size; i++ { + _elem2 := &SpanRef{} + if err := _elem2.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.References = append(p.References, _elem2) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.Flags = v + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Duration = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem3 := &Tag{} + if err := _elem3.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Tags = append(p.Tags, _elem3) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p 
*Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Log, 0, size) + p.Logs = tSlice + for i := 0; i < size; i++ { + _elem4 := &Log{} + if err := _elem4.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Logs = append(p.Logs, _elem4) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField7(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, 
"traceIdLow", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) + } + return err +} + +func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetReferences() { + if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.References { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) + } + } + return err +} + +func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field 
begin error 7:flags: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := 
v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetLogs() { + if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Logs { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceIdLow != other.TraceIdLow { + return false + } + if p.TraceIdHigh != other.TraceIdHigh { + return false + } + if p.SpanId != other.SpanId { + return false + } + if p.ParentSpanId != other.ParentSpanId { + return false + } + if p.OperationName != other.OperationName { + return false + } + if len(p.References) != len(other.References) { + return false + } + for i, _tgt := range p.References { + _src5 := other.References[i] + if !_tgt.Equals(_src5) { + return false + } + } + if p.Flags != other.Flags { + return false + } + if p.StartTime != other.StartTime { + return false + } 
+ if p.Duration != other.Duration { + return false + } + if len(p.Tags) != len(other.Tags) { + return false + } + for i, _tgt := range p.Tags { + _src6 := other.Tags[i] + if !_tgt.Equals(_src6) { + return false + } + } + if len(p.Logs) != len(other.Logs) { + return false + } + for i, _tgt := range p.Logs { + _src7 := other.Logs[i] + if !_tgt.Equals(_src7) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - ServiceName +// - Tags +type Process struct { + ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` + Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` +} + +func NewProcess() *Process { + return &Process{} +} + +func (p *Process) GetServiceName() string { + return p.ServiceName +} + +var Process_Tags_DEFAULT []*Tag + +func (p *Process) GetTags() []*Tag { + return p.Tags +} +func (p *Process) IsSetTags() bool { + return p.Tags != nil +} + +func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetServiceName bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetServiceName = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != 
nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetServiceName { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) + } + return nil +} + +func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Tag, 0, size) + p.Tags = tSlice + for i := 0; i < size; i++ { + _elem8 := &Tag{} + if err := _elem8.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) + } + p.Tags = append(p.Tags, _elem8) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Process"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err 
error) { + if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) + } + return err +} + +func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTags() { + if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Tags { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) + } + } + return err +} + +func (p *Process) Equals(other *Process) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if len(p.Tags) != len(other.Tags) { + return false + } + for i, _tgt := range p.Tags { + _src9 := other.Tags[i] + if !_tgt.Equals(_src9) { + return false + } + } + return true +} + +func (p *Process) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Process(%+v)", *p) +} + +// Attributes: +// - FullQueueDroppedSpans +// - TooLargeDroppedSpans +// - FailedToEmitSpans +type 
ClientStats struct { + FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"` + TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"` + FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"` +} + +func NewClientStats() *ClientStats { + return &ClientStats{} +} + +func (p *ClientStats) GetFullQueueDroppedSpans() int64 { + return p.FullQueueDroppedSpans +} + +func (p *ClientStats) GetTooLargeDroppedSpans() int64 { + return p.TooLargeDroppedSpans +} + +func (p *ClientStats) GetFailedToEmitSpans() int64 { + return p.FailedToEmitSpans +} +func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetFullQueueDroppedSpans bool = false + var issetTooLargeDroppedSpans bool = false + var issetFailedToEmitSpans bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetFullQueueDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetTooLargeDroppedSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + issetFailedToEmitSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != 
nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetFullQueueDroppedSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set")) + } + if !issetTooLargeDroppedSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set")) + } + if !issetFailedToEmitSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set")) + } + return nil +} + +func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.FullQueueDroppedSpans = v + } + return nil +} + +func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TooLargeDroppedSpans = v + } + return nil +} + +func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.FailedToEmitSpans = v + } + return nil +} + +func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } 
+ if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) + } + return err +} + +func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) + } + return err +} + +func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) + } + return err +} + +func (p *ClientStats) Equals(other *ClientStats) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { + return false + } + if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { + return false + } + if p.FailedToEmitSpans != other.FailedToEmitSpans { + return false + } + return true +} + +func (p *ClientStats) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClientStats(%+v)", *p) +} + +// Attributes: +// - Process +// - Spans +// - SeqNo +// - Stats +type Batch struct { + Process *Process `thrift:"process,1,required" db:"process" json:"process"` + Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` + SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"` + Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"` +} + +func NewBatch() *Batch { + return &Batch{} +} + +var Batch_Process_DEFAULT *Process + +func (p *Batch) GetProcess() *Process { + if !p.IsSetProcess() { + return Batch_Process_DEFAULT + } + return p.Process +} + +func (p *Batch) GetSpans() []*Span { + return p.Spans +} + +var Batch_SeqNo_DEFAULT int64 + +func (p *Batch) GetSeqNo() int64 { + if !p.IsSetSeqNo() { + return Batch_SeqNo_DEFAULT + } + return *p.SeqNo +} + +var Batch_Stats_DEFAULT *ClientStats + +func (p *Batch) GetStats() *ClientStats { + if !p.IsSetStats() { + return Batch_Stats_DEFAULT + } + return p.Stats +} +func (p *Batch) IsSetProcess() bool { + return p.Process != nil +} + +func (p *Batch) IsSetSeqNo() bool { + return p.SeqNo != nil +} + +func (p *Batch) IsSetStats() bool { + return p.Stats != nil +} + +func (p *Batch) Read(ctx 
context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetProcess bool = false + var issetSpans bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetProcess = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + issetSpans = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetProcess { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) + } + if !issetSpans { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) + } + return nil +} + +func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + 
p.Process = &Process{} + if err := p.Process.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) + } + return nil +} + +func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem10 := &Span{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Spans = append(p.Spans, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SeqNo = &v + } + return nil +} + +func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Stats = &ClientStats{} + if err := p.Stats.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err) + } + return nil +} + +func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); 
err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) + } + if err := p.Process.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) + } + return err +} + +func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) + } + return err +} + +func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSeqNo() { + if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != 
nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) + } + } + return err +} + +func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetStats() { + if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) + } + if err := p.Stats.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) + } + } + return err +} + +func (p *Batch) Equals(other *Batch) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if !p.Process.Equals(other.Process) { + return false + } + if len(p.Spans) != len(other.Spans) { + return false + } + for i, _tgt := range p.Spans { + _src11 := other.Spans[i] + if !_tgt.Equals(_src11) { + return false + } + } + if p.SeqNo != other.SeqNo { + if p.SeqNo == nil || other.SeqNo == nil { + return false + } + if (*p.SeqNo) != (*other.SeqNo) { + return false + } + } + if !p.Stats.Equals(other.Stats) { + return false + } + return true +} + +func (p *Batch) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Batch(%+v)", *p) +} + +// Attributes: +// - Ok +type BatchSubmitResponse struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewBatchSubmitResponse() *BatchSubmitResponse { + return &BatchSubmitResponse{} +} + +func (p *BatchSubmitResponse) GetOk() bool { + return p.Ok +} +func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) 
+ if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != 
nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { + return false + } + return true +} + +func (p *BatchSubmitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) +} + +type Collector interface { + // Parameters: + // - Batches + SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) +} + +type CollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { + return &CollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { + return &CollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewCollectorClient(c thrift.TClient) *CollectorClient { + return &CollectorClient{ + c: c, + } +} + +func (p *CollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Batches +func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) { + var _args12 CollectorSubmitBatchesArgs + _args12.Batches = batches + var _result14 CollectorSubmitBatchesResult + var _meta13 thrift.ResponseMeta + _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, 
&_result14) + p.SetLastResponseMeta_(_meta13) + if _err != nil { + return + } + return _result14.GetSuccess(), nil +} + +type CollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Collector +} + +func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewCollectorProcessor(handler Collector) *CollectorProcessor { + + self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} + return self15 +} + +func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x16.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x16 + +} + +type collectorProcessorSubmitBatches struct { + handler Collector +} + +func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := CollectorSubmitBatchesArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := 
thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := CollectorSubmitBatchesResult{} + var retval []*BatchSubmitResponse + if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error()) + oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = 
thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Batches +type CollectorSubmitBatchesArgs struct { + Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` +} + +func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { + return &CollectorSubmitBatchesArgs{} +} + +func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { + return p.Batches +} +func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Batch, 0, size) + p.Batches = tSlice + for i := 0; i < size; i++ { + _elem17 := &Batch{} + if err := _elem17.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) + } + p.Batches = append(p.Batches, _elem17) + } + if err := 
iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Batches { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) + } + return err +} + +func (p *CollectorSubmitBatchesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) +} + +// Attributes: +// - Success +type CollectorSubmitBatchesResult struct { + Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult 
{ + return &CollectorSubmitBatchesResult{} +} + +var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse + +func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { + return p.Success +} +func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BatchSubmitResponse, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem18 := &BatchSubmitResponse{} + if err := _elem18.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) + } + p.Success = append(p.Success, _elem18) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p 
*CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *CollectorSubmitBatchesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go rename to 
vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go similarity index 88% rename from vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go index 7a924b9770..043ecba962 100644 --- a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go @@ -2,12 +2,13 @@ package zipkincore -import( +import ( "bytes" "context" "fmt" "time" - "github.com/uber/jaeger-client-go/thrift" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) @@ -36,4 +37,3 @@ const MESSAGE_ADDR = "ma" func init() { } - diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go new file mode 100644 index 0000000000..7f46810e0d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go @@ -0,0 +1,2067 @@ +// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
+var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + 
+// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// +// Conventionally, when the port isn't known, port = 0. +// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web" +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + 
} + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err 
:= iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// An annotation is similar to a log statement. It includes a host field which +// allows these events to be attributed properly, and also aggregatable. 
+// +// Attributes: +// - Timestamp: Microseconds from epoch. +// +// This value should use the most precise value possible. For example, +// gettimeofday or syncing nanoTime against a tick of currentTimeMillis. +// - Value +// - Host: Always the host that recorded the event. By specifying the host you allow +// rollup of all events (such as client requests to a service) by IP address. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + 
return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := 
oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p 
*Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of "http.uri" could be the path to a resource in a +// RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.uri" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key +// - Value +// - AnnotationType +// - Host: The host that recorded the tag, which allows you to differentiate between +// multiple tags with the same key. There are two exceptions to this. +// +// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, or clients such as web browsers. 
+type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if 
err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err 
:= p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + 
return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } + if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// The root span is where trace_id = id and parent_id = Nil. The root span is +// usually the longest interval in the trace, starting with a SERVER_RECV +// annotation and ending with a SERVER_SEND. +// +// Attributes: +// - TraceID +// - Name: Span name in lowercase, rpc method for example +// +// Conventionally, when the span name isn't known, name = "unknown". +// - ID +// - ParentID +// - Annotations +// - BinaryAnnotations +// - Debug +// - Timestamp: Microseconds from epoch of the creation of this span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. 
For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// This field is optional for compatibility with old data: first-party span +// stores are expected to support this at time of introduction. +// - Duration: Measurement of duration in microseconds, used to support queries. +// +// This value should be set directly, where possible. Doing so encourages +// precise measurement decoupled from problems of clocks, such as skew or NTP +// updates causing time to move backwards. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. 
+type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p 
*Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } 
+ } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading 
field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := 
iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) 
+ } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err 
error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if 
err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) 
field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { + return false + } + if p.Timestamp != other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + +// Attributes: +// - Ok +type Response struct { + Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` +} + +func NewResponse() *Response { + return &Response{} +} + +func (p *Response) GetOk() bool { + return p.Ok +} +func (p 
*Response) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + var issetOk bool = false + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + issetOk = true + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + if !issetOk { + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) + } + return nil +} + +func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ok = v + } + return nil +} + +func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Response) writeField1(ctx context.Context, 
oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) + } + return err +} + +func (p *Response) Equals(other *Response) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ok != other.Ok { + return false + } + return true +} + +func (p *Response) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Response(%+v)", *p) +} + +type ZipkinCollector interface { + // Parameters: + // - Spans + SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) +} + +type ZipkinCollectorClient struct { + c thrift.TClient + meta thrift.ResponseMeta +} + +func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient { + return &ZipkinCollectorClient{ + c: c, + } +} + +func (p *ZipkinCollectorClient) Client_() thrift.TClient { + return p.c +} + +func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta { + return p.meta +} + +func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { + p.meta = meta +} + +// Parameters: +// - Spans +func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx 
context.Context, spans []*Span) (_r []*Response, _err error) { + var _args4 ZipkinCollectorSubmitZipkinBatchArgs + _args4.Spans = spans + var _result6 ZipkinCollectorSubmitZipkinBatchResult + var _meta5 thrift.ResponseMeta + _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6) + p.SetLastResponseMeta_(_meta5) + if _err != nil { + return + } + return _result6.GetSuccess(), nil +} + +type ZipkinCollectorProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ZipkinCollector +} + +func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor { + + self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler} + return self7 +} + +func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) + if err2 != nil { + return false, thrift.WrapTException(err2) + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(ctx, thrift.STRUCT) + iprot.ReadMessageEnd(ctx) + x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) + x8.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, x8 + +} + +type 
zipkinCollectorProcessorSubmitZipkinBatch struct { + handler ZipkinCollector +} + +func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ZipkinCollectorSubmitZipkinBatchArgs{} + var err2 error + if err2 = args.Read(ctx, iprot); err2 != nil { + iprot.ReadMessageEnd(ctx) + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return false, thrift.WrapTException(err2) + } + iprot.ReadMessageEnd(ctx) + + tickerCancel := func() {} + // Start a goroutine to do server side connectivity check. + if thrift.ServerConnectivityCheckInterval > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + var tickerCtx context.Context + tickerCtx, tickerCancel = context.WithCancel(context.Background()) + defer tickerCancel() + go func(ctx context.Context, cancel context.CancelFunc) { + ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !iprot.Transport().IsOpen() { + cancel() + return + } + } + } + }(tickerCtx, cancel) + } + + result := ZipkinCollectorSubmitZipkinBatchResult{} + var retval []*Response + if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil { + tickerCancel() + if err2 == thrift.ErrAbandonRequest { + return false, thrift.WrapTException(err2) + } + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error()) + oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId) + x.Write(ctx, oprot) + oprot.WriteMessageEnd(ctx) + oprot.Flush(ctx) + return true, thrift.WrapTException(err2) + } else { + result.Success = retval + } + tickerCancel() + if 
err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = result.Write(ctx, oprot); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = thrift.WrapTException(err2) + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Spans +type ZipkinCollectorSubmitZipkinBatchArgs struct { + Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"` +} + +func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs { + return &ZipkinCollectorSubmitZipkinBatchArgs{} +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span { + return p.Spans +} +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot 
thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Span, 0, size) + p.Spans = tSlice + for i := 0; i < size; i++ { + _elem9 := &Span{} + if err := _elem9.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.Spans = append(p.Spans, _elem9) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Spans { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ZipkinCollectorSubmitZipkinBatchResult struct { + Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult { + return &ZipkinCollectorSubmitZipkinBatchResult{} +} + +var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response + +func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response { + return p.Success +} +func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.LIST { + if err := p.ReadField0(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Response, 0, size) + p.Success = tSlice + for i := 0; i < size; i++ { + _elem10 := &Response{} + if err := _elem10.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Success = append(p.Success, _elem10) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Success { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE new file mode 100644 index 0000000000..2bc6fbbf65 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/LICENSE @@ -0,0 +1,306 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. 
It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. + +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE new file mode 100644 index 0000000000..37824e7fb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/NOTICE @@ -0,0 +1,5 @@ +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go new file mode 100644 index 0000000000..aa551b4ab3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "context" +) + +type TBufferedTransportFactory struct { + size int +} + +type TBufferedTransport struct { + bufio.ReadWriter + tp TTransport +} + +func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return NewTBufferedTransport(trans, p.size), nil +} + +func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { + return &TBufferedTransportFactory{size: bufferSize} +} + +func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { + return &TBufferedTransport{ + ReadWriter: bufio.ReadWriter{ + Reader: bufio.NewReaderSize(trans, bufferSize), + Writer: bufio.NewWriterSize(trans, bufferSize), + }, + tp: trans, + } +} + +func (p *TBufferedTransport) IsOpen() bool { + return p.tp.IsOpen() +} + +func (p *TBufferedTransport) Open() (err error) { + return p.tp.Open() +} + +func (p *TBufferedTransport) Close() (err error) { + return p.tp.Close() +} + +func (p *TBufferedTransport) Read(b []byte) (int, error) { + n, err := p.ReadWriter.Read(b) + if err != nil { + p.ReadWriter.Reader.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Write(b []byte) (int, error) { + n, err := p.ReadWriter.Write(b) + if err != nil { + p.ReadWriter.Writer.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Flush(ctx context.Context) error { + if err := p.ReadWriter.Flush(); err != nil { + p.ReadWriter.Writer.Reset(p.tp) + return err + } + return p.tp.Flush(ctx) +} + +func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { + return p.tp.RemainingBytes() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. 
+func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.tp, conf) +} + +var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/client.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/configuration.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/context.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go new file mode 100644 
index 0000000000..fdf9bfec15 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go @@ -0,0 +1,447 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" +) + +type TDebugProtocol struct { + // Required. The actual TProtocol to do the read/write. + Delegate TProtocol + + // Optional. The logger and prefix to log all the args/return values + // from Delegate TProtocol calls. + // + // If Logger is nil, StdLogger using stdlib log package with os.Stderr + // will be used. If disable logging is desired, set Logger to NopLogger + // explicitly instead of leaving it as nil/unset. + Logger Logger + LogPrefix string + + // Optional. An TProtocol to duplicate everything read/written from Delegate. + // + // A typical use case of this is to use TSimpleJSONProtocol wrapping + // TMemoryBuffer in a middleware to json logging requests/responses. + // + // This feature is not available from TDebugProtocolFactory. In order to + // use it you have to construct TDebugProtocol directly, or set DuplicateTo + // field after getting a TDebugProtocol from the factory. 
+ DuplicateTo TProtocol +} + +type TDebugProtocolFactory struct { + Underlying TProtocolFactory + LogPrefix string + Logger Logger +} + +// NewTDebugProtocolFactory creates a TDebugProtocolFactory. +// +// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct +// itself instead. This version will use the default logger from standard +// library. +func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: StdLogger(nil), + } +} + +// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory. +func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + Logger: logger, + } +} + +func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return &TDebugProtocol{ + Delegate: t.Underlying.GetProtocol(trans), + LogPrefix: t.LogPrefix, + Logger: fallbackLogger(t.Logger), + } +} + +func (tdp *TDebugProtocol) logf(format string, v ...interface{}) { + fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...)) +} + +func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) + tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) + } + return err +} +func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMessageEnd(ctx) + tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name 
string) error { + err := tdp.Delegate.WriteStructBegin(ctx, name) + tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return err +} +func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { + err := tdp.Delegate.WriteStructEnd(ctx) + tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) + tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { + err := tdp.Delegate.WriteFieldEnd(ctx) + tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { + err := tdp.Delegate.WriteFieldStop(ctx) + tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldStop(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size) + tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { + err := tdp.Delegate.WriteMapEnd(ctx) + tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) + if 
tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteListBegin(ctx, elemType, size) + tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { + err := tdp.Delegate.WriteListEnd(ctx) + tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) + tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return err +} +func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { + err := tdp.Delegate.WriteSetEnd(ctx) + tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return err +} +func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { + err := tdp.Delegate.WriteBool(ctx, value) + tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { + err := tdp.Delegate.WriteByte(ctx, value) + tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { + err := tdp.Delegate.WriteI16(ctx, value) + 
tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { + err := tdp.Delegate.WriteI32(ctx, value) + tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { + err := tdp.Delegate.WriteI64(ctx, value) + tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { + err := tdp.Delegate.WriteDouble(ctx, value) + tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { + err := tdp.Delegate.WriteString(ctx, value) + tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return err +} +func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { + err := tdp.Delegate.WriteBinary(ctx, value) + tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return err +} + +func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) + tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, 
seqid) + } + return +} +func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadMessageEnd(ctx) + tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMessageEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { + name, err = tdp.Delegate.ReadStructBegin(ctx) + tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructBegin(ctx, name) + } + return +} +func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadStructEnd(ctx) + tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteStructEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { + name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) + tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) + } + return +} +func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadFieldEnd(ctx) + tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteFieldEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { + keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) + tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) + } + return +} +func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { + 
err = tdp.Delegate.ReadMapEnd(ctx) + tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteMapEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadListBegin(ctx) + tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadListEnd(ctx) + tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteListEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) + tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) + } + return +} +func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { + err = tdp.Delegate.ReadSetEnd(ctx) + tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteSetEnd(ctx) + } + return +} +func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { + value, err = tdp.Delegate.ReadBool(ctx) + tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBool(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { + value, err = tdp.Delegate.ReadByte(ctx) + tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteByte(ctx, value) + } + return +} +func (tdp *TDebugProtocol) 
ReadI16(ctx context.Context) (value int16, err error) { + value, err = tdp.Delegate.ReadI16(ctx) + tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI16(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { + value, err = tdp.Delegate.ReadI32(ctx) + tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI32(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { + value, err = tdp.Delegate.ReadI64(ctx) + tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteI64(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { + value, err = tdp.Delegate.ReadDouble(ctx) + tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteDouble(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) { + value, err = tdp.Delegate.ReadString(ctx) + tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteString(ctx, value) + } + return +} +func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { + value, err = tdp.Delegate.ReadBinary(ctx) + tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.WriteBinary(ctx, value) + } + return +} +func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + err = tdp.Delegate.Skip(ctx, fieldType) + tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.Skip(ctx, fieldType) + } + 
return +} +func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { + err = tdp.Delegate.Flush(ctx) + tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err) + if tdp.DuplicateTo != nil { + tdp.DuplicateTo.Flush(ctx) + } + return +} + +func (tdp *TDebugProtocol) Transport() TTransport { + return tdp.Delegate.Transport() +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(tdp.Delegate, conf) + PropagateTConfiguration(tdp.DuplicateTo, conf) +} + +var _ TConfigurationSetter = (*TDebugProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go new file mode 100644 index 0000000000..cefc7ecda5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "sync" +) + +type TDeserializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +func NewTDeserializer() *TDeserializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolTransport(transport) + + return &TDeserializer{ + Transport: transport, + Protocol: protocol, + } +} + +func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) { + t.Transport.Reset() + + err = nil + if _, err = t.Transport.Write([]byte(s)); err != nil { + return + } + if err = msg.Read(ctx, t.Protocol); err != nil { + return + } + return +} + +func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) { + t.Transport.Reset() + + err = nil + if _, err = t.Transport.Write(b); err != nil { + return + } + if err = msg.Read(ctx, t.Protocol); err != nil { + return + } + return +} + +// TDeserializerPool is the thread-safe version of TDeserializer, +// it uses resource pool of TDeserializer under the hood. +// +// It must be initialized with either NewTDeserializerPool or +// NewTDeserializerPoolSizeFactory. +type TDeserializerPool struct { + pool sync.Pool +} + +// NewTDeserializerPool creates a new TDeserializerPool. +// +// NewTDeserializer can be used as the arg here. +func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool { + return &TDeserializerPool{ + pool: sync.Pool{ + New: func() interface{} { + return f() + }, + }, + } +} + +// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with +// the given size and protocol factory. +// +// Note that the size is not the limit. The TMemoryBuffer underneath can grow +// larger than that. It just dictates the initial size. 
+func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool { + return &TDeserializerPool{ + pool: sync.Pool{ + New: func() interface{} { + transport := NewTMemoryBufferLen(size) + protocol := factory.GetProtocol(transport) + + return &TDeserializer{ + Transport: transport, + Protocol: protocol, + } + }, + }, + } +} + +func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error { + d := t.pool.Get().(*TDeserializer) + defer t.pool.Put(d) + return d.ReadString(ctx, msg, s) +} + +func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error { + d := t.pool.Get().(*TDeserializer) + defer t.pool.Put(d) + return d.Read(ctx, msg, b) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/exception.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go new file mode 100644 index 0000000000..f683e7f544 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" +) + +// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. +const DEFAULT_MAX_LENGTH = 16384000 + +type TFramedTransport struct { + transport TTransport + + cfg *TConfiguration + + writeBuf bytes.Buffer + + reader *bufio.Reader + readBuf bytes.Buffer + + buffer [4]byte +} + +type tFramedTransportFactory struct { + factory TTransportFactory + cfg *TConfiguration +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. +func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportFactoryConf instead. 
+func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { + return NewTFramedTransportFactoryConf(factory, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { + PropagateTConfiguration(factory, conf) + return &tFramedTransportFactory{ + factory: factory, + cfg: conf, + } +} + +func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { + PropagateTConfiguration(base, p.cfg) + tt, err := p.factory.GetTransport(base) + if err != nil { + return nil, err + } + return NewTFramedTransportConf(tt, p.cfg), nil +} + +func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.factory, cfg) + p.cfg = cfg +} + +// Deprecated: Use NewTFramedTransportConf instead. +func NewTFramedTransport(transport TTransport) *TFramedTransport { + return NewTFramedTransportConf(transport, &TConfiguration{ + MaxFrameSize: DEFAULT_MAX_LENGTH, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTFramedTransportConf instead. 
+func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { + return NewTFramedTransportConf(transport, &TConfiguration{ + MaxFrameSize: int32(maxLength), + + noPropagation: true, + }) +} + +func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport { + PropagateTConfiguration(transport, conf) + return &TFramedTransport{ + transport: transport, + reader: bufio.NewReader(transport), + cfg: conf, + } +} + +func (p *TFramedTransport) Open() error { + return p.transport.Open() +} + +func (p *TFramedTransport) IsOpen() bool { + return p.transport.IsOpen() +} + +func (p *TFramedTransport) Close() error { + return p.transport.Close() +} + +func (p *TFramedTransport) Read(buf []byte) (read int, err error) { + read, err = p.readBuf.Read(buf) + if err != io.EOF { + return + } + + // For bytes.Buffer.Read, EOF would only happen when read is zero, + // but still, do a sanity check, + // in case that behavior is changed in a future version of go stdlib. + // When that happens, just return nil error, + // and let the caller call Read again to read the next frame. + if read > 0 { + return read, nil + } + + // Reaching here means that the last Read finished the last frame, + // so we need to read the next frame into readBuf now. 
+ if err = p.readFrame(); err != nil { + return read, err + } + newRead, err := p.Read(buf[read:]) + return read + newRead, err +} + +func (p *TFramedTransport) ReadByte() (c byte, err error) { + buf := p.buffer[:1] + _, err = p.Read(buf) + if err != nil { + return + } + c = buf[0] + return +} + +func (p *TFramedTransport) Write(buf []byte) (int, error) { + n, err := p.writeBuf.Write(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) WriteByte(c byte) error { + return p.writeBuf.WriteByte(c) +} + +func (p *TFramedTransport) WriteString(s string) (n int, err error) { + return p.writeBuf.WriteString(s) +} + +func (p *TFramedTransport) Flush(ctx context.Context) error { + size := p.writeBuf.Len() + buf := p.buffer[:4] + binary.BigEndian.PutUint32(buf, uint32(size)) + _, err := p.transport.Write(buf) + if err != nil { + p.writeBuf.Reset() + return NewTTransportExceptionFromError(err) + } + if size > 0 { + if _, err := io.Copy(p.transport, &p.writeBuf); err != nil { + p.writeBuf.Reset() + return NewTTransportExceptionFromError(err) + } + } + err = p.transport.Flush(ctx) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) readFrame() error { + buf := p.buffer[:4] + if _, err := io.ReadFull(p.reader, buf); err != nil { + return err + } + size := binary.BigEndian.Uint32(buf) + if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) { + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) + } + _, err := io.CopyN(&p.readBuf, p.reader, int64(size)) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { + return uint64(p.readBuf.Len()) +} + +// SetTConfiguration implements TConfigurationSetter. 
+func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { + PropagateTConfiguration(p.transport, cfg) + p.cfg = cfg +} + +var ( + _ TConfigurationSetter = (*tFramedTransportFactory)(nil) + _ TConfigurationSetter = (*TFramedTransport)(nil) +) diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go similarity index 97% rename from vendor/github.com/uber/jaeger-client-go/thrift/header_context.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go index ca25568823..ac9bd4882b 100644 --- a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go @@ -23,7 +23,7 @@ import ( "context" ) -// See https://pkg.go.dev/context#WithValue on why do we need the unexported typedefs. +// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. 
type ( headerKey string headerKeyList int diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go similarity index 99% rename from vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go index f5736df427..6a99535a45 100644 --- a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go @@ -28,7 +28,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" ) // Size in bytes for 32-bit ints. @@ -374,7 +373,7 @@ func (t *THeaderTransport) ReadFrame(ctx context.Context) error { if err != nil { return err } - t.frameReader = ioutil.NopCloser(&t.frameBuffer) + t.frameReader = io.NopCloser(&t.frameBuffer) // Peek and handle the next 32 bits. 
buf = t.frameBuffer.Bytes()[:size32] diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go new file mode 100644 index 0000000000..9a2cc98cc7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/url" + "strconv" +) + +// Default to using the shared http client. Library users are +// free to change this global client or specify one through +// THttpClientOptions. 
+var DefaultHttpClient *http.Client = http.DefaultClient + +type THttpClient struct { + client *http.Client + response *http.Response + url *url.URL + requestBuffer *bytes.Buffer + header http.Header + nsecConnectTimeout int64 + nsecReadTimeout int64 +} + +type THttpClientTransportFactory struct { + options THttpClientOptions + url string +} + +func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*THttpClient) + if ok && t.url != nil { + return NewTHttpClientWithOptions(t.url.String(), p.options) + } + } + return NewTHttpClientWithOptions(p.url, p.options) +} + +type THttpClientOptions struct { + // If nil, DefaultHttpClient is used + Client *http.Client +} + +func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return &THttpClientTransportFactory{url: url, options: options} +} + +func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + parsedURL, err := url.Parse(urlstr) + if err != nil { + return nil, err + } + buf := make([]byte, 0, 1024) + client := options.Client + if client == nil { + client = DefaultHttpClient + } + httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} + return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil +} + +func NewTHttpClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} + +// Set the HTTP Header for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") +func (p *THttpClient) SetHeader(key 
string, value string) { + p.header.Add(key, value) +} + +// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// hdrValue := httpTrans.GetHeader("User-Agent") +func (p *THttpClient) GetHeader(key string) string { + return p.header.Get(key) +} + +// Deletes the HTTP Header given a Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.DelHeader("User-Agent") +func (p *THttpClient) DelHeader(key string) { + p.header.Del(key) +} + +func (p *THttpClient) Open() error { + // do nothing + return nil +} + +func (p *THttpClient) IsOpen() bool { + return p.response != nil || p.requestBuffer != nil +} + +func (p *THttpClient) closeResponse() error { + var err error + if p.response != nil && p.response.Body != nil { + // The docs specify that if keepalive is enabled and the response body is not + // read to completion the connection will never be returned to the pool and + // reused. Errors are being ignored here because if the connection is invalid + // and this fails for some reason, the Close() method will do any remaining + // cleanup. 
+ io.Copy(io.Discard, p.response.Body) + + err = p.response.Body.Close() + } + + p.response = nil + return err +} + +func (p *THttpClient) Close() error { + if p.requestBuffer != nil { + p.requestBuffer.Reset() + p.requestBuffer = nil + } + return p.closeResponse() +} + +func (p *THttpClient) Read(buf []byte) (int, error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + n, err := p.response.Body.Read(buf) + if n > 0 && (err == nil || errors.Is(err, io.EOF)) { + return n, nil + } + return n, NewTTransportExceptionFromError(err) +} + +func (p *THttpClient) ReadByte() (c byte, err error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + return readByte(p.response.Body) +} + +func (p *THttpClient) Write(buf []byte) (int, error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.Write(buf) +} + +func (p *THttpClient) WriteByte(c byte) error { + if p.requestBuffer == nil { + return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.WriteByte(c) +} + +func (p *THttpClient) WriteString(s string) (n int, err error) { + if p.requestBuffer == nil { + return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") + } + return p.requestBuffer.WriteString(s) +} + +func (p *THttpClient) Flush(ctx context.Context) error { + // Close any previous response body to avoid leaking connections. + p.closeResponse() + + // Give up the ownership of the current request buffer to http request, + // and create a new buffer for the next request. 
+ buf := p.requestBuffer + p.requestBuffer = new(bytes.Buffer) + req, err := http.NewRequest("POST", p.url.String(), buf) + if err != nil { + return NewTTransportExceptionFromError(err) + } + req.Header = p.header + if ctx != nil { + req = req.WithContext(ctx) + } + response, err := p.client.Do(req) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if response.StatusCode != http.StatusOK { + // Close the response to avoid leaking file descriptors. closeResponse does + // more than just call Close(), so temporarily assign it and reuse the logic. + p.response = response + p.closeResponse() + + // TODO(pomack) log bad response + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) + } + p.response = response + return nil +} + +func (p *THttpClient) RemainingBytes() (num_bytes uint64) { + len := p.response.ContentLength + if len >= 0 { + return uint64(len) + } + + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +// Deprecated: Use NewTHttpClientTransportFactory instead. +func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. +func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, options) +} + +// Deprecated: Use NewTHttpClientWithOptions instead. +func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, options) +} + +// Deprecated: Use NewTHttpClient instead. 
+func NewTHttpPostClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go new file mode 100644 index 0000000000..bc6922762a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "compress/gzip" + "io" + "net/http" + "strings" + "sync" +) + +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} + +// gz transparently compresses the HTTP response if the client supports it. +func gz(handler http.HandlerFunc) http.HandlerFunc { + sp := &sync.Pool{ + New: func() interface{} { + return gzip.NewWriter(nil) + }, + } + + return func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + handler(w, r) + return + } + w.Header().Set("Content-Encoding", "gzip") + gz := sp.Get().(*gzip.Writer) + gz.Reset(w) + defer func() { + _ = gz.Close() + sp.Put(gz) + }() + gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} + handler(gzw, r) + } +} + +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w gzipResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go new file mode 100644 index 0000000000..1c477990fe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "context" + "io" +) + +// StreamTransport is a Transport made of an io.Reader and/or an io.Writer +type StreamTransport struct { + io.Reader + io.Writer + isReadWriter bool + closed bool +} + +type StreamTransportFactory struct { + Reader io.Reader + Writer io.Writer + isReadWriter bool +} + +func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*StreamTransport) + if ok { + if t.isReadWriter { + return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil + } + if t.Reader != nil && t.Writer != nil { + return NewStreamTransport(t.Reader, t.Writer), nil + } + if t.Reader != nil && t.Writer == nil { + return NewStreamTransportR(t.Reader), nil + } + if t.Reader == nil && t.Writer != nil { + return NewStreamTransportW(t.Writer), nil + } + return &StreamTransport{}, nil + } + } + if p.isReadWriter { + return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil + } + if p.Reader != nil && p.Writer != nil { + return NewStreamTransport(p.Reader, p.Writer), nil + } + if p.Reader != nil && p.Writer == nil { + return NewStreamTransportR(p.Reader), nil + } + if p.Reader == nil && p.Writer != nil { + return NewStreamTransportW(p.Writer), nil + } + return &StreamTransport{}, 
nil +} + +func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { + return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} +} + +func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportR(r io.Reader) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r)} +} + +func NewStreamTransportW(w io.Writer) *StreamTransport { + return &StreamTransport{Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { + bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) + return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} +} + +func (p *StreamTransport) IsOpen() bool { + return !p.closed +} + +// implicitly opened on creation, can't be reopened once closed +func (p *StreamTransport) Open() error { + if !p.closed { + return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") + } else { + return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") + } +} + +// Closes both the input and output streams. +func (p *StreamTransport) Close() error { + if p.closed { + return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") + } + p.closed = true + closedReader := false + if p.Reader != nil { + c, ok := p.Reader.(io.Closer) + if ok { + e := c.Close() + closedReader = true + if e != nil { + return e + } + } + p.Reader = nil + } + if p.Writer != nil && (!closedReader || !p.isReadWriter) { + c, ok := p.Writer.(io.Closer) + if ok { + e := c.Close() + if e != nil { + return e + } + } + p.Writer = nil + } + return nil +} + +// Flushes the underlying output stream if not null. 
+func (p *StreamTransport) Flush(ctx context.Context) error { + if p.Writer == nil { + return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") + } + f, ok := p.Writer.(Flusher) + if ok { + err := f.Flush() + if err != nil { + return NewTTransportExceptionFromError(err) + } + } + return nil +} + +func (p *StreamTransport) Read(c []byte) (n int, err error) { + n, err = p.Reader.Read(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) ReadByte() (c byte, err error) { + f, ok := p.Reader.(io.ByteReader) + if ok { + c, err = f.ReadByte() + } else { + c, err = readByte(p.Reader) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) Write(c []byte) (n int, err error) { + n, err = p.Writer.Write(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteByte(c byte) (err error) { + f, ok := p.Writer.(io.ByteWriter) + if ok { + err = f.WriteByte(c) + } else { + err = writeByte(p.Writer, c) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteString(s string) (n int, err error) { + f, ok := p.Writer.(stringWriter) + if ok { + n, err = f.WriteString(s) + } else { + n, err = p.Writer.Write([]byte(s)) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +// SetTConfiguration implements TConfigurationSetter for propagation. 
+func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(p.Reader, conf) + PropagateTConfiguration(p.Writer, conf) +} + +var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go new file mode 100644 index 0000000000..8e59d16cfd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go @@ -0,0 +1,591 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "encoding/base64" + "fmt" +) + +const ( + THRIFT_JSON_PROTOCOL_VERSION = 1 +) + +// for references to _ParseContext see tsimplejson_protocol.go + +// JSON protocol implementation for thrift. 
+// Utilizes Simple JSON protocol +// +type TJSONProtocol struct { + *TSimpleJSONProtocol +} + +// Constructor +func NewTJSONProtocol(t TTransport) *TJSONProtocol { + v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} + v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) + v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) + return v +} + +// Factory +type TJSONProtocolFactory struct{} + +func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTJSONProtocol(trans) +} + +func NewTJSONProtocolFactory() *TJSONProtocolFactory { + return &TJSONProtocolFactory{} +} + +func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { + return e + } + if e := p.WriteString(ctx, name); e != nil { + return e + } + if e := p.WriteByte(ctx, int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(ctx, seqId); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { + if e := p.WriteI16(ctx, id); e != nil { + return e + } + if e := p.OutputObjectBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(typeId) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldStop(ctx 
context.Context) error { return nil } + +func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(keyType) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + s, e1 = p.TypeIdToString(valueType) + if e1 != nil { + return e1 + } + if e := p.WriteString(ctx, s); e != nil { + return e + } + if e := p.WriteI64(ctx, int64(size)); e != nil { + return e + } + return p.OutputObjectBegin() +} + +func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { + if e := p.OutputObjectEnd(); e != nil { + return e + } + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { + if b { + return p.WriteI32(ctx, 1) + } + return p.WriteI32(ctx, 0) +} + +func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { + return p.WriteI32(ctx, int32(b)) +} + +func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { + return p.WriteI32(ctx, int32(v)) +} + +func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { + return p.OutputF64(v) +} + +func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { + return 
p.OutputString(v) +} + +func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. +func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + version, err := p.ReadI32(ctx) + if err != nil { + return name, typeId, seqId, err + } + if version != THRIFT_JSON_PROTOCOL_VERSION { + e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) + return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) + + } + if name, err = p.ReadString(ctx); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte(ctx) + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(ctx); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { + err := p.ParseListEnd() + return err +} + +func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name 
string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { + b, _ := p.reader.Peek(1) + if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { + return "", STOP, -1, nil + } + fieldId, err := p.ReadI16(ctx) + if err != nil { + return "", STOP, fieldId, err + } + if _, err = p.ParseObjectStart(); err != nil { + return "", STOP, fieldId, err + } + sType, err := p.ReadString(ctx) + if err != nil { + return "", STOP, fieldId, err + } + fType, err := p.StringToTypeId(sType) + return "", fType, fieldId, err +} + +func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + sKeyType, e := p.ReadString(ctx) + if e != nil { + return keyType, valueType, size, e + } + keyType, e = p.StringToTypeId(sKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + sValueType, e := p.ReadString(ctx) + if e != nil { + return keyType, valueType, size, e + } + valueType, e = p.StringToTypeId(sValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, e := p.ReadI64(ctx) + if e != nil { + return keyType, valueType, size, e + } + size = int(iSize) + + _, e = p.ParseObjectStart() + return keyType, valueType, size, e +} + +func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error { + e := p.ParseObjectEnd() + if e != nil { + return e + } + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadListEnd(ctx 
context.Context) error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { + value, err := p.ReadI32(ctx) + return (value != 0), err +} + +func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { + v, err := p.ReadI64(ctx) + return int8(v), err +} + +func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { + v, err := p.ReadI64(ctx) + return int16(v), err +} + +func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { + v, err := p.ReadI64(ctx) + return int32(v), err +} + +func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) ReadBinary(ctx context.Context) 
([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { + err = p.writer.Flush() + if err == nil { + err = p.trans.Flush(ctx) + } + return NewTProtocolException(err) +} + +func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { + return SkipDefaultDepth(ctx, p, fieldType) +} + +func (p *TJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + // We don't really use the ctx in ReadString implementation, + // so this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. 
+ sElemType, err := p.ReadString(context.Background()) + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err2 := p.ParseI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + // We don't really use the ctx in ReadString implementation, + // so this is safe for now. + // We might want to add context to ParseElemListBegin if we start to use + // ctx in ReadString implementation in the future. + sElemType, err := p.ReadString(context.Background()) + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, _, err2 := p.ParseI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { + switch byte(fieldType) { + case BOOL: + return "tf", nil + case BYTE: + return "i8", nil + case I16: + return "i16", nil + case I32: + return "i32", nil + case I64: + return "i64", nil + case DOUBLE: + return "dbl", nil + case STRING: + return "str", nil + case STRUCT: + return "rec", nil + case MAP: + return "map", nil + case SET: + return "set", nil + case LIST: + return "lst", nil + } + + e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { + switch fieldType { + case 
"tf": + return TType(BOOL), nil + case "i8": + return TType(BYTE), nil + case "i16": + return TType(I16), nil + case "i32": + return TType(I32), nil + case "i64": + return TType(I64), nil + case "dbl": + return TType(DOUBLE), nil + case "str": + return TType(STRING), nil + case "rec": + return TType(STRUCT), nil + case "map": + return TType(MAP), nil + case "set": + return TType(SET), nil + case "lst": + return TType(LIST), nil + } + + e := fmt.Errorf("Unknown type identifier: %s", fieldType) + return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go new file mode 100644 index 0000000000..c42aac998b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "log" + "os" + "testing" +) + +// Logger is a simple wrapper of a logging function. 
+// +// In reality the users might actually use different logging libraries, and they +// are not always compatible with each other. +// +// Logger is meant to be a simple common ground that it's easy to wrap whatever +// logging library they use into. +// +// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design +// discussion behind it. +type Logger func(msg string) + +// NopLogger is a Logger implementation that does nothing. +func NopLogger(msg string) {} + +// StdLogger wraps stdlib log package into a Logger. +// +// If logger passed in is nil, it will fallback to use stderr and default flags. +func StdLogger(logger *log.Logger) Logger { + if logger == nil { + logger = log.New(os.Stderr, "", log.LstdFlags) + } + return func(msg string) { + logger.Print(msg) + } +} + +// TestLogger is a Logger implementation can be used in test codes. +// +// It fails the test when being called. +func TestLogger(tb testing.TB) Logger { + return func(msg string) { + tb.Errorf("logger called with msg: %q", msg) + } +} + +func fallbackLogger(logger Logger) Logger { + if logger == nil { + return StdLogger(nil) + } + return logger +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go diff --git 
a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go new file mode 100644 index 0000000000..8a788df02b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the +// TProcessorFunctions for that TProcessor. +// +// Middlewares are passed in the name of the function as set in the processor +// map of the TProcessor. +type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction + +// WrapProcessor takes an existing TProcessor and wraps each of its inner +// TProcessorFunctions with the middlewares passed in and returns it. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. 
Middlewares[n] +func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { + for name, processorFunc := range processor.ProcessorMap() { + wrapped := processorFunc + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + wrapped = middlewares[i](name, wrapped) + } + processor.AddToProcessorMap(name, wrapped) + } + return processor +} + +// WrappedTProcessorFunction is a convenience struct that implements the +// TProcessorFunction interface that can be used when implementing custom +// Middleware. +type WrappedTProcessorFunction struct { + // Wrapped is called by WrappedTProcessorFunction.Process and should be a + // "wrapped" call to a base TProcessorFunc.Process call. + Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// Process implements the TProcessorFunction interface using p.Wrapped. +func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { + return p.Wrapped(ctx, seqID, in, out) +} + +// verify that WrappedTProcessorFunction implements TProcessorFunction +var ( + _ TProcessorFunction = WrappedTProcessorFunction{} + _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) +) + +// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls +// with custom middleware. +type ClientMiddleware func(TClient) TClient + +// WrappedTClient is a convenience struct that implements the TClient interface +// using inner Wrapped function. +// +// This is provided to aid in developing ClientMiddleware. +type WrappedTClient struct { + Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) +} + +// Call implements the TClient interface by calling and returning c.Wrapped. 
+func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { + return c.Wrapped(ctx, method, args, result) +} + +// verify that WrappedTClient implements TClient +var ( + _ TClient = WrappedTClient{} + _ TClient = (*WrappedTClient)(nil) +) + +// WrapClient wraps the given TClient in the given middlewares. +// +// Middlewares will be called in the order that they are defined: +// +// 1. Middlewares[0] +// 2. Middlewares[1] +// ... +// N. Middlewares[n] +func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { + // Add middlewares in reverse so the first in the list is the outermost. + for i := len(middlewares) - 1; i >= 0; i-- { + client = middlewares[i](client) + } + return client +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go new file mode 100644 index 0000000000..cacbf6bef3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" + "strings" +) + +/* +TMultiplexedProtocol is a protocol-independent concrete decorator +that allows a Thrift client to communicate with a multiplexing Thrift server, +by prepending the service name to the function name during function calls. + +NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request +from a multiplexing client. + +This example uses a single socket transport to invoke two services: + +socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) +transport := thrift.NewTFramedTransport(socket) +protocol := thrift.NewTBinaryProtocolTransport(transport) + +mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") +service := Calculator.NewCalculatorClient(mp) + +mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") +service2 := WeatherReport.NewWeatherReportClient(mp2) + +err := transport.Open() +if err != nil { + t.Fatal("Unable to open client socket", err) +} + +fmt.Println(service.Add(2,2)) +fmt.Println(service2.GetTemperature()) +*/ + +type TMultiplexedProtocol struct { + TProtocol + serviceName string +} + +const MULTIPLEXED_SEPARATOR = ":" + +func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { + return &TMultiplexedProtocol{ + TProtocol: protocol, + serviceName: serviceName, + } +} + +func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { + if typeId == CALL || typeId == ONEWAY { + return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) + } else { + return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) + } +} + +/* +TMultiplexedProcessor is a TProcessor allowing +a single TServer to provide multiple services. 
+ +To do so, you instantiate the processor and then register additional +processors with it, as shown in the following example: + +var processor = thrift.NewTMultiplexedProcessor() + +firstProcessor := +processor.RegisterProcessor("FirstService", firstProcessor) + +processor.registerProcessor( + "Calculator", + Calculator.NewCalculatorProcessor(&CalculatorHandler{}), +) + +processor.registerProcessor( + "WeatherReport", + WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), +) + +serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) +if err != nil { + t.Fatal("Unable to create server socket", err) +} +server := thrift.NewTSimpleServer2(processor, serverTransport) +server.Serve(); +*/ + +type TMultiplexedProcessor struct { + serviceProcessorMap map[string]TProcessor + DefaultProcessor TProcessor +} + +func NewTMultiplexedProcessor() *TMultiplexedProcessor { + return &TMultiplexedProcessor{ + serviceProcessorMap: make(map[string]TProcessor), + } +} + +// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" +// to TProcessorFunction for any registered processors. If there is also a +// DefaultProcessor, the keys for the methods on that processor will simply be +// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and +// other registered processors, then the keys will be a mix of both formats. +// +// The implementation differs with other TProcessors in that the map returned is +// a new map, while most TProcessors just return their internal mapping directly. +// This means that edits to the map returned by this implementation of ProcessorMap +// will not affect the underlying mapping within the TMultiplexedProcessor. 
+func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { + processorFuncMap := make(map[string]TProcessorFunction) + for name, processor := range t.serviceProcessorMap { + for method, processorFunc := range processor.ProcessorMap() { + processorFuncName := name + MULTIPLEXED_SEPARATOR + method + processorFuncMap[processorFuncName] = processorFunc + } + } + if t.DefaultProcessor != nil { + for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { + processorFuncMap[method] = processorFunc + } + } + return processorFuncMap +} + +// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on +// the format of "name". +// +// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", +// then it sets the given TProcessorFunction on the inner TProcessor with the +// ProcessorName component using the FunctionName component. +// +// If "name" is just in the format "{FunctionName}", that is to say there is no +// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor +// configured, then it will set the given TProcessorFunction on the DefaultProcessor +// using the given name. +// +// If there is not a TProcessor available for the given name, then this function +// does nothing. This can happen when there is no TProcessor registered for +// the given ProcessorName or if all that is given is the FunctionName and there +// is no DefaultProcessor set. 
+func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) { + components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(components) != 2 { + if t.DefaultProcessor != nil && len(components) == 1 { + t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc) + } + return + } + processorName := components[0] + funcName := components[1] + if processor, ok := t.serviceProcessorMap[processorName]; ok { + processor.AddToProcessorMap(funcName, processorFunc) + } + +} + +// verify that TMultiplexedProcessor implements TProcessor +var _ TProcessor = (*TMultiplexedProcessor)(nil) + +func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { + t.DefaultProcessor = processor +} + +func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { + if t.serviceProcessorMap == nil { + t.serviceProcessorMap = make(map[string]TProcessor) + } + t.serviceProcessorMap[name] = processor +} + +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin(ctx) + if err != nil { + return false, NewTProtocolException(err) + } + if typeId != CALL && typeId != ONEWAY { + return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId)) + } + //extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, NewTProtocolException(fmt.Errorf( + "Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", + name, + )) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, NewTProtocolException(fmt.Errorf( + "Service name not found: %s. 
Did you forget to call registerProcessor()?", + v[0], + )) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} + +//Protocol that use stored message for ReadMessageBegin +type storedMessageProtocol struct { + TProtocol + name string + typeId TMessageType + seqid int32 +} + +func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { + return &storedMessageProtocol{protocol, name, typeId, seqid} +} + +func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { + return s.name, s.typeId, s.seqid, nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/numeric.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go new file mode 100644 index 0000000000..fb564ea819 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +/////////////////////////////////////////////////////////////////////////////// +// This file is home to helpers that convert from various base types to +// respective pointer types. This is necessary because Go does not permit +// references to constants, nor can a pointer type to base type be allocated +// and initialized in a single expression. +// +// E.g., this is not allowed: +// +// var ip *int = &5 +// +// But this *is* allowed: +// +// func IntPtr(i int) *int { return &i } +// var ip *int = IntPtr(5) +// +// Since pointers to base types are commonplace as [optional] fields in +// exported thrift structs, we factor such helpers here. 
+/////////////////////////////////////////////////////////////////////////////// + +func Float32Ptr(v float32) *float32 { return &v } +func Float64Ptr(v float64) *float64 { return &v } +func IntPtr(v int) *int { return &v } +func Int8Ptr(v int8) *int8 { return &v } +func Int16Ptr(v int16) *int16 { return &v } +func Int32Ptr(v int32) *int32 { return &v } +func Int64Ptr(v int64) *int64 { return &v } +func StringPtr(v string) *string { return &v } +func Uint32Ptr(v uint32) *uint32 { return &v } +func Uint64Ptr(v uint64) *uint64 { return &v } +func BoolPtr(v bool) *bool { return &v } +func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/protocol.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go 
b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go similarity index 97% rename from vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go index 02f0613956..d884c6ac6c 100644 --- a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go @@ -23,7 +23,7 @@ import ( "context" ) -// See https://pkg.go.dev/context#WithValue on why do we need the unexported typedefs. +// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. 
type responseHelperKey struct{} // TResponseHelper defines a object with a set of helper functions that can be diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/serializer.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go new file mode 100644 index 0000000000..f813fa3532 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TServer interface { + ProcessorFactory() TProcessorFactory + ServerTransport() TServerTransport + InputTransportFactory() TTransportFactory + OutputTransportFactory() TTransportFactory + InputProtocolFactory() TProtocolFactory + OutputProtocolFactory() TProtocolFactory + + // Starts the server + Serve() error + // Stops the server. This is optional on a per-implementation basis. Not + // all servers are required to be cleanly stoppable. + Stop() error +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go new file mode 100644 index 0000000000..7dd24ae364 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net" + "sync" + "time" +) + +type TServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + + // Protects the interrupted value to make it thread safe. + mu sync.RWMutex + interrupted bool +} + +func NewTServerSocket(listenAddr string) (*TServerSocket, error) { + return NewTServerSocketTimeout(listenAddr, 0) +} + +func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil +} + +// Creates a TServerSocket from a net.Addr +func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { + return &TServerSocket{addr: addr, clientTimeout: clientTimeout} +} + +func (p *TServerSocket) Listen() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return nil + } + l, err := net.Listen(p.addr.Network(), p.addr.String()) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TServerSocket) Accept() (TTransport, error) { + p.mu.RLock() + interrupted := p.interrupted + p.mu.RUnlock() + + if interrupted { + return nil, errTransportInterrupted + } + + p.mu.Lock() + listener := p.listener + p.mu.Unlock() + if listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + + conn, err := listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } 
+ return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TServerSocket) Open() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } + return p.addr +} + +func (p *TServerSocket) Close() error { + var err error + p.mu.Lock() + if p.IsListening() { + err = p.listener.Close() + p.listener = nil + } + p.mu.Unlock() + return err +} + +func (p *TServerSocket) Interrupt() error { + p.mu.Lock() + p.interrupted = true + p.mu.Unlock() + p.Close() + + return nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go 
b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go new file mode 100644 index 0000000000..e911bf1668 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "net" + "time" +) + +type TSocket struct { + conn *socketConn + addr net.Addr + cfg *TConfiguration + + connectTimeout time.Duration + socketTimeout time.Duration +} + +// Deprecated: Use NewTSocketConf instead. 
+func NewTSocket(hostPort string) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + noPropagation: true, + }) +} + +// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", hostPort) + if err != nil { + return nil, err + } + return NewTSocketFromAddrConf(addr, conf), nil +} + +// Deprecated: Use NewTSocketConf instead. +func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { + return NewTSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromAddrConf creates a TSocket from a net.Addr +func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { + return &TSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromAddrConf instead. +func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { + return NewTSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connTimeout, + SocketTimeout: soTimeout, + + noPropagation: true, + }) +} + +// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. +func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { + return &TSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSocketFromConnConf instead. 
+func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { + return NewTSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to set connect and socket timeouts. +func (p *TSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{ + noPropagation: true, + } + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSocket) Open() error { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + var err error + if p.conn, err = createSocketConnFromReturn(net.DialTimeout( + p.addr.Network(), + p.addr.String(), + p.cfg.GetConnectTimeout(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +//Returns the remote address of the socket. +func (p *TSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. 
+ n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go new file mode 100644 index 0000000000..c1cc30c6cc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "net" +) + +// socketConn is a wrapped net.Conn that tries to do connectivity check. +type socketConn struct { + net.Conn + + buffer [1]byte +} + +var _ net.Conn = (*socketConn)(nil) + +// createSocketConnFromReturn is a language sugar to help create socketConn from +// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. +func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { + if err != nil { + return nil, err + } + return &socketConn{ + Conn: conn, + }, nil +} + +// wrapSocketConn wraps an existing net.Conn into *socketConn. +func wrapSocketConn(conn net.Conn) *socketConn { + // In case conn is already wrapped, + // return it as-is and avoid double wrapping. + if sc, ok := conn.(*socketConn); ok { + return sc + } + + return &socketConn{ + Conn: conn, + } +} + +// isValid checks whether there's a valid connection. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// It's the same as the previous implementation of TSocket.IsOpen and +// TSSLSocket.IsOpen before we added connectivity check. +func (sc *socketConn) isValid() bool { + return sc != nil && sc.Conn != nil +} + +// IsOpen checks whether the connection is open. +// +// It's nil safe, and returns false if sc itself is nil, or if the underlying +// connection is nil. +// +// Otherwise, it tries to do a connectivity check and returns the result. +// +// It also has the side effect of resetting the previously set read deadline on +// the socket. As a result, it shouldn't be called between setting read deadline +// and doing actual read. +func (sc *socketConn) IsOpen() bool { + if !sc.isValid() { + return false + } + return sc.checkConn() == nil +} + +// Read implements io.Reader. +// +// On Windows, it behaves the same as the underlying net.Conn.Read. 
+// +// On non-Windows, it treats len(p) == 0 as a connectivity check instead of +// readability check, which means instead of blocking until there's something to +// read (readability check), or always return (0, nil) (the default behavior of +// go's stdlib implementation on non-Windows), it never blocks, and will return +// an error if the connection is lost. +func (sc *socketConn) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, sc.read0() + } + + return sc.Conn.Read(p) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go new file mode 100644 index 0000000000..f5fab3ab65 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go @@ -0,0 +1,83 @@ +// +build !windows + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" + "syscall" + "time" +) + +// We rely on this variable to be the zero time, +// but define it as global variable to avoid repetitive allocations. 
+// Please DO NOT mutate this variable in any way. +var zeroTime time.Time + +func (sc *socketConn) read0() error { + return sc.checkConn() +} + +func (sc *socketConn) checkConn() error { + syscallConn, ok := sc.Conn.(syscall.Conn) + if !ok { + // No way to check, return nil + return nil + } + + // The reading about to be done here is non-blocking so we don't really + // need a read deadline. We just need to clear the previously set read + // deadline, if any. + sc.Conn.SetReadDeadline(zeroTime) + + rc, err := syscallConn.SyscallConn() + if err != nil { + return err + } + + var n int + + if readErr := rc.Read(func(fd uintptr) bool { + n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) + return true + }); readErr != nil { + return readErr + } + + if n > 0 { + // We got something, which means we are good + return nil + } + + if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { + // This means the connection is still open but we don't have + // anything to read right now. + return nil + } + + if err != nil { + return err + } + + // At this point, it means the other side already closed the connection. + return io.EOF +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go new file mode 100644 index 0000000000..679838c3b6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go @@ -0,0 +1,34 @@ +// +build windows + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +func (sc *socketConn) read0() error { + // On windows, we fallback to the default behavior of reading 0 bytes. + var p []byte + _, err := sc.Conn.Read(p) + return err +} + +func (sc *socketConn) checkConn() error { + // On windows, we always return nil for this check. + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go new file mode 100644 index 0000000000..907afca326 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "net" + "time" +) + +type TSSLServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + interrupted bool + cfg *tls.Config +} + +func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { + return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) +} + +func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil +} + +func (p *TSSLServerSocket) Listen() error { + if p.IsListening() { + return nil + } + l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TSSLServerSocket) Accept() (TTransport, error) { + if p.interrupted { + return nil, errTransportInterrupted + } + if p.listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + conn, err := p.listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TSSLServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSSLServerSocket) Open() error { + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TSSLServerSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSSLServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TSSLServerSocket) Interrupt() error { + p.interrupted = true + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go new file mode 100644 index 0000000000..6359a74ceb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "crypto/tls" + "net" + "time" +) + +type TSSLSocket struct { + conn *socketConn + // hostPort contains host:port (e.g. "asdf.com:12345"). The field is + // only valid if addr is nil. + hostPort string + // addr is nil when hostPort is not "", and is only used when the + // TSSLSocket is constructed from a net.Addr. + addr net.Addr + + cfg *TConfiguration +} + +// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. +// +// Example: +// +// trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{ +// ConnectTimeout: time.Second, // Use 0 for no timeout +// SocketTimeout: time.Second, // Use 0 for no timeout +// }) +func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) { + if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + return &TSSLSocket{ + hostPort: hostPort, + cfg: conf, + }, nil +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// Deprecated: Use NewTSSLSocketConf instead. +func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { + return NewTSSLSocketConf(hostPort, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. +func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + addr: addr, + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromAddrConf instead. 
+func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ + ConnectTimeout: connectTimeout, + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. +func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { + return &TSSLSocket{ + conn: wrapSocketConn(conn), + addr: conn.RemoteAddr(), + cfg: conf, + } +} + +// Deprecated: Use NewTSSLSocketFromConnConf instead. +func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { + return NewTSSLSocketFromConnConf(conn, &TConfiguration{ + SocketTimeout: socketTimeout, + TLSConfig: cfg, + + noPropagation: true, + }) +} + +// SetTConfiguration implements TConfigurationSetter. +// +// It can be used to change connect and socket timeouts. +func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { + p.cfg = conf +} + +// Sets the connect timeout +func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.ConnectTimeout = timeout + return nil +} + +// Sets the socket timeout +func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { + if p.cfg == nil { + p.cfg = &TConfiguration{} + } + p.cfg.SocketTimeout = timeout + return nil +} + +func (p *TSSLSocket) pushDeadline(read, write bool) { + var t time.Time + if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { + t = time.Now().Add(time.Duration(timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSSLSocket) Open() error { + var err error + // If we have a hostname, we need to pass the hostname to tls.Dial for + // certificate hostname checks. + if p.hostPort != "" { + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + "tcp", + p.hostPort, + p.cfg.GetTLSConfig(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } else { + if p.conn.isValid() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( + &net.Dialer{ + Timeout: p.cfg.GetConnectTimeout(), + }, + p.addr.Network(), + p.addr.String(), + p.cfg.GetTLSConfig(), + )); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSSLSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSSLSocket) IsOpen() bool { + return p.conn.IsOpen() +} + +// Closes the socket. +func (p *TSSLSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +func (p *TSSLSocket) Read(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between + // p.pushDeadline and p.conn.Read could cause the deadline set inside + // p.pushDeadline being reset, thus need to be avoided. 
+ n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSSLSocket) Write(buf []byte) (int, error) { + if !p.conn.isValid() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSSLSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSSLSocket) Interrupt() error { + if !p.conn.isValid() { + return nil + } + return p.conn.Close() +} + +func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the truth is, we just don't know unless framed is used +} + +var _ TConfigurationSetter = (*TSSLSocket)(nil) diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/transport.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go diff --git 
a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go similarity index 100% rename from vendor/github.com/uber/jaeger-client-go/thrift/type.go rename to vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go new file mode 100644 index 0000000000..259943a627 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go @@ -0,0 +1,137 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. + */ + +package thrift + +import ( + "compress/zlib" + "context" + "io" +) + +// TZlibTransportFactory is a factory for TZlibTransport instances +type TZlibTransportFactory struct { + level int + factory TTransportFactory +} + +// TZlibTransport is a TTransport implementation that makes use of zlib compression. 
+type TZlibTransport struct { + reader io.ReadCloser + transport TTransport + writer *zlib.Writer +} + +// GetTransport constructs a new instance of NewTZlibTransport +func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if p.factory != nil { + // wrap other factory + var err error + trans, err = p.factory.GetTransport(trans) + if err != nil { + return nil, err + } + } + return NewTZlibTransport(trans, p.level) +} + +// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory +func NewTZlibTransportFactory(level int) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: nil} +} + +// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory +// as a wrapper over existing transport factory +func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: factory} +} + +// NewTZlibTransport constructs a new instance of TZlibTransport +func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { + w, err := zlib.NewWriterLevel(trans, level) + if err != nil { + return nil, err + } + + return &TZlibTransport{ + writer: w, + transport: trans, + }, nil +} + +// Close closes the reader and writer (flushing any unwritten data) and closes +// the underlying transport. +func (z *TZlibTransport) Close() error { + if z.reader != nil { + if err := z.reader.Close(); err != nil { + return err + } + } + if err := z.writer.Close(); err != nil { + return err + } + return z.transport.Close() +} + +// Flush flushes the writer and its underlying transport. 
+func (z *TZlibTransport) Flush(ctx context.Context) error { + if err := z.writer.Flush(); err != nil { + return err + } + return z.transport.Flush(ctx) +} + +// IsOpen returns true if the transport is open +func (z *TZlibTransport) IsOpen() bool { + return z.transport.IsOpen() +} + +// Open opens the transport for communication +func (z *TZlibTransport) Open() error { + return z.transport.Open() +} + +func (z *TZlibTransport) Read(p []byte) (int, error) { + if z.reader == nil { + r, err := zlib.NewReader(z.transport) + if err != nil { + return 0, NewTTransportExceptionFromError(err) + } + z.reader = r + } + + return z.reader.Read(p) +} + +// RemainingBytes returns the size in bytes of the data that is still to be +// read. +func (z *TZlibTransport) RemainingBytes() uint64 { + return z.transport.RemainingBytes() +} + +func (z *TZlibTransport) Write(p []byte) (int, error) { + return z.writer.Write(p) +} + +// SetTConfiguration implements TConfigurationSetter for propagation. +func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { + PropagateTConfiguration(z.transport, conf) +} + +var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go new file mode 100644 index 0000000000..972e254146 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/jaeger.go @@ -0,0 +1,360 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +const ( + keyInstrumentationLibraryName = "otel.library.name" + keyInstrumentationLibraryVersion = "otel.library.version" + keyError = "error" + keySpanKind = "span.kind" + keyStatusCode = "otel.status_code" + keyStatusMessage = "otel.status_description" + keyDroppedAttributeCount = "otel.event.dropped_attributes_count" + keyEventName = "event" +) + +// New returns an OTel Exporter implementation that exports the collected +// spans to Jaeger. +func New(endpointOption EndpointOption) (*Exporter, error) { + uploader, err := endpointOption.newBatchUploader() + if err != nil { + return nil, err + } + + // Fetch default service.name from default resource for backup + var defaultServiceName string + defaultResource := resource.Default() + if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { + defaultServiceName = value.AsString() + } + if defaultServiceName == "" { + return nil, fmt.Errorf("failed to get service name from default resource") + } + + stopCh := make(chan struct{}) + e := &Exporter{ + uploader: uploader, + stopCh: stopCh, + defaultServiceName: defaultServiceName, + } + return e, nil +} + +// Exporter exports OpenTelemetry spans to a Jaeger agent or collector. 
+type Exporter struct { + uploader batchUploader + stopOnce sync.Once + stopCh chan struct{} + defaultServiceName string +} + +var _ sdktrace.SpanExporter = (*Exporter)(nil) + +// ExportSpans transforms and exports OpenTelemetry spans to Jaeger. +func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + // Return fast if context is already canceled or Exporter shutdown. + select { + case <-ctx.Done(): + return ctx.Err() + case <-e.stopCh: + return nil + default: + } + + // Cancel export if Exporter is shutdown. + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + go func(ctx context.Context, cancel context.CancelFunc) { + select { + case <-ctx.Done(): + case <-e.stopCh: + cancel() + } + }(ctx, cancel) + + for _, batch := range jaegerBatchList(spans, e.defaultServiceName) { + if err := e.uploader.upload(ctx, batch); err != nil { + return err + } + } + + return nil +} + +// Shutdown stops the Exporter. This will close all connections and release +// all resources held by the Exporter. +func (e *Exporter) Shutdown(ctx context.Context) error { + // Stop any active and subsequent exports. + e.stopOnce.Do(func() { close(e.stopCh) }) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return e.uploader.shutdown(ctx) +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
+func (e *Exporter) MarshalLog() interface{} { + return struct { + Type string + }{ + Type: "jaeger", + } +} + +func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span { + attr := ss.Attributes() + tags := make([]*gen.Tag, 0, len(attr)) + for _, kv := range attr { + tag := keyValueToTag(kv) + if tag != nil { + tags = append(tags, tag) + } + } + + if is := ss.InstrumentationScope(); is.Name != "" { + tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name)) + if is.Version != "" { + tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version)) + } + } + + if ss.SpanKind() != trace.SpanKindInternal { + tags = append(tags, + getStringTag(keySpanKind, ss.SpanKind().String()), + ) + } + + if ss.Status().Code != codes.Unset { + switch ss.Status().Code { + case codes.Ok: + tags = append(tags, getStringTag(keyStatusCode, "OK")) + case codes.Error: + tags = append(tags, getBoolTag(keyError, true)) + tags = append(tags, getStringTag(keyStatusCode, "ERROR")) + } + if ss.Status().Description != "" { + tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description)) + } + } + + var logs []*gen.Log + for _, a := range ss.Events() { + nTags := len(a.Attributes) + if a.Name != "" { + nTags++ + } + if a.DroppedAttributeCount != 0 { + nTags++ + } + fields := make([]*gen.Tag, 0, nTags) + if a.Name != "" { + // If an event contains an attribute with the same key, it needs + // to be given precedence and overwrite this. 
+ fields = append(fields, getStringTag(keyEventName, a.Name)) + } + for _, kv := range a.Attributes { + tag := keyValueToTag(kv) + if tag != nil { + fields = append(fields, tag) + } + } + if a.DroppedAttributeCount != 0 { + fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount))) + } + logs = append(logs, &gen.Log{ + Timestamp: a.Time.UnixNano() / 1000, + Fields: fields, + }) + } + + var refs []*gen.SpanRef + for _, link := range ss.Links() { + tid := link.SpanContext.TraceID() + sid := link.SpanContext.SpanID() + refs = append(refs, &gen.SpanRef{ + TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), + SpanId: int64(binary.BigEndian.Uint64(sid[:])), + RefType: gen.SpanRefType_FOLLOWS_FROM, + }) + } + + tid := ss.SpanContext().TraceID() + sid := ss.SpanContext().SpanID() + psid := ss.Parent().SpanID() + return &gen.Span{ + TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])), + TraceIdLow: int64(binary.BigEndian.Uint64(tid[8:16])), + SpanId: int64(binary.BigEndian.Uint64(sid[:])), + ParentSpanId: int64(binary.BigEndian.Uint64(psid[:])), + OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv" + Flags: int32(ss.SpanContext().TraceFlags()), + StartTime: ss.StartTime().UnixNano() / 1000, + Duration: ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000, + Tags: tags, + Logs: logs, + References: refs, + } +} + +func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag { + var tag *gen.Tag + switch keyValue.Value.Type() { + case attribute.STRING: + s := keyValue.Value.AsString() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VStr: &s, + VType: gen.TagType_STRING, + } + case attribute.BOOL: + b := keyValue.Value.AsBool() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VBool: &b, + VType: gen.TagType_BOOL, + } + case attribute.INT64: + i := keyValue.Value.AsInt64() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VLong: &i, + VType: 
gen.TagType_LONG, + } + case attribute.FLOAT64: + f := keyValue.Value.AsFloat64() + tag = &gen.Tag{ + Key: string(keyValue.Key), + VDouble: &f, + VType: gen.TagType_DOUBLE, + } + case attribute.BOOLSLICE, + attribute.INT64SLICE, + attribute.FLOAT64SLICE, + attribute.STRINGSLICE: + data, _ := json.Marshal(keyValue.Value.AsInterface()) + a := (string)(data) + tag = &gen.Tag{ + Key: string(keyValue.Key), + VStr: &a, + VType: gen.TagType_STRING, + } + } + return tag +} + +func getInt64Tag(k string, i int64) *gen.Tag { + return &gen.Tag{ + Key: k, + VLong: &i, + VType: gen.TagType_LONG, + } +} + +func getStringTag(k, s string) *gen.Tag { + return &gen.Tag{ + Key: k, + VStr: &s, + VType: gen.TagType_STRING, + } +} + +func getBoolTag(k string, b bool) *gen.Tag { + return &gen.Tag{ + Key: k, + VBool: &b, + VType: gen.TagType_BOOL, + } +} + +// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch. +func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch { + if len(ssl) == 0 { + return nil + } + + batchDict := make(map[attribute.Distinct]*gen.Batch) + + for _, ss := range ssl { + if ss == nil { + continue + } + + resourceKey := ss.Resource().Equivalent() + batch, bOK := batchDict[resourceKey] + if !bOK { + batch = &gen.Batch{ + Process: process(ss.Resource(), defaultServiceName), + Spans: []*gen.Span{}, + } + } + batch.Spans = append(batch.Spans, spanToThrift(ss)) + batchDict[resourceKey] = batch + } + + // Transform the categorized map into a slice + batchList := make([]*gen.Batch, 0, len(batchDict)) + for _, batch := range batchDict { + batchList = append(batchList, batch) + } + return batchList +} + +// process transforms an OTel Resource into a jaeger Process. 
+func process(res *resource.Resource, defaultServiceName string) *gen.Process { + var process gen.Process + + var serviceName attribute.KeyValue + if res != nil { + for iter := res.Iter(); iter.Next(); { + if iter.Attribute().Key == semconv.ServiceNameKey { + serviceName = iter.Attribute() + // Don't convert service.name into tag. + continue + } + if tag := keyValueToTag(iter.Attribute()); tag != nil { + process.Tags = append(process.Tags, tag) + } + } + } + + // If no service.name is contained in a Span's Resource, + // that field MUST be populated from the default Resource. + if serviceName.Value.AsString() == "" { + serviceName = semconv.ServiceNameKey.String(defaultServiceName) + } + process.ServiceName = serviceName.Value.AsString() + + return &process +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go new file mode 100644 index 0000000000..88055c8a30 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/reconnecting_udp_client.go @@ -0,0 +1,204 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/go-logr/logr" +) + +// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +type reconnectingUDPConn struct { + // `sync/atomic` expects the first word in an allocated struct to be 64-bit + // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details. + bufferBytes int64 + hostPort string + resolveFunc resolveFunc + dialFunc dialFunc + logger logr.Logger + + connMtx sync.RWMutex + conn *net.UDPConn + destAddr *net.UDPAddr + closeChan chan struct{} +} + +type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error) +type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) + +// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is +// different than the current conn then the new address is dialed and the conn is swapped. +func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger logr.Logger) (*reconnectingUDPConn, error) { + conn := &reconnectingUDPConn{ + hostPort: hostPort, + resolveFunc: resolveFunc, + dialFunc: dialFunc, + logger: logger, + closeChan: make(chan struct{}), + bufferBytes: int64(bufferBytes), + } + + if err := conn.attemptResolveAndDial(); err != nil { + conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout) + } + + go conn.reconnectLoop(resolveTimeout) + + return conn, nil +} + +func (c *reconnectingUDPConn) logf(format string, args ...interface{}) { + if c.logger != emptyLogger { + c.logger.Info(format, args...) 
+ } +} + +func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) { + ticker := time.NewTicker(resolveTimeout) + defer ticker.Stop() + + for { + select { + case <-c.closeChan: + return + case <-ticker.C: + if err := c.attemptResolveAndDial(); err != nil { + c.logf("%s", err.Error()) + } + } + } +} + +func (c *reconnectingUDPConn) attemptResolveAndDial() error { + newAddr, err := c.resolveFunc("udp", c.hostPort) + if err != nil { + return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err) + } + + c.connMtx.RLock() + curAddr := c.destAddr + c.connMtx.RUnlock() + + // dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn + if curAddr != nil && newAddr.String() == curAddr.String() { + return nil + } + + if err := c.attemptDialNewAddr(newAddr); err != nil { + return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err) + } + + return nil +} + +func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error { + connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr) + if err != nil { + return err + } + + if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 { + if err = connUDP.SetWriteBuffer(bufferBytes); err != nil { + return err + } + } + + c.connMtx.Lock() + c.destAddr = newAddr + // store prev to close later + prevConn := c.conn + c.conn = connUDP + c.connMtx.Unlock() + + if prevConn != nil { + return prevConn.Close() + } + + return nil +} + +// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning. 
+func (c *reconnectingUDPConn) Write(b []byte) (int, error) { + var bytesWritten int + var err error + + c.connMtx.RLock() + conn := c.conn + c.connMtx.RUnlock() + + if conn == nil { + // if connection is not initialized indicate this with err in order to hook into retry logic + err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved") + } else { + bytesWritten, err = conn.Write(b) + } + + if err == nil { + return bytesWritten, nil + } + + // attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again + if reconnErr := c.attemptResolveAndDial(); reconnErr == nil { + c.connMtx.RLock() + conn := c.conn + c.connMtx.RUnlock() + + return conn.Write(b) + } + + // return original error if reconn fails + return bytesWritten, err +} + +// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation. +func (c *reconnectingUDPConn) Close() error { + close(c.closeChan) + + // acquire rw lock before closing conn to ensure calls to Write drain + c.connMtx.Lock() + defer c.connMtx.Unlock() + + if c.conn != nil { + return c.conn.Close() + } + + return nil +} + +// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held +// and SetWriteBuffer is called store bufferBytes to be set for new conns. 
+func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error { + var err error + + c.connMtx.RLock() + conn := c.conn + c.connMtx.RUnlock() + + if conn != nil { + err = c.conn.SetWriteBuffer(bytes) + } + + if err == nil { + atomic.StoreInt64(&c.bufferBytes, int64(bytes)) + } + + return err +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go new file mode 100644 index 0000000000..f65e3a6782 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/jaeger/uploader.go @@ -0,0 +1,339 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger" + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "net/http" + "time" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + + gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger" + "go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift" +) + +// batchUploader send a batch of spans to Jaeger. +type batchUploader interface { + upload(context.Context, *gen.Batch) error + shutdown(context.Context) error +} + +// EndpointOption configures a Jaeger endpoint. 
+type EndpointOption interface { + newBatchUploader() (batchUploader, error) +} + +type endpointOptionFunc func() (batchUploader, error) + +func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) { + return fn() +} + +// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent +// over compact thrift protocol. This will use the following environment variables for +// configuration if no explicit option is provided: +// +// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host +// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port +// +// The passed options will take precedence over any environment variables and default values +// will be used if neither are provided. +func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + cfg := agentEndpointConfig{ + agentClientUDPParams{ + AttemptReconnecting: true, + Host: envOr(envAgentHost, "localhost"), + Port: envOr(envAgentPort, "6831"), + }, + } + for _, opt := range options { + cfg = opt.apply(cfg) + } + + client, err := newAgentClientUDP(cfg.agentClientUDPParams) + if err != nil { + return nil, err + } + + return &agentUploader{client: client}, nil + }) +} + +// AgentEndpointOption configures a Jaeger agent endpoint. +type AgentEndpointOption interface { + apply(agentEndpointConfig) agentEndpointConfig +} + +type agentEndpointConfig struct { + agentClientUDPParams +} + +type agentEndpointOptionFunc func(agentEndpointConfig) agentEndpointConfig + +func (fn agentEndpointOptionFunc) apply(cfg agentEndpointConfig) agentEndpointConfig { + return fn(cfg) +} + +// WithAgentHost sets a host to be used in the agent client endpoint. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable. +// If this option is not passed and the env var is not set, "localhost" will be used by default. 
+func WithAgentHost(host string) AgentEndpointOption { + return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { + o.Host = host + return o + }) +} + +// WithAgentPort sets a port to be used in the agent client endpoint. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable. +// If this option is not passed and the env var is not set, "6831" will be used by default. +func WithAgentPort(port string) AgentEndpointOption { + return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { + o.Port = port + return o + }) +} + +var emptyLogger = logr.Logger{} + +// WithLogger sets a logger to be used by agent client. +// WithLogger and WithLogr will overwrite each other. +func WithLogger(logger *log.Logger) AgentEndpointOption { + return WithLogr(stdr.New(logger)) +} + +// WithLogr sets a logr.Logger to be used by agent client. +// WithLogr and WithLogger will overwrite each other. +func WithLogr(logger logr.Logger) AgentEndpointOption { + return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { + o.Logger = logger + return o + }) +} + +// WithDisableAttemptReconnecting sets option to disable reconnecting udp client. +func WithDisableAttemptReconnecting() AgentEndpointOption { + return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { + o.AttemptReconnecting = false + return o + }) +} + +// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint. +func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption { + return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { + o.AttemptReconnectInterval = interval + return o + }) +} + +// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent. 
+func WithMaxPacketSize(size int) AgentEndpointOption { + return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig { + o.MaxPacketSize = size + return o + }) +} + +// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will +// use the following environment variables for configuration if no explicit option is provided: +// +// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector. +// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint. +// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint. +// +// The passed options will take precedence over any environment variables. +// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used. +// If neither values are provided for the username or the password, they will not be set since there is no default. +func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption { + return endpointOptionFunc(func() (batchUploader, error) { + cfg := collectorEndpointConfig{ + endpoint: envOr(envEndpoint, "http://localhost:14268/api/traces"), + username: envOr(envUser, ""), + password: envOr(envPassword, ""), + httpClient: http.DefaultClient, + } + + for _, opt := range options { + cfg = opt.apply(cfg) + } + + return &collectorUploader{ + endpoint: cfg.endpoint, + username: cfg.username, + password: cfg.password, + httpClient: cfg.httpClient, + }, nil + }) +} + +// CollectorEndpointOption configures a Jaeger collector endpoint. +type CollectorEndpointOption interface { + apply(collectorEndpointConfig) collectorEndpointConfig +} + +type collectorEndpointConfig struct { + // endpoint for sending spans directly to a collector. + endpoint string + + // username to be used for authentication with the collector endpoint. 
+ username string + + // password to be used for authentication with the collector endpoint. + password string + + // httpClient to be used to make requests to the collector endpoint. + httpClient *http.Client +} + +type collectorEndpointOptionFunc func(collectorEndpointConfig) collectorEndpointConfig + +func (fn collectorEndpointOptionFunc) apply(cfg collectorEndpointConfig) collectorEndpointConfig { + return fn(cfg) +} + +// WithEndpoint is the URL for the Jaeger collector that spans are sent to. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable. +// If this option is not passed and the environment variable is not set, +// "http://localhost:14268/api/traces" will be used by default. +func WithEndpoint(endpoint string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { + o.endpoint = endpoint + return o + }) +} + +// WithUsername sets the username to be used in the authorization header sent for all requests to the collector. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_USER environment variable. +// If this option is not passed and the environment variable is not set, no username will be set. +func WithUsername(username string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { + o.username = username + return o + }) +} + +// WithPassword sets the password to be used in the authorization header sent for all requests to the collector. +// This option overrides any value set for the +// OTEL_EXPORTER_JAEGER_PASSWORD environment variable. +// If this option is not passed and the environment variable is not set, no password will be set. 
+func WithPassword(password string) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { + o.password = password + return o + }) +} + +// WithHTTPClient sets the http client to be used to make request to the collector endpoint. +func WithHTTPClient(client *http.Client) CollectorEndpointOption { + return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig { + o.httpClient = client + return o + }) +} + +// agentUploader implements batchUploader interface sending batches to +// Jaeger through the UDP agent. +type agentUploader struct { + client *agentClientUDP +} + +var _ batchUploader = (*agentUploader)(nil) + +func (a *agentUploader) shutdown(ctx context.Context) error { + done := make(chan error, 1) + go func() { + done <- a.client.Close() + }() + + select { + case <-ctx.Done(): + // Prioritize not blocking the calling thread and just leak the + // spawned goroutine to close the client. + return ctx.Err() + case err := <-done: + return err + } +} + +func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error { + return a.client.EmitBatch(ctx, batch) +} + +// collectorUploader implements batchUploader interface sending batches to +// Jaeger through the collector http endpoint. +type collectorUploader struct { + endpoint string + username string + password string + httpClient *http.Client +} + +var _ batchUploader = (*collectorUploader)(nil) + +func (c *collectorUploader) shutdown(ctx context.Context) error { + // The Exporter will cancel any active exports and will prevent all + // subsequent exports, so nothing to do here. 
+ return nil +} + +func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error { + body, err := serialize(batch) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body) + if err != nil { + return err + } + if c.username != "" && c.password != "" { + req.SetBasicAuth(c.username, c.password) + } + req.Header.Set("Content-Type", "application/x-thrift") + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + + _, _ = io.Copy(io.Discard, resp.Body) + if err = resp.Body.Close(); err != nil { + return err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) + } + return nil +} + +func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { + buf := thrift.NewTMemoryBuffer() + if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil { + return nil, err + } + return buf.Buffer, nil +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 0000000000..9a58fb1d37 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +top_dir='.' 
+if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 0000000000..ecd363ab51 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "log" + "os" + "sync/atomic" + "unsafe" +) + +var ( + // globalErrorHandler provides an ErrorHandler that can be used + // throughout an OpenTelemetry instrumented project. When a user + // specified ErrorHandler is registered (`SetErrorHandler`) all calls to + // `Handle` and will be delegated to the registered ErrorHandler. + globalErrorHandler = defaultErrorHandler() + + // Compile-time check that delegator implements ErrorHandler. 
+ _ ErrorHandler = (*delegator)(nil) + // Compile-time check that errLogger implements ErrorHandler. + _ ErrorHandler = (*errLogger)(nil) +) + +type delegator struct { + delegate unsafe.Pointer +} + +func (d *delegator) Handle(err error) { + d.getDelegate().Handle(err) +} + +func (d *delegator) getDelegate() ErrorHandler { + return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) +} + +// setDelegate sets the ErrorHandler delegate. +func (d *delegator) setDelegate(eh ErrorHandler) { + atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) +} + +func defaultErrorHandler() *delegator { + d := &delegator{} + d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d +} + +// errLogger logs errors if no delegate is set, otherwise they are delegated. +type errLogger struct { + l *log.Logger +} + +// Handle logs err if no delegate is set, otherwise it is delegated. +func (h *errLogger) Handle(err error) { + h.l.Print(err) +} + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + globalErrorHandler.setDelegate(h) +} + +// Handle is a convenience function for ErrorHandler().Handle(err). 
+func Handle(err error) { + GetErrorHandler().Handle(err) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 0000000000..622c3ee3f2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) + return cp.Elem().Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) + return cp.Elem().Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. 
+func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) + return cp.Elem().Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) + return cp.Elem().Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
+func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. +func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 0000000000..b96e5408e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. 
This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 0000000000..4469700d9c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. 
+func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 0000000000..293c08961f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + "unsafe" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} + +// SetLogger overrides the globalLogger with l. +// +// To see Info messages use a logger with `l.V(1).Enabled() == true` +// To see Debug messages use a logger with `l.V(5).Enabled() == true`. 
+func SetLogger(l logr.Logger) { + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less then 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + getLogger().V(1).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + getLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + getLogger().V(5).Info(msg, keysAndValues...) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go new file mode 100644 index 0000000000..06bac35c2f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/propagation" +) + +// textMapPropagator is a default TextMapPropagator that delegates calls to a +// registered delegate if one is set, otherwise it defaults to delegating the +// calls to a the default no-op propagation.TextMapPropagator. +type textMapPropagator struct { + mtx sync.Mutex + once sync.Once + delegate propagation.TextMapPropagator + noop propagation.TextMapPropagator +} + +// Compile-time guarantee that textMapPropagator implements the +// propagation.TextMapPropagator interface. +var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) + +func newTextMapPropagator() *textMapPropagator { + return &textMapPropagator{ + noop: propagation.NewCompositeTextMapPropagator(), + } +} + +// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are +// forwarded to. Delegation can only be performed once, all subsequent calls +// perform no delegation. +func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { + if delegate == nil { + return + } + + p.mtx.Lock() + p.once.Do(func() { p.delegate = delegate }) + p.mtx.Unlock() +} + +// effectiveDelegate returns the current delegate of p if one is set, +// otherwise the default noop TextMapPropagator is returned. This method +// can be called concurrently. +func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { + p.mtx.Lock() + defer p.mtx.Unlock() + if p.delegate != nil { + return p.delegate + } + return p.noop +} + +// Inject set cross-cutting concerns from the Context into the carrier. +func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + p.effectiveDelegate().Inject(ctx, carrier) +} + +// Extract reads cross-cutting concerns from the carrier into a Context. 
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + return p.effectiveDelegate().Extract(ctx, carrier) +} + +// Fields returns the keys whose values are set with Inject. +func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 0000000000..1ad38f828e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } +) + +var ( + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once +) + +// TracerProvider is the internal implementation for global.TracerProvider. +func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. 
+func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to it's current value. No delegate will be configured", + ) + return + } + } + + delegateTraceOnce.Do(func() { + if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to it's current value. No delegate will be configured", + ) + return + } + } + + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. + delegateTextMapPropagatorOnce.Do(func() { + if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. 
+ globalPropagators.Store(propagatorsHolder{tm: p}) +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 0000000000..5f008d0982 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +/* +This file contains the forwarding implementation of the TracerProvider used as +the default global instance. Prior to initialization of an SDK, Tracers +returned by the global TracerProvider will provide no-op functionality. This +means that all Span created prior to initialization are no-op Spans. + +Once an SDK has been initialized, all provided no-op Tracers are swapped for +Tracers provided by the SDK defined TracerProvider. However, any Span started +prior to this initialization does not change its behavior. Meaning, the Span +remains a no-op Span. + +The implementation to track and swap Tracers locks all new Tracer creation +until the swap is complete. 
This assumes that this operation is not +performance-critical. If that assumption is incorrect, be sure to configure an +SDK prior to any Tracer creation. +*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) 
+ key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct { + name string + version string +} + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. 
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 0000000000..e07e794000 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. 
+ if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 0000000000..c4f8acd5d8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. 
+func SetLogger(logger logr.Logger) { + global.SetLogger(logger) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 0000000000..778ad2d748 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. 
+// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. +func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 0000000000..bd6f434372 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides an implementation of the metrics part of the +OpenTelemetry API. + +This package is currently in a pre-GA phase. Backwards incompatible changes +may be introduced in subsequent minor version releases as we work to track the +evolving OpenTelemetry specification and user feedback. 
+*/ +package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go new file mode 100644 index 0000000000..cb0896d38a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/global/global.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/global" + +import ( + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/internal/global" +) + +// Meter returns a Meter from the global MeterProvider. The +// instrumentationName must be the name of the library providing +// instrumentation. This name may be the same as the instrumented code only if +// that code provides built-in instrumentation. If the instrumentationName is +// empty, then a implementation defined default name will be used instead. +// +// This is short for MeterProvider().Meter(name). +func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { + return MeterProvider().Meter(instrumentationName, opts...) +} + +// MeterProvider returns the registered global meter provider. +// If none is registered then a No-op MeterProvider is returned. +func MeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers `mp` as the global meter provider. 
+func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go new file mode 100644 index 0000000000..0b5d5a99c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Observable interface { + Asynchronous + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. 
+type Float64ObservableCounter interface{ Float64Observable } + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableUpDownCounter interface{ Float64Observable } + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: methods may be added to this interface in minor releases. +type Float64ObservableGauge interface{ Float64Observable } + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Observer interface { + Observe(value float64, attributes ...attribute.KeyValue) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observerable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Float64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObserverConfig contains options for Asynchronous instruments that +// observe float64 values. 
+type Float64ObserverConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObserverConfig returns a new Float64ObserverConfig with all opts +// applied. +func NewFloat64ObserverConfig(opts ...Float64ObserverOption) Float64ObserverConfig { + var config Float64ObserverConfig + for _, o := range opts { + config = o.applyFloat64Observer(config) + } + return config +} + +// Description returns the Config description. +func (c Float64ObserverConfig) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Float64ObserverConfig) Unit() string { + return c.unit +} + +// Callbacks returns the Config callbacks. +func (c Float64ObserverConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObserverOption applies options to float64 Observer instruments. +type Float64ObserverOption interface { + applyFloat64Observer(Float64ObserverConfig) Float64ObserverConfig +} + +type float64ObserverOptionFunc func(Float64ObserverConfig) Float64ObserverConfig + +func (fn float64ObserverOptionFunc) applyFloat64Observer(cfg Float64ObserverConfig) Float64ObserverConfig { + return fn(cfg) +} + +// WithFloat64Callback adds callback to be called for an instrument. +func WithFloat64Callback(callback Float64Callback) Float64ObserverOption { + return float64ObserverOptionFunc(func(cfg Float64ObserverConfig) Float64ObserverConfig { + cfg.callbacks = append(cfg.callbacks, callback) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go new file mode 100644 index 0000000000..05feeacb05 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Observable interface { + Asynchronous + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableCounter interface{ Int64Observable } + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed the to +// be the cumulative sum of the count. +// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableUpDownCounter interface{ Int64Observable } + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. 
+// +// Warning: methods may be added to this interface in minor releases. +type Int64ObservableGauge interface{ Int64Observable } + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Observer interface { + Observe(value int64, attributes ...attribute.KeyValue) +} + +// Int64Callback is a function registered with a Meter that makes +// observations for a Int64Observerable instrument it is registered with. +// Calls to the Int64Observer record measurement values for the +// Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callback. Meaning, it should not report measurements with the same +// attributes as another Int64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObserverConfig contains options for Asynchronous instruments that +// observe int64 values. +type Int64ObserverConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObserverConfig returns a new Int64ObserverConfig with all opts +// applied. +func NewInt64ObserverConfig(opts ...Int64ObserverOption) Int64ObserverConfig { + var config Int64ObserverConfig + for _, o := range opts { + config = o.applyInt64Observer(config) + } + return config +} + +// Description returns the Config description. +func (c Int64ObserverConfig) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Int64ObserverConfig) Unit() string { + return c.unit +} + +// Callbacks returns the Config callbacks. 
+func (c Int64ObserverConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObserverOption applies options to int64 Observer instruments. +type Int64ObserverOption interface { + applyInt64Observer(Int64ObserverConfig) Int64ObserverConfig +} + +type int64ObserverOptionFunc func(Int64ObserverConfig) Int64ObserverConfig + +func (fn int64ObserverOptionFunc) applyInt64Observer(cfg Int64ObserverConfig) Int64ObserverConfig { + return fn(cfg) +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObserverOption { + return int64ObserverOptionFunc(func(cfg Int64ObserverConfig) Int64ObserverConfig { + cfg.callbacks = append(cfg.callbacks, callback) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go new file mode 100644 index 0000000000..f6dd9e890f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +// Asynchronous instruments are instruments that are updated within a Callback. +// If an instrument is observed outside of it's callback it should be an error. +// +// This interface is used as a grouping mechanism. 
+type Asynchronous interface { + asynchronous() +} + +// Synchronous instruments are updated in line with application code. +// +// This interface is used as a grouping mechanism. +type Synchronous interface { + synchronous() +} + +// Option applies options to all instruments. +type Option interface { + Float64ObserverOption + Int64ObserverOption + Float64Option + Int64Option +} + +type descOpt string + +func (o descOpt) applyFloat64(c Float64Config) Float64Config { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64(c Int64Config) Int64Config { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. +func WithDescription(desc string) Option { return descOpt(desc) } + +type unitOpt string + +func (o unitOpt) applyFloat64(c Float64Config) Float64Config { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64(c Int64Config) Int64Config { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Observer(c Float64ObserverConfig) Float64ObserverConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Observer(c Int64ObserverConfig) Int64ObserverConfig { + c.unit = string(o) + return c +} + +// WithUnit sets the instrument unit. 
+func WithUnit(u string) Option { return unitOpt(u) } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go new file mode 100644 index 0000000000..2cdfeb2691 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Float64UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: methods may be added to this interface in minor releases. 
+type Float64Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Float64Config contains options for Asynchronous instruments that +// observe float64 values. +type Float64Config struct { + description string + unit string +} + +// Float64Config contains options for Synchronous instruments that record +// float64 values. +func NewFloat64Config(opts ...Float64Option) Float64Config { + var config Float64Config + for _, o := range opts { + config = o.applyFloat64(config) + } + return config +} + +// Description returns the Config description. +func (c Float64Config) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Float64Config) Unit() string { + return c.unit +} + +// Float64Option applies options to synchronous float64 instruments. +type Float64Option interface { + applyFloat64(Float64Config) Float64Config +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go new file mode 100644 index 0000000000..e212c6d695 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: methods may be added to this interface in minor releases. +type Int64Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + Synchronous +} + +// Int64Config contains options for Synchronous instruments that record int64 +// values. +type Int64Config struct { + description string + unit string +} + +// NewInt64Config returns a new Int64Config with all opts +// applied. +func NewInt64Config(opts ...Int64Option) Int64Config { + var config Int64Config + for _, o := range opts { + config = o.applyInt64(config) + } + return config +} + +// Description returns the Config description. +func (c Int64Config) Description() string { + return c.description +} + +// Unit returns the Config unit. +func (c Int64Config) Unit() string { + return c.unit +} + +// Int64Option applies options to synchronous int64 instruments. 
+type Int64Option interface { + applyInt64(Int64Config) Int64Config +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go new file mode 100644 index 0000000000..d1480fa5f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go @@ -0,0 +1,355 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() instrument.Asynchronous +} + +type afCounter struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableCounter +} + +var _ unwrapper = (*afCounter)(nil) +var _ instrument.Float64ObservableCounter = (*afCounter)(nil) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableUpDownCounter +} + +var _ unwrapper = (*afUpDownCounter)(nil) +var _ instrument.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + instrument.Float64Observable + + name string + opts []instrument.Float64ObserverOption + + delegate atomic.Value //instrument.Float64ObservableGauge +} + +var _ unwrapper = (*afGauge)(nil) +var _ instrument.Float64ObservableGauge = (*afGauge)(nil) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableCounter +} + +var _ unwrapper = (*aiCounter)(nil) +var _ instrument.Int64ObservableCounter = (*aiCounter)(nil) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableUpDownCounter +} + +var _ unwrapper = (*aiUpDownCounter)(nil) +var _ instrument.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + instrument.Int64Observable + + name string + opts []instrument.Int64ObserverOption + + delegate atomic.Value //instrument.Int64ObservableGauge +} + +var _ unwrapper = (*aiGauge)(nil) +var _ instrument.Int64ObservableGauge = (*aiGauge)(nil) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(instrument.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64Counter + + instrument.Synchronous +} + +var _ instrument.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64Counter).Add(ctx, incr, attrs...) + } +} + +type sfUpDownCounter struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64UpDownCounter + + instrument.Synchronous +} + +var _ instrument.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64UpDownCounter).Add(ctx, incr, attrs...) + } +} + +type sfHistogram struct { + name string + opts []instrument.Float64Option + + delegate atomic.Value //instrument.Float64Histogram + + instrument.Synchronous +} + +var _ instrument.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Float64Histogram).Record(ctx, x, attrs...) + } +} + +type siCounter struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64Counter + + instrument.Synchronous +} + +var _ instrument.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64Counter).Add(ctx, x, attrs...) + } +} + +type siUpDownCounter struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64UpDownCounter + + instrument.Synchronous +} + +var _ instrument.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64UpDownCounter).Add(ctx, x, attrs...) + } +} + +type siHistogram struct { + name string + opts []instrument.Int64Option + + delegate atomic.Value //instrument.Int64Histogram + + instrument.Synchronous +} + +var _ instrument.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(instrument.Int64Histogram).Record(ctx, x, attrs...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go new file mode 100644 index 0000000000..8acf632863 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +type il struct { + name string + version string +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) 
+ key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) + m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...instrument.Asynchronous) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +type wrapped interface { + unwrap() instrument.Asynchronous +} + +func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous { + out := make([]instrument.Asynchronous, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + instruments []instrument.Asynchronous + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) 
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
new file mode 100644
index 0000000000..47c0d787d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go
@@ -0,0 +1,68 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/metric/internal/global"
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+var (
+	globalMeterProvider = defaultMeterProvider()
+
+	delegateMeterOnce sync.Once
+)
+
+type meterProviderHolder struct {
+	mp metric.MeterProvider
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) { + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + global.Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to it's current value. No delegate will be configured", + ) + return + } + } + + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { + def.setDelegate(mp) + } + }) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) +} + +func defaultMeterProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(meterProviderHolder{mp: &meterProvider{}}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 0000000000..2f69d2ae54 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,138 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or library. +// +// Warning: methods may be added to this interface in minor releases. 
+type MeterProvider interface {
+	// Meter creates an instance of a `Meter` interface. The name must be the
+	// name of the library providing instrumentation. This name may be the same
+	// as the instrumented code only if that code provides built-in
+	// instrumentation. If the name is empty, then an implementation-defined
+	// default name will be used instead.
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Meter interface {
+	// Int64Counter returns a new instrument identified by name and configured
+	// with options. The instrument is used to synchronously record increasing
+	// int64 measurements during a computational operation.
+	Int64Counter(name string, options ...instrument.Int64Option) (instrument.Int64Counter, error)
+	// Int64UpDownCounter returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// int64 measurements during a computational operation.
+	Int64UpDownCounter(name string, options ...instrument.Int64Option) (instrument.Int64UpDownCounter, error)
+	// Int64Histogram returns a new instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// the distribution of int64 measurements during a computational operation.
+	Int64Histogram(name string, options ...instrument.Int64Option) (instrument.Int64Histogram, error)
+	// Int64ObservableCounter returns a new instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// increasing int64 measurements once per a measurement collection cycle.
+ Int64ObservableCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new instrument identified by name + // and configured with options. The instrument is used to asynchronously + // record int64 measurements once per a measurement collection cycle. + Int64ObservableUpDownCounter(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new instrument identified by name and + // configured with options. The instrument is used to asynchronously record + // instantaneous int64 measurements once per a measurement collection + // cycle. + Int64ObservableGauge(name string, options ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) + + // Float64Counter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // increasing float64 measurements during a computational operation. + Float64Counter(name string, options ...instrument.Float64Option) (instrument.Float64Counter, error) + // Float64UpDownCounter returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // float64 measurements during a computational operation. + Float64UpDownCounter(name string, options ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) + // Float64Histogram returns a new instrument identified by name and + // configured with options. The instrument is used to synchronously record + // the distribution of float64 measurements during a computational + // operation. + Float64Histogram(name string, options ...instrument.Float64Option) (instrument.Float64Histogram, error) + // Float64ObservableCounter returns a new instrument identified by name and + // configured with options. 
The instrument is used to asynchronously record
+	// increasing float64 measurements once per a measurement collection cycle.
+	Float64ObservableCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new instrument identified by
+	// name and configured with options. The instrument is used to
+	// asynchronously record float64 measurements once per a measurement
+	// collection cycle.
+	Float64ObservableUpDownCounter(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// instantaneous float64 measurements once per a measurement collection
+	// cycle.
+	Float64ObservableGauge(name string, options ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	RegisterCallback(f Callback, instruments ...instrument.Asynchronous) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. 
Meaning, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+type Observer interface {
+	// ObserveFloat64 records the float64 value with attributes for obsrv.
+	ObserveFloat64(obsrv instrument.Float64Observable, value float64, attributes ...attribute.KeyValue)
+	// ObserveInt64 records the int64 value with attributes for obsrv.
+	ObserveInt64(obsrv instrument.Int64Observable, value int64, attributes ...attribute.KeyValue)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+type Registration interface {
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go
new file mode 100644
index 0000000000..f38619e39a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// NewNoopMeterProvider creates a MeterProvider that does not record any metrics. +func NewNoopMeterProvider() MeterProvider { + return noopMeterProvider{} +} + +type noopMeterProvider struct{} + +func (noopMeterProvider) Meter(string, ...MeterOption) Meter { + return noopMeter{} +} + +// NewNoopMeter creates a Meter that does not record any metrics. +func NewNoopMeter() Meter { + return noopMeter{} +} + +type noopMeter struct{} + +func (noopMeter) Int64Counter(string, ...instrument.Int64Option) (instrument.Int64Counter, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64UpDownCounter(string, ...instrument.Int64Option) (instrument.Int64UpDownCounter, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64Histogram(string, ...instrument.Int64Option) (instrument.Int64Histogram, error) { + return nonrecordingSyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableCounter, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableUpDownCounter(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableUpDownCounter, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Int64ObservableGauge(string, ...instrument.Int64ObserverOption) (instrument.Int64ObservableGauge, error) { + return nonrecordingAsyncInt64Instrument{}, nil +} + +func (noopMeter) Float64Counter(string, ...instrument.Float64Option) (instrument.Float64Counter, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64UpDownCounter(string, ...instrument.Float64Option) (instrument.Float64UpDownCounter, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func 
(noopMeter) Float64Histogram(string, ...instrument.Float64Option) (instrument.Float64Histogram, error) { + return nonrecordingSyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableCounter, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableUpDownCounter(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableUpDownCounter, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +func (noopMeter) Float64ObservableGauge(string, ...instrument.Float64ObserverOption) (instrument.Float64ObservableGauge, error) { + return nonrecordingAsyncFloat64Instrument{}, nil +} + +// RegisterCallback creates a register callback that does not record any metrics. +func (noopMeter) RegisterCallback(Callback, ...instrument.Asynchronous) (Registration, error) { + return noopReg{}, nil +} + +type noopReg struct{} + +func (noopReg) Unregister() error { return nil } + +type nonrecordingAsyncFloat64Instrument struct { + instrument.Float64Observable +} + +var ( + _ instrument.Float64ObservableCounter = nonrecordingAsyncFloat64Instrument{} + _ instrument.Float64ObservableUpDownCounter = nonrecordingAsyncFloat64Instrument{} + _ instrument.Float64ObservableGauge = nonrecordingAsyncFloat64Instrument{} +) + +type nonrecordingAsyncInt64Instrument struct { + instrument.Int64Observable +} + +var ( + _ instrument.Int64ObservableCounter = nonrecordingAsyncInt64Instrument{} + _ instrument.Int64ObservableUpDownCounter = nonrecordingAsyncInt64Instrument{} + _ instrument.Int64ObservableGauge = nonrecordingAsyncInt64Instrument{} +) + +type nonrecordingSyncFloat64Instrument struct { + instrument.Synchronous +} + +var ( + _ instrument.Float64Counter = nonrecordingSyncFloat64Instrument{} + _ instrument.Float64UpDownCounter = nonrecordingSyncFloat64Instrument{} + _ instrument.Float64Histogram = nonrecordingSyncFloat64Instrument{} +) + +func 
(nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) {} +func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) {} + +type nonrecordingSyncInt64Instrument struct { + instrument.Synchronous +} + +var ( + _ instrument.Int64Counter = nonrecordingSyncInt64Instrument{} + _ instrument.Int64UpDownCounter = nonrecordingSyncInt64Instrument{} + _ instrument.Int64Histogram = nonrecordingSyncInt64Instrument{} +) + +func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) {} +func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) {} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 0000000000..d29aaa32c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/propagation" +) + +// GetTextMapPropagator returns the global TextMapPropagator. If none has been +// set, a No-Op TextMapPropagator is returned. +func GetTextMapPropagator() propagation.TextMapPropagator { + return global.TextMapPropagator() +} + +// SetTextMapPropagator sets propagator as the global TextMapPropagator. 
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) { + global.SetTextMapPropagator(propagator) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go new file mode 100644 index 0000000000..303cdf1cbf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + + "go.opentelemetry.io/otel/baggage" +) + +const baggageHeader = "baggage" + +// Baggage is a propagator that supports the W3C Baggage format. +// +// This propagates user-defined baggage associated with a trace. The complete +// specification is defined at https://www.w3.org/TR/baggage/. +type Baggage struct{} + +var _ TextMapPropagator = Baggage{} + +// Inject sets baggage key-values from ctx into the carrier. +func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { + bStr := baggage.FromContext(ctx).String() + if bStr != "" { + carrier.Set(baggageHeader, bStr) + } +} + +// Extract returns a copy of parent with the baggage from the carrier added. 
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + bStr := carrier.Get(baggageHeader) + if bStr == "" { + return parent + } + + bag, err := baggage.Parse(bStr) + if err != nil { + return parent + } + return baggage.ContextWithBaggage(parent, bag) +} + +// Fields returns the keys who's values are set with Inject. +func (b Baggage) Fields() []string { + return []string{baggageHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go new file mode 100644 index 0000000000..c119eb2858 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package propagation contains OpenTelemetry context propagators. + +OpenTelemetry propagators are used to extract and inject context data from and +into messages exchanged by applications. The propagator supported by this +package is the W3C Trace Context encoding +(https://www.w3.org/TR/trace-context/), and W3C Baggage +(https://www.w3.org/TR/baggage/). 
+*/ +package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go new file mode 100644 index 0000000000..c94438f73a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + "net/http" +) + +// TextMapCarrier is the storage medium used by a TextMapPropagator. +type TextMapCarrier interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Get returns the value associated with the passed key. + Get(key string) string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Set stores the key-value pair. + Set(key string, value string) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Keys lists the keys stored in this carrier. + Keys() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + +// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage +// medium for propagated key-value pairs. +type MapCarrier map[string]string + +// Compile time check that MapCarrier implements the TextMapCarrier. +var _ TextMapCarrier = MapCarrier{} + +// Get returns the value associated with the passed key. +func (c MapCarrier) Get(key string) string { + return c[key] +} + +// Set stores the key-value pair. +func (c MapCarrier) Set(key, value string) { + c[key] = value +} + +// Keys lists the keys stored in this carrier. +func (c MapCarrier) Keys() []string { + keys := make([]string, 0, len(c)) + for k := range c { + keys = append(keys, k) + } + return keys +} + +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +type HeaderCarrier http.Header + +// Get returns the value associated with the passed key. +func (hc HeaderCarrier) Get(key string) string { + return http.Header(hc).Get(key) +} + +// Set stores the key-value pair. +func (hc HeaderCarrier) Set(key string, value string) { + http.Header(hc).Set(key, value) +} + +// Keys lists the keys stored in this carrier. +func (hc HeaderCarrier) Keys() []string { + keys := make([]string, 0, len(hc)) + for k := range hc { + keys = append(keys, k) + } + return keys +} + +// TextMapPropagator propagates cross-cutting concerns as key-value text +// pairs within a carrier that travels in-band across process boundaries. +type TextMapPropagator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Inject set cross-cutting concerns from the Context into the carrier. + Inject(ctx context.Context, carrier TextMapCarrier) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Extract reads cross-cutting concerns from the carrier into a Context. 
+ Extract(ctx context.Context, carrier TextMapCarrier) context.Context + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Fields returns the keys whose values are set with Inject. + Fields() []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +type compositeTextMapPropagator []TextMapPropagator + +func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { + for _, i := range p { + i.Inject(ctx, carrier) + } +} + +func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { + for _, i := range p { + ctx = i.Extract(ctx, carrier) + } + return ctx +} + +func (p compositeTextMapPropagator) Fields() []string { + unique := make(map[string]struct{}) + for _, i := range p { + for _, k := range i.Fields() { + unique[k] = struct{}{} + } + } + + fields := make([]string, 0, len(unique)) + for k := range unique { + fields = append(fields, k) + } + return fields +} + +// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the +// group of passed TextMapPropagator. This allows different cross-cutting +// concerns to be propagates in a unified manner. +// +// The returned TextMapPropagator will inject and extract cross-cutting +// concerns in the order the TextMapPropagators were provided. Additionally, +// the Fields method will return a de-duplicated slice of the keys that are +// set with the Inject method. 
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { + return compositeTextMapPropagator(p) +} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go new file mode 100644 index 0000000000..902692da08 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package propagation // import "go.opentelemetry.io/otel/propagation" + +import ( + "context" + "encoding/hex" + "fmt" + "regexp" + + "go.opentelemetry.io/otel/trace" +) + +const ( + supportedVersion = 0 + maxVersion = 254 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" +) + +// TraceContext is a propagator that supports the W3C Trace Context format +// (https://www.w3.org/TR/trace-context/) +// +// This propagator will propagate the traceparent and tracestate headers to +// guarantee traces are not broken. It is up to the users of this propagator +// to choose if they want to participate in a trace by modifying the +// traceparent header and relevant parts of the tracestate header containing +// their proprietary information. 
+type TraceContext struct{} + +var _ TextMapPropagator = TraceContext{} +var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$") + +// Inject set tracecontext from the Context into the carrier. +func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { + sc := trace.SpanContextFromContext(ctx) + if !sc.IsValid() { + return + } + + if ts := sc.TraceState().String(); ts != "" { + carrier.Set(tracestateHeader, ts) + } + + // Clear all flags other than the trace-context supported sampling bit. + flags := sc.TraceFlags() & trace.FlagsSampled + + h := fmt.Sprintf("%.2x-%s-%s-%s", + supportedVersion, + sc.TraceID(), + sc.SpanID(), + flags) + carrier.Set(traceparentHeader, h) +} + +// Extract reads tracecontext from the carrier into a returned Context. +// +// The returned Context will be a copy of ctx and contain the extracted +// tracecontext as the remote SpanContext. If the extracted tracecontext is +// invalid, the passed ctx will be returned directly instead. 
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { + sc := tc.extract(carrier) + if !sc.IsValid() { + return ctx + } + return trace.ContextWithRemoteSpanContext(ctx, sc) +} + +func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { + h := carrier.Get(traceparentHeader) + if h == "" { + return trace.SpanContext{} + } + + matches := traceCtxRegExp.FindStringSubmatch(h) + + if len(matches) == 0 { + return trace.SpanContext{} + } + + if len(matches) < 5 { // four subgroups plus the overall match + return trace.SpanContext{} + } + + if len(matches[1]) != 2 { + return trace.SpanContext{} + } + ver, err := hex.DecodeString(matches[1]) + if err != nil { + return trace.SpanContext{} + } + version := int(ver[0]) + if version > maxVersion { + return trace.SpanContext{} + } + + if version == 0 && len(matches) != 5 { // four subgroups plus the overall match + return trace.SpanContext{} + } + + if len(matches[2]) != 32 { + return trace.SpanContext{} + } + + var scc trace.SpanContextConfig + + scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) + if err != nil { + return trace.SpanContext{} + } + + if len(matches[3]) != 16 { + return trace.SpanContext{} + } + scc.SpanID, err = trace.SpanIDFromHex(matches[3]) + if err != nil { + return trace.SpanContext{} + } + + if len(matches[4]) != 2 { + return trace.SpanContext{} + } + opts, err := hex.DecodeString(matches[4]) + if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { + return trace.SpanContext{} + } + // Clear all flags other than the trace-context supported sampling bit. + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + + // Ignore the error returned here. Failure to parse tracestate MUST NOT + // affect the parsing of traceparent according to the W3C tracecontext + // specification. 
+ scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) + scc.Remote = true + + sc := trace.NewSpanContext(scc) + if !sc.IsValid() { + return trace.SpanContext{} + } + + return sc +} + +// Fields returns the keys who's values are set with Inject. +func (tc TraceContext) Fields() []string { + return []string{traceparentHeader, tracestateHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 0000000000..6e923acab4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. 
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 0000000000..39f025a171 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// Deprecated: please use Scope instead. +type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 0000000000..09c6d93f6d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. + SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go new file mode 100644 index 0000000000..5e94b8ae52 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package env // import "go.opentelemetry.io/otel/sdk/internal/env" + +import ( + "os" + "strconv" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). + BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). 
+ BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). + BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // EnvBatchSpanProcessorMaxQueueSize. + BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute per span + // event count. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute per span + // link count. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" +) + +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. 
+func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value, ok := os.LookupEnv(key) + if !ok { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists and the value is an int. Otherwise, defaultValue is returned. +func IntEnvOr(key string, defaultValue int) int { + value, ok := os.LookupEnv(key) + if !ok { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchSpanProcessorScheduleDelay returns the environment variable value for +// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) +} + +// BatchSpanProcessorExportTimeout returns the environment variable value for +// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) +} + +// BatchSpanProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. 
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) +} + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. +func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. 
+func SpanLinkAttributeCount(defaultValue int) int { + return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go new file mode 100644 index 0000000000..84a02306e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/sdk/internal" + +import ( + "fmt" + "time" + + "go.opentelemetry.io/otel" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opentelemetry-go/%s", otel.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. 
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 0000000000..c1d220408a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" +) + +var ( + // ErrPartialResource is returned by a detector when complete source + // information for a Resource is unavailable or the source information + // contains invalid values that are omitted from the returned Resource. + ErrPartialResource = errors.New("partial resource") +) + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. 
+ Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect calls all input detectors sequentially and merges each result with the previous one. +// It returns the merged error too. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + var autoDetectedRes *Resource + var errInfo []string + for _, detector := range detectors { + if detector == nil { + continue + } + res, err := detector.Detect(ctx) + if err != nil { + errInfo = append(errInfo, err.Error()) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + autoDetectedRes, err = Merge(autoDetectedRes, res) + if err != nil { + errInfo = append(errInfo, err.Error()) + } + } + + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("detecting resources: %s", errInfo) + } + return autoDetectedRes, aggregatedError +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 0000000000..34a474891a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. + host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKNameKey.String("opentelemetry"), + semconv.TelemetrySDKLanguageKey.String("go"), + semconv.TelemetrySDKVersionKey.String(otel.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. 
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 0000000000..8e212b1218 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. 
+func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. 
+func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. +func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. 
+// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 0000000000..6f7fd005b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. 
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. +func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go new file mode 100644 index 0000000000..9aab3d8393 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 0000000000..deebe363a1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,108 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. + resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +var ( + // errMissingValue is returned when a resource value is missing. + errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) +) + +// fromEnv is a Detector that implements the Detector and collects +// resources from environment. This Detector is included as a +// builtin. +type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceNameKey.String(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. 
+ res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + attrs := []attribute.KeyValue{} + var invalid []string + for _, p := range pairs { + field := strings.SplitN(p, "=", 2) + if len(field) != 2 { + invalid = append(invalid, p) + continue + } + k := strings.TrimSpace(field[0]) + v, err := url.QueryUnescape(strings.TrimSpace(field[1])) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + v = field[1] + otel.Handle(err) + } + attrs = append(attrs, attribute.String(k, v)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 0000000000..ac520dd867 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type osTypeDetector struct{} +type osDescriptionDetector struct{} + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescriptionKey.String(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, + "linux": semconv.OSTypeLinux, + "netbsd": semconv.OSTypeNetBSD, + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, + } + + var osTypeAttribute attribute.KeyValue + + if attr, ok := osTypeAttributeMap[osType]; ok { + osTypeAttribute = attr + } else { + osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) + } + + return osTypeAttribute +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go new file mode 100644 index 0000000000..24ec85793d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"os"
+)
+
+type plist struct {
+	XMLName xml.Name `xml:"plist"`
+	Dict    dict     `xml:"dict"`
+}
+
+type dict struct {
+	Key    []string `xml:"key"`
+	String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+	file, err := getPlistFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values, err := parsePlistFile(file)
+	if err != nil {
+		return ""
+	}
+
+	return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{
+		"/System/Library/CoreServices/SystemVersion.plist",
+		"/System/Library/CoreServices/ServerVersion.plist",
+	})
+}
+
+// parsePlistFile process the file pointed by `file` as a .plist file and returns
+// a map with the key-values for each pair of correlated <key> and <string> elements
+// contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) { + var v plist + + err := xml.NewDecoder(file).Decode(&v) + if err != nil { + return nil, err + } + + if len(v.Dict.Key) != len(v.Dict.String) { + return nil, fmt.Errorf("the number of and elements doesn't match") + } + + properties := make(map[string]string, len(v.Dict.Key)) + for i, key := range v.Dict.Key { + properties[key] = v.Dict.String[i] + } + + return properties, nil +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It tries to find the `ProductName`, `ProductVersion` +// and `ProductBuildVersion` properties. If some of these properties are not found, +// it returns an empty string. +func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 0000000000..fba6790e44 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. 
+func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + parts := strings.SplitN(line, "=", 2) + + if len(parts) != 2 || len(parts[0]) == 0 { + return "", "", false + } + + key := strings.TrimSpace(parts[0]) + value := unescape(unquote(strings.TrimSpace(parts[1]))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. +func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. 
It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 0000000000..1c84afc185 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. +func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. 
+func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 0000000000..3ebcb534f2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !aix +// +build !darwin +// +build !dragonfly +// +build !freebsd +// +build !linux +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows +// +build !zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. 
+func platformOSDescription() (string, error) { + return "", nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 0000000000..faad64d8da --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,101 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 
0000000000..7eaddd34bf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,180 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type pidProvider func() int +type executablePathProvider func() (string, error) +type commandArgsProvider func() []string +type ownerProvider func() (*user.User, error) +type runtimeNameProvider func() string +type runtimeVersionProvider func() string +type runtimeOSProvider func() string +type runtimeArchProvider func() string + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = 
defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type processPIDDetector struct{} +type processExecutableNameDetector struct{} +type processExecutablePathDetector struct{} +type processCommandArgsDetector struct{} +type processOwnerDetector struct{} +type processRuntimeNameDetector struct{} +type processRuntimeVersionDetector struct{} +type processRuntimeDescriptionDetector struct{} + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. 
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. 
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 0000000000..c425ff05db --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,282 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). 
The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + emptyResource Resource + defaultResource *Resource + defaultResourceOnce sync.Once +) + +var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") + +// New returns a Resource combined from the user-provided detectors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + resource, err := Detect(ctx, cfg.detectors...) + + var err2 error + resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL}) + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return resource, err +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. 
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &emptyResource + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &emptyResource + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. 
+func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new resource by combining resource a and b. +// +// If there are common keys between resource a and b, then the value +// from resource b will overwrite the value from resource a, even +// if resource b's value is empty. +// +// The SchemaURL of the resources will be merged according to the spec rules: +// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge +// If the resources have different non-empty schemaURL an empty resource and an error +// will be returned. +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Merge the schema URL. + var schemaURL string + switch true { + case a.schemaURL == "": + schemaURL = b.schemaURL + case b.schemaURL == "": + schemaURL = a.schemaURL + case a.schemaURL == b.schemaURL: + schemaURL = a.schemaURL + default: + return Empty(), errMergeConflictSchemaURL + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + merged := NewWithAttributes(schemaURL, combine...) + return merged, nil +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &emptyResource +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. 
+func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultResource, err = Detect( + context.Background(), + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &emptyResource + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go new file mode 100644 index 0000000000..a2d7db4900 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "runtime" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/trace" +) + +// Defaults for BatchSpanProcessorOptions. +const ( + DefaultMaxQueueSize = 2048 + DefaultScheduleDelay = 5000 + DefaultExportTimeout = 30000 + DefaultMaxExportBatchSize = 512 +) + +// BatchSpanProcessorOption configures a BatchSpanProcessor. +type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) + +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. +type BatchSpanProcessorOptions struct { + // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the + // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. + // The default value of MaxQueueSize is 2048. 
+ MaxQueueSize int + + // BatchTimeout is the maximum duration for constructing a batch. Processor + // forcefully sends available spans when timeout is reached. + // The default value of BatchTimeout is 5000 msec. + BatchTimeout time.Duration + + // ExportTimeout specifies the maximum duration for exporting spans. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + + // MaxExportBatchSize is the maximum number of spans to process in a single batch. + // If there are more than one batch worth of spans then it processes multiple batches + // of spans one batch after the other without any delay. + // The default value of MaxExportBatchSize is 512. + MaxExportBatchSize int + + // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full + // AND if BlockOnQueueFull is set to true. + // Blocking option should be used carefully as it can severely affect the performance of an + // application. + BlockOnQueueFull bool +} + +// batchSpanProcessor is a SpanProcessor that batches asynchronously-received +// spans and sends them to a trace.Exporter when complete. +type batchSpanProcessor struct { + e SpanExporter + o BatchSpanProcessorOptions + + queue chan ReadOnlySpan + dropped uint32 + + batch []ReadOnlySpan + batchMutex sync.Mutex + timer *time.Timer + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} +} + +var _ SpanProcessor = (*batchSpanProcessor)(nil) + +// NewBatchSpanProcessor creates a new SpanProcessor that will send completed +// span batches to the exporter with the supplied options. +// +// If the exporter is nil, the span processor will preform no action. 
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) + + if maxExportBatchSize > maxQueueSize { + if DefaultMaxExportBatchSize > maxQueueSize { + maxExportBatchSize = maxQueueSize + } else { + maxExportBatchSize = DefaultMaxExportBatchSize + } + } + + o := BatchSpanProcessorOptions{ + BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, + ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, + MaxQueueSize: maxQueueSize, + MaxExportBatchSize: maxExportBatchSize, + } + for _, opt := range options { + opt(&o) + } + bsp := &batchSpanProcessor{ + e: exporter, + o: o, + batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), + timer: time.NewTimer(o.BatchTimeout), + queue: make(chan ReadOnlySpan, o.MaxQueueSize), + stopCh: make(chan struct{}), + } + + bsp.stopWait.Add(1) + go func() { + defer bsp.stopWait.Done() + bsp.processQueue() + bsp.drainQueue() + }() + + return bsp +} + +// OnStart method does nothing. +func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} + +// OnEnd method enqueues a ReadOnlySpan for later processing. +func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return + } + bsp.enqueue(s) +} + +// Shutdown flushes the queue and waits until all spans are processed. +// It only executes once. Subsequent call does nothing. 
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) + bsp.stopWait.Wait() + if bsp.e != nil { + if err := bsp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } + close(wait) + }() + // Wait until the wait group is done or the context is cancelled + select { + case <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err +} + +type forceFlushSpan struct { + ReadOnlySpan + flushed chan struct{} +} + +func (f forceFlushSpan) SpanContext() trace.SpanContext { + return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) +} + +// ForceFlush exports all ended spans that have not yet been exported. +func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): + return ctx.Err() + } + } + + wait := make(chan error) + go func() { + wait <- bsp.exportSpans(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err +} + +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. +func WithMaxQueueSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxQueueSize = size + } +} + +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. 
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxExportBatchSize = size + } +} + +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). +func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BatchTimeout = delay + } +} + +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. +func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.ExportTimeout = timeout + } +} + +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. +func WithBlocking() BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BlockOnQueueFull = true + } +} + +// exportSpans is a subroutine of processing and draining the queue. +func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { + bsp.timer.Reset(bsp.o.BatchTimeout) + + bsp.batchMutex.Lock() + defer bsp.batchMutex.Unlock() + + if bsp.o.ExportTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + defer cancel() + } + + if l := len(bsp.batch); l > 0 { + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + err := bsp.e.ExportSpans(ctx, bsp.batch) + + // A new batch is always created after exporting, even if the batch failed to be exported. 
+ // + // It is up to the exporter to implement any type of retry logic if a batch is failing + // to be exported, since it is specific to the protocol and backend being sent to. + bsp.batch = bsp.batch[:0] + + if err != nil { + return err + } + } + return nil +} + +// processQueue removes spans from the `queue` channel until processor +// is shut down. It calls the exporter in batches of up to MaxExportBatchSize +// waiting up to BatchTimeout to form a batch. +func (bsp *batchSpanProcessor) processQueue() { + defer bsp.timer.Stop() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case <-bsp.stopCh: + return + case <-bsp.timer.C: + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + case sd := <-bsp.queue: + if ffs, ok := sd.(forceFlushSpan); ok { + close(ffs.flushed) + continue + } + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + if shouldExport { + if !bsp.timer.Stop() { + <-bsp.timer.C + } + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + } + } +} + +// drainQueue awaits the any caller that had added to bsp.stopWait +// to finish the enqueue, then exports the final batch. 
+func (bsp *batchSpanProcessor) drainQueue() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case sd := <-bsp.queue: + if sd == nil { + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + return + } + + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + + if shouldExport { + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + default: + close(bsp.queue) + } + } +} + +func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + ctx := context.TODO() + if bsp.o.BlockOnQueueFull { + bsp.enqueueBlockOnQueueFull(ctx, sd) + } else { + bsp.enqueueDrop(ctx, sd) + } +} + +func recoverSendOnClosedChan() { + x := recover() + switch err := x.(type) { + case nil: + return + case runtime.Error: + if err.Error() == "send on closed channel" { + return + } + } + panic(x) +} + +func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-bsp.stopCh: + return false + default: + } + + select { + case bsp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-bsp.stopCh: + return false + default: + } + + select { + case bsp.queue <- sd: + return true + default: + atomic.AddUint32(&bsp.dropped, 1) + } + return false +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. 
+func (bsp *batchSpanProcessor) MarshalLog() interface{} { + return struct { + Type string + SpanExporter SpanExporter + Config BatchSpanProcessorOptions + }{ + Type: "BatchSpanProcessor", + SpanExporter: bsp.e, + Config: bsp.o, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go new file mode 100644 index 0000000000..0285e99be0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenTelemetry distributed tracing. + +The following assumes a basic familiarity with OpenTelemetry concepts. +See https://opentelemetry.io. +*/ +package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go new file mode 100644 index 0000000000..1e3b426757 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string + + // Attributes describe the aspects of the event. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int + + // Time at which this event was recorded. + Time time.Time +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go new file mode 100644 index 0000000000..d1c86e59b2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +// evictedQueue is a FIFO queue with a configurable capacity. +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) evictedQueue { + // Do not pre-allocate queue, do this lazily. + return evictedQueue{capacity: capacity} +} + +// add adds value to the evictedQueue eq. If eq is at capacity, the oldest +// queued value will be discarded and the drop count incremented. +func (eq *evictedQueue) add(value interface{}) { + if eq.capacity == 0 { + eq.droppedCount++ + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { + // Drop first-in while avoiding allocating more capacity to eq.queue. + copy(eq.queue[:eq.capacity-1], eq.queue[1:]) + eq.queue = eq.queue[:eq.capacity-1] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go new file mode 100644 index 0000000000..bba246041a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "math/rand" + "sync" + + "go.opentelemetry.io/otel/trace" +) + +// IDGenerator allows custom generators for TraceID and SpanID. +type IDGenerator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewIDs returns a new trace and span ID. + NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewSpanID returns a ID for a new span in the trace with traceID. + NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +type randomIDGenerator struct { + sync.Mutex + randSource *rand.Rand +} + +var _ IDGenerator = &randomIDGenerator{} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { + gen.Lock() + defer gen.Unlock() + sid := trace.SpanID{} + _, _ = gen.randSource.Read(sid[:]) + return sid +} + +// NewIDs returns a non-zero trace ID and a non-zero span ID from a +// randomly-chosen sequence. 
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { + gen.Lock() + defer gen.Unlock() + tid := trace.TraceID{} + _, _ = gen.randSource.Read(tid[:]) + sid := trace.SpanID{} + _, _ = gen.randSource.Read(sid[:]) + return tid, sid +} + +func defaultIDGenerator() IDGenerator { + gen := &randomIDGenerator{} + var rngSeed int64 + _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) + gen.randSource = rand.New(rand.NewSource(rngSeed)) + return gen +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go new file mode 100644 index 0000000000..19cfea4ba4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +type Link struct { + // SpanContext of the linked Span. + SpanContext trace.SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. 
+ DroppedAttributeCount int +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go new file mode 100644 index 0000000000..201c178170 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -0,0 +1,461 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" +) + +const ( + defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" +) + +// tracerProviderConfig. +type tracerProviderConfig struct { + // processors contains collection of SpanProcessors that are processing pipeline + // for spans in the trace signal. + // SpanProcessors registered with a TracerProvider and are called at the start + // and end of a Span's lifecycle, and are called in the order they are + // registered. + processors []SpanProcessor + + // sampler is the default sampler used when creating new spans. + sampler Sampler + + // idGenerator is used to generate all Span and Trace IDs when needed. + idGenerator IDGenerator + + // spanLimits defines the attribute, event, and link limits for spans. 
+ spanLimits SpanLimits + + // resource contains attributes representing an entity that produces telemetry. + resource *resource.Resource +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (cfg tracerProviderConfig) MarshalLog() interface{} { + return struct { + SpanProcessors []SpanProcessor + SamplerType string + IDGeneratorType string + SpanLimits SpanLimits + Resource *resource.Resource + }{ + SpanProcessors: cfg.processors, + SamplerType: fmt.Sprintf("%T", cfg.sampler), + IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), + SpanLimits: cfg.spanLimits, + Resource: cfg.resource, + } +} + +// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to +// instrumentation so it can trace operational flow through a system. +type TracerProvider struct { + mu sync.Mutex + namedTracer map[instrumentation.Scope]*tracer + spanProcessors atomic.Value + isShutdown bool + + // These fields are not protected by the lock mu. They are assumed to be + // immutable after creation of the TracerProvider. + sampler Sampler + idGenerator IDGenerator + spanLimits SpanLimits + resource *resource.Resource +} + +var _ trace.TracerProvider = &TracerProvider{} + +// NewTracerProvider returns a new and configured TracerProvider. +// +// By default the returned TracerProvider is configured with: +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. +// +// The passed opts are used to override these default values and configure the +// returned TracerProvider appropriately. 
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) + + for _, opt := range opts { + o = opt.apply(o) + } + + o = ensureValidTracerProviderConfig(o) + + tp := &TracerProvider{ + namedTracer: make(map[instrumentation.Scope]*tracer), + sampler: o.sampler, + idGenerator: o.idGenerator, + spanLimits: o.spanLimits, + resource: o.resource, + } + global.Info("TracerProvider created", "config", o) + + spss := spanProcessorStates{} + for _, sp := range o.processors { + spss = append(spss, newSpanProcessorState(sp)) + } + tp.spanProcessors.Store(spss) + + return tp +} + +// Tracer returns a Tracer with the given name and options. If a Tracer for +// the given name and options does not exist it is created, otherwise the +// existing Tracer is returned. +// +// If name is empty, DefaultTracerName is used instead. +// +// This method is safe to be called concurrently. +func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + c := trace.NewTracerConfig(opts...) + + p.mu.Lock() + defer p.mu.Unlock() + if name == "" { + name = defaultTracerName + } + is := instrumentation.Scope{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + t, ok := p.namedTracer[is] + if !ok { + t = &tracer{ + provider: p, + instrumentationScope: is, + } + p.namedTracer[is] = t + global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) + } + return t +} + +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + if p.isShutdown { + return + } + newSPS := spanProcessorStates{} + newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...) 
+ newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(newSPS) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + if p.isShutdown { + return + } + old := p.spanProcessors.Load().(spanProcessorStates) + if len(old) == 0 { + return + } + spss := spanProcessorStates{} + spss = append(spss, old...) + + // stop the span processor if it is started and remove it from the list + var stopOnce *spanProcessorState + var idx int + for i, sps := range spss { + if sps.sp == sp { + stopOnce = sps + idx = i + } + } + if stopOnce != nil { + stopOnce.state.Do(func() { + if err := sp.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }) + } + if len(spss) > 1 { + copy(spss[idx:], spss[idx+1:]) + } + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] + + p.spanProcessors.Store(spss) +} + +// ForceFlush immediately exports all spans that have not yet been exported for +// all the registered span processors. +func (p *TracerProvider) ForceFlush(ctx context.Context) error { + spss := p.spanProcessors.Load().(spanProcessorStates) + if len(spss) == 0 { + return nil + } + + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if err := sps.sp.ForceFlush(ctx); err != nil { + return err + } + } + return nil +} + +// Shutdown shuts down TracerProvider. All registered span processors are shut down +// in the order they were registered and any held computational resources are released. 
+func (p *TracerProvider) Shutdown(ctx context.Context) error { + spss := p.spanProcessors.Load().(spanProcessorStates) + if len(spss) == 0 { + return nil + } + + p.mu.Lock() + defer p.mu.Unlock() + p.isShutdown = true + + var retErr error + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var err error + sps.state.Do(func() { + err = sps.sp.Shutdown(ctx) + }) + if err != nil { + if retErr == nil { + retErr = err + } else { + // Poor man's list of errors + retErr = fmt.Errorf("%v; %v", retErr, err) + } + } + } + p.spanProcessors.Store(spanProcessorStates{}) + return retErr +} + +// TracerProviderOption configures a TracerProvider. +type TracerProviderOption interface { + apply(tracerProviderConfig) tracerProviderConfig +} + +type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig + +func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { + return fn(cfg) +} + +// WithSyncer registers the exporter with the TracerProvider using a +// SimpleSpanProcessor. +// +// This is not recommended for production use. The synchronous nature of the +// SimpleSpanProcessor that will wrap the exporter make it good for testing, +// debugging, or showing examples of other feature, but it will be slow and +// have a high computation resource usage overhead. The WithBatcher option is +// recommended for production use instead. +func WithSyncer(e SpanExporter) TracerProviderOption { + return WithSpanProcessor(NewSimpleSpanProcessor(e)) +} + +// WithBatcher registers the exporter with the TracerProvider using a +// BatchSpanProcessor configured with the passed opts. +func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { + return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) +} + +// WithSpanProcessor registers the SpanProcessor with a TracerProvider. 
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.processors = append(cfg.processors, sp) + return cfg + }) +} + +// WithResource returns a TracerProviderOption that will configure the +// Resource r as a TracerProvider's Resource. The configured Resource is +// referenced by all the Tracers the TracerProvider creates. It represents the +// entity producing telemetry. +// +// If this option is not used, the TracerProvider will use the +// resource.Default() Resource by default. +func WithResource(r *resource.Resource) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + var err error + cfg.resource, err = resource.Merge(resource.Environment(), r) + if err != nil { + otel.Handle(err) + } + return cfg + }) +} + +// WithIDGenerator returns a TracerProviderOption that will configure the +// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator +// is used by the Tracers the TracerProvider creates to generate new Span and +// Trace IDs. +// +// If this option is not used, the TracerProvider will use a random number +// IDGenerator by default. +func WithIDGenerator(g IDGenerator) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if g != nil { + cfg.idGenerator = g + } + return cfg + }) +} + +// WithSampler returns a TracerProviderOption that will configure the Sampler +// s as a TracerProvider's Sampler. The configured Sampler is used by the +// Tracers the TracerProvider creates to make their sampling decisions for the +// Spans they create. +// +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. 
If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a +// ParentBased(AlwaysSample) Sampler by default. +func WithSampler(s Sampler) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if s != nil { + cfg.sampler = s + } + return cfg + }) +} + +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. +// +// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. +// +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. 
+func WithSpanLimits(sl SpanLimits) TracerProviderOption { + if sl.AttributeValueLengthLimit <= 0 { + sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = sl + return cfg + }) +} + +// WithRawSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use these limits. These limits bound any Span created by +// a Tracer from the TracerProvider. +// +// The limits will be used as-is. Zero or negative values will not be changed +// to the default value like WithSpanLimits does. Setting a limit to zero will +// effectively disable the related resource it limits and setting to a +// negative value will mean that resource is unlimited. Consequentially, this +// means that the zero-value SpanLimits will disable all span resources. +// Because of this, limits should be constructed using NewSpanLimits and +// updated accordingly. +// +// If this or WithSpanLimits are not provided, the TracerProvider will use the +// limits defined by environment variables, or the defaults if unset. Refer to +// the NewSpanLimits documentation for information about this relationship. 
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = limits + return cfg + }) +} + +func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { + for _, opt := range tracerProviderOptionsFromEnv() { + cfg = opt.apply(cfg) + } + + return cfg +} + +func tracerProviderOptionsFromEnv() []TracerProviderOption { + var opts []TracerProviderOption + + sampler, err := samplerFromEnv() + if err != nil { + otel.Handle(err) + } + + if sampler != nil { + opts = append(opts, WithSampler(sampler)) + } + + return opts +} + +// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. +func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { + if cfg.sampler == nil { + cfg.sampler = ParentBased(AlwaysSample()) + } + if cfg.idGenerator == nil { + cfg.idGenerator = defaultIDGenerator() + } + if cfg.resource == nil { + cfg.resource = resource.Default() + } + return cfg +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go new file mode 100644 index 0000000000..02053b318a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +const ( + tracesSamplerKey = "OTEL_TRACES_SAMPLER" + tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" + + samplerAlwaysOn = "always_on" + samplerAlwaysOff = "always_off" + samplerTraceIDRatio = "traceidratio" + samplerParentBasedAlwaysOn = "parentbased_always_on" + samplerParsedBasedAlwaysOff = "parentbased_always_off" + samplerParentBasedTraceIDRatio = "parentbased_traceidratio" +) + +type errUnsupportedSampler string + +func (e errUnsupportedSampler) Error() string { + return fmt.Sprintf("unsupported sampler: %s", string(e)) +} + +var ( + errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") + errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") +) + +type samplerArgParseError struct { + parseErr error +} + +func (e samplerArgParseError) Error() string { + return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) +} + +func (e samplerArgParseError) Unwrap() error { + return e.parseErr +} + +func samplerFromEnv() (Sampler, error) { + sampler, ok := os.LookupEnv(tracesSamplerKey) + if !ok { + return nil, nil + } + + sampler = strings.ToLower(strings.TrimSpace(sampler)) + samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) + samplerArg = strings.TrimSpace(samplerArg) + + switch sampler { + case samplerAlwaysOn: + return AlwaysSample(), nil + case samplerAlwaysOff: + return NeverSample(), nil + case samplerTraceIDRatio: + if !hasSamplerArg { + return TraceIDRatioBased(1.0), nil + } + return parseTraceIDRatio(samplerArg) + case samplerParentBasedAlwaysOn: + return ParentBased(AlwaysSample()), nil + case samplerParsedBasedAlwaysOff: + return ParentBased(NeverSample()), nil + case samplerParentBasedTraceIDRatio: + if !hasSamplerArg { + return ParentBased(TraceIDRatioBased(1.0)), nil + } + ratio, err := parseTraceIDRatio(samplerArg) + return ParentBased(ratio), 
err + default: + return nil, errUnsupportedSampler(sampler) + } +} + +func parseTraceIDRatio(arg string) (Sampler, error) { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return TraceIDRatioBased(1.0), samplerArgParseError{err} + } + if v < 0.0 { + return TraceIDRatioBased(1.0), errNegativeTraceIDRatio + } + if v > 1.0 { + return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio + } + + return TraceIDRatioBased(v), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go new file mode 100644 index 0000000000..5ee9715d27 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -0,0 +1,293 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "encoding/binary" + "fmt" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Sampler decides whether a trace should be sampled and exported. +type Sampler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ShouldSample returns a SamplingResult based on a decision made from the + // passed parameters. 
+ ShouldSample(parameters SamplingParameters) SamplingResult + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Description returns information describing the Sampler. + Description() string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext context.Context + TraceID trace.TraceID + Name string + Kind trace.SpanKind + Attributes []attribute.KeyValue + Links []trace.Link +} + +// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. +type SamplingDecision uint8 + +// Valid sampling decisions. +const ( + // Drop will not record the span and all attributes/events will be dropped. + Drop SamplingDecision = iota + + // Record indicates the span's `IsRecording() == true`, but `Sampled` flag + // *must not* be set. + RecordOnly + + // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag + // *must* be set. + RecordAndSample +) + +// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. +type SamplingResult struct { + Decision SamplingDecision + Attributes []attribute.KeyValue + Tracestate trace.TraceState +} + +type traceIDRatioSampler struct { + traceIDUpperBound uint64 + description string +} + +func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: psc.TraceState(), + } + } + return SamplingResult{ + Decision: Drop, + Tracestate: psc.TraceState(), + } +} + +func (ts traceIDRatioSampler) Description() string { + return ts.description +} + +// TraceIDRatioBased samples a given fraction of traces. 
Fractions >= 1 will +// always sample. Fractions < 0 are treated as zero. To respect the +// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used +// as a delegate of a `Parent` sampler. +// +//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` +func TraceIDRatioBased(fraction float64) Sampler { + if fraction >= 1 { + return AlwaysSample() + } + + if fraction <= 0 { + fraction = 0 + } + + return &traceIDRatioSampler{ + traceIDUpperBound: uint64(fraction * (1 << 63)), + description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), + } +} + +type alwaysOnSampler struct{} + +func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOnSampler) Description() string { + return "AlwaysOnSampler" +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return alwaysOnSampler{} +} + +type alwaysOffSampler struct{} + +func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: Drop, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOffSampler) Description() string { + return "AlwaysOffSampler" +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return alwaysOffSampler{} +} + +// ParentBased returns a composite sampler which behaves differently, +// based on the parent of the span. If the span has no parent, +// the root(Sampler) is used to make sampling decision. 
If the span has +// a parent, depending on whether the parent is remote and whether it +// is sampled, one of the following samplers will apply: +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) +func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, + config: configureSamplersForParentBased(samplers), + } +} + +type parentBased struct { + root Sampler + config samplerConfig +} + +func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { + c := samplerConfig{ + remoteParentSampled: AlwaysSample(), + remoteParentNotSampled: NeverSample(), + localParentSampled: AlwaysSample(), + localParentNotSampled: NeverSample(), + } + + for _, so := range samplers { + c = so.apply(c) + } + + return c +} + +// samplerConfig is a group of options for parentBased sampler. +type samplerConfig struct { + remoteParentSampled, remoteParentNotSampled Sampler + localParentSampled, localParentNotSampled Sampler +} + +// ParentBasedSamplerOption configures the sampler for a particular sampling case. +type ParentBasedSamplerOption interface { + apply(samplerConfig) samplerConfig +} + +// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. +func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentSampledOption{s} +} + +type remoteParentSampledOption struct { + s Sampler +} + +func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentSampled = o.s + return config +} + +// WithRemoteParentNotSampled sets the sampler for the case of remote parent +// which is not sampled. 
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentNotSampledOption{s} +} + +type remoteParentNotSampledOption struct { + s Sampler +} + +func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentNotSampled = o.s + return config +} + +// WithLocalParentSampled sets the sampler for the case of sampled local parent. +func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { + return localParentSampledOption{s} +} + +type localParentSampledOption struct { + s Sampler +} + +func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentSampled = o.s + return config +} + +// WithLocalParentNotSampled sets the sampler for the case of local parent +// which is not sampled. +func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption { + return localParentNotSampledOption{s} +} + +type localParentNotSampledOption struct { + s Sampler +} + +func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentNotSampled = o.s + return config +} + +func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + if psc.IsValid() { + if psc.IsRemote() { + if psc.IsSampled() { + return pb.config.remoteParentSampled.ShouldSample(p) + } + return pb.config.remoteParentNotSampled.ShouldSample(p) + } + + if psc.IsSampled() { + return pb.config.localParentSampled.ShouldSample(p) + } + return pb.config.localParentNotSampled.ShouldSample(p) + } + return pb.root.ShouldSample(p) +} + +func (pb parentBased) Description() string { + return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ + "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", + pb.root.Description(), + pb.config.remoteParentSampled.Description(), + pb.config.remoteParentNotSampled.Description(), + pb.config.localParentSampled.Description(), + 
pb.config.localParentNotSampled.Description(), + ) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go new file mode 100644 index 0000000000..e8530a9593 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -0,0 +1,128 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel" +) + +// simpleSpanProcessor is a SpanProcessor that synchronously sends all +// completed Spans to a trace.Exporter immediately. +type simpleSpanProcessor struct { + exporterMu sync.RWMutex + exporter SpanExporter + stopOnce sync.Once +} + +var _ SpanProcessor = (*simpleSpanProcessor)(nil) + +// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously +// send completed spans to the exporter immediately. +// +// This SpanProcessor is not recommended for production use. The synchronous +// nature of this SpanProcessor make it good for testing, debugging, or +// showing examples of other feature, but it will be slow and have a high +// computation resource usage overhead. The BatchSpanProcessor is recommended +// for production use instead. 
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { + ssp := &simpleSpanProcessor{ + exporter: exporter, + } + return ssp +} + +// OnStart does nothing. +func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} + +// OnEnd immediately exports a ReadOnlySpan. +func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { + ssp.exporterMu.RLock() + defer ssp.exporterMu.RUnlock() + + if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { + if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + otel.Handle(err) + } + } +} + +// Shutdown shuts down the exporter this SimpleSpanProcessor exports to. +func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { + var err error + ssp.stopOnce.Do(func() { + stopFunc := func(exp SpanExporter) (<-chan error, func()) { + done := make(chan error) + return done, func() { done <- exp.Shutdown(ctx) } + } + + // The exporter field of the simpleSpanProcessor needs to be zeroed to + // signal it is shut down, meaning all subsequent calls to OnEnd will + // be gracefully ignored. This needs to be done synchronously to avoid + // any race condition. + // + // A closure is used to keep reference to the exporter and then the + // field is zeroed. This ensures the simpleSpanProcessor is shut down + // before the exporter. This order is important as it avoids a + // potential deadlock. If the exporter shut down operation generates a + // span, that span would need to be exported. Meaning, OnEnd would be + // called and try acquiring the lock that is held here. + ssp.exporterMu.Lock() + done, shutdown := stopFunc(ssp.exporter) + ssp.exporter = nil + ssp.exporterMu.Unlock() + + go shutdown() + + // Wait for the exporter to shut down or the deadline to expire. + select { + case err = <-done: + case <-ctx.Done(): + // It is possible for the exporter to have immediately shut down + // and the context to be done simultaneously. 
In that case this + // outer select statement will randomly choose a case. This will + // result in a different returned error for similar scenarios. + // Instead, double check if the exporter shut down at the same + // time and return that error if so. This will ensure consistency + // as well as ensure the caller knows the exporter shut down + // successfully (they can already determine if the deadline is + // expired given they passed the context). + select { + case err = <-done: + default: + err = ctx.Err() + } + } + }) + return err +} + +// ForceFlush does nothing as there is no data to flush. +func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { + return nil +} + +// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. +func (ssp *simpleSpanProcessor) MarshalLog() interface{} { + return struct { + Type string + Exporter SpanExporter + }{ + Type: "SimpleSpanProcessor", + Exporter: ssp.exporter, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go new file mode 100644 index 0000000000..0349b2f198 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" +) + +// snapshot is an record of a spans state at a particular checkpointed time. +// It is used as a read-only representation of that state. +type snapshot struct { + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []Event + links []Link + status Status + childSpanCount int + droppedAttributeCount int + droppedEventCount int + droppedLinkCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope +} + +var _ ReadOnlySpan = snapshot{} + +func (s snapshot) private() {} + +// Name returns the name of the span. +func (s snapshot) Name() string { + return s.name +} + +// SpanContext returns the unique SpanContext that identifies the span. +func (s snapshot) SpanContext() trace.SpanContext { + return s.spanContext +} + +// Parent returns the unique SpanContext that identifies the parent of the +// span if one exists. If the span has no parent the returned SpanContext +// will be invalid. +func (s snapshot) Parent() trace.SpanContext { + return s.parent +} + +// SpanKind returns the role the span plays in a Trace. +func (s snapshot) SpanKind() trace.SpanKind { + return s.spanKind +} + +// StartTime returns the time the span started recording. +func (s snapshot) StartTime() time.Time { + return s.startTime +} + +// EndTime returns the time the span stopped recording. It will be zero if +// the span has not ended. +func (s snapshot) EndTime() time.Time { + return s.endTime +} + +// Attributes returns the defining attributes of the span. 
+func (s snapshot) Attributes() []attribute.KeyValue { + return s.attributes +} + +// Links returns all the links the span has to other spans. +func (s snapshot) Links() []Link { + return s.links +} + +// Events returns all the events that occurred within in the spans +// lifetime. +func (s snapshot) Events() []Event { + return s.events +} + +// Status returns the spans status. +func (s snapshot) Status() Status { + return s.status +} + +// InstrumentationScope returns information about the instrumentation +// scope that created the span. +func (s snapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} + +// InstrumentationLibrary returns information about the instrumentation +// library that created the span. +func (s snapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationScope +} + +// Resource returns information about the entity that produced the span. +func (s snapshot) Resource() *resource.Resource { + return s.resource +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s snapshot) DroppedAttributes() int { + return s.droppedAttributeCount +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s snapshot) DroppedLinks() int { + return s.droppedLinkCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s snapshot) DroppedEvents() int { + return s.droppedEventCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. 
+func (s snapshot) ChildSpanCount() int { + return s.childSpanCount +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go new file mode 100644 index 0000000000..5abb0b274d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -0,0 +1,828 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "fmt" + "reflect" + "runtime" + rt "runtime/trace" + "strings" + "sync" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/internal" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +// ReadOnlySpan allows reading information from the data structure underlying a +// trace.Span. It is used in places where reading information from a span is +// necessary but changing the span isn't necessary or allowed. +// +// Warning: methods may be added to this interface in minor releases. +type ReadOnlySpan interface { + // Name returns the name of the span. + Name() string + // SpanContext returns the unique SpanContext that identifies the span. 
+ SpanContext() trace.SpanContext + // Parent returns the unique SpanContext that identifies the parent of the + // span if one exists. If the span has no parent the returned SpanContext + // will be invalid. + Parent() trace.SpanContext + // SpanKind returns the role the span plays in a Trace. + SpanKind() trace.SpanKind + // StartTime returns the time the span started recording. + StartTime() time.Time + // EndTime returns the time the span stopped recording. It will be zero if + // the span has not ended. + EndTime() time.Time + // Attributes returns the defining attributes of the span. + // The order of the returned attributes is not guaranteed to be stable across invocations. + Attributes() []attribute.KeyValue + // Links returns all the links the span has to other spans. + Links() []Link + // Events returns all the events that occurred within in the spans + // lifetime. + Events() []Event + // Status returns the spans status. + Status() Status + // InstrumentationScope returns information about the instrumentation + // scope that created the span. + InstrumentationScope() instrumentation.Scope + // InstrumentationLibrary returns information about the instrumentation + // library that created the span. + // Deprecated: please use InstrumentationScope instead. + InstrumentationLibrary() instrumentation.Library + // Resource returns information about the entity that produced the span. + Resource() *resource.Resource + // DroppedAttributes returns the number of attributes dropped by the span + // due to limits being reached. + DroppedAttributes() int + // DroppedLinks returns the number of links dropped by the span due to + // limits being reached. + DroppedLinks() int + // DroppedEvents returns the number of events dropped by the span due to + // limits being reached. + DroppedEvents() int + // ChildSpanCount returns the count of spans that consider the span a + // direct parent. 
+ ChildSpanCount() int + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() +} + +// ReadWriteSpan exposes the same methods as trace.Span and in addition allows +// reading information from the underlying data structure. +// This interface exposes the union of the methods of trace.Span (which is a +// "write-only" span) and ReadOnlySpan. New methods for writing or reading span +// information should be added under trace.Span or ReadOnlySpan, respectively. +// +// Warning: methods may be added to this interface in minor releases. +type ReadWriteSpan interface { + trace.Span + ReadOnlySpan +} + +// recordingSpan is an implementation of the OpenTelemetry Span API +// representing the individual component of a trace that is sampled. +type recordingSpan struct { + // mu protects the contents of this span. + mu sync.Mutex + + // parent holds the parent span of this span as a trace.SpanContext. + parent trace.SpanContext + + // spanKind represents the kind of this span as a trace.SpanKind. + spanKind trace.SpanKind + + // name is the name of this span. + name string + + // startTime is the time at which this span was started. + startTime time.Time + + // endTime is the time at which this span was ended. It contains the zero + // value of time.Time until the span is ended. + endTime time.Time + + // status is the status of this span. + status Status + + // childSpanCount holds the number of child spans created for this span. + childSpanCount int + + // spanContext holds the SpanContext of this span. + spanContext trace.SpanContext + + // attributes is a collection of user provided key/values. The collection + // is constrained by a configurable maximum held by the parent + // TracerProvider. When additional attributes are added after this maximum + // is reached these attributes the user is attempting to add are dropped. 
+ // This dropped number of attributes is tracked and reported in the + // ReadOnlySpan exported when the span ends. + attributes []attribute.KeyValue + droppedAttributes int + + // events are stored in FIFO queue capped by configured limit. + events evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links evictedQueue + + // executionTracerTaskEnd ends the execution tracer span. + executionTracerTaskEnd func() + + // tracer is the SDK tracer that created this span. + tracer *tracer +} + +var _ ReadWriteSpan = (*recordingSpan)(nil) +var _ runtimeTracer = (*recordingSpan)(nil) + +// SpanContext returns the SpanContext of this span. +func (s *recordingSpan) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + return s.spanContext +} + +// IsRecording returns if this span is being recorded. If this span has ended +// this will return false. +func (s *recordingSpan) IsRecording() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + + return s.endTime.IsZero() +} + +// SetStatus sets the status of the Span in the form of a code and a +// description, overriding previous values set. The description is only +// included in the set status when the code is for an error. If this span is +// not being recorded than this method does nothing. +func (s *recordingSpan) SetStatus(code codes.Code, description string) { + if !s.IsRecording() { + return + } + s.mu.Lock() + defer s.mu.Unlock() + if s.status.Code > code { + return + } + + status := Status{Code: code} + if code == codes.Error { + status.Description = description + } + + s.status = status +} + +// SetAttributes sets attributes of this span. +// +// If a key from attributes already exists the value associated with that key +// will be overwritten with the value contained in attributes. +// +// If this span is not being recorded than this method does nothing. 
+// +// If adding attributes to the span would exceed the maximum amount of +// attributes the span is configured to have, the last added attributes will +// be dropped. +func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { + if !s.IsRecording() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := s.tracer.provider.spanLimits.AttributeCountLimit + if limit == 0 { + // No attributes allowed. + s.droppedAttributes += len(attributes) + return + } + + // If adding these attributes could exceed the capacity of s perform a + // de-duplication and truncation while adding to avoid over allocation. + if limit > 0 && len(s.attributes)+len(attributes) > limit { + s.addOverCapAttrs(limit, attributes) + return + } + + // Otherwise, add without deduplication. When attributes are read they + // will be deduplicated, optimizing the operation. + for _, a := range attributes { + if !a.Valid() { + // Drop all invalid attributes. + s.droppedAttributes++ + continue + } + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) + s.attributes = append(s.attributes, a) + } +} + +// addOverCapAttrs adds the attributes attrs to the span s while +// de-duplicating the attributes of s and attrs and dropping attributes that +// exceed the limit. +// +// This method assumes s.mu.Lock is held by the caller. +// +// This method should only be called when there is a possibility that adding +// attrs to s will exceed the limit. Otherwise, attrs should be added to s +// without checking for duplicates and all retrieval methods of the attributes +// for s will de-duplicate as needed. +// +// This method assumes limit is a value > 0. The argument should be validated +// by the caller. +func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { + // In order to not allocate more capacity to s.attributes than needed, + // prune and truncate this addition of attributes while adding. 
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+
+	// Now that s.attributes is deduplicated, adding unique attributes up to
+	// the capacity of s will not over allocate s.attributes.
+	for _, a := range attrs {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.droppedAttributes++
+			continue
+		}
+
+		if idx, ok := exists[a.Key]; ok {
+			// Perform all updates before dropping, even when at capacity.
+			s.attributes[idx] = a
+			continue
+		}
+
+		if len(s.attributes) >= limit {
+			// Do not just drop all of the remaining attributes, make sure
+			// updates are checked and performed.
+			s.droppedAttributes++
+		} else {
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+			s.attributes = append(s.attributes, a)
+			exists[a.Key] = len(s.attributes) - 1
+		}
+	}
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+	if limit < 0 {
+		return attr
+	}
+	switch attr.Value.Type() {
+	case attribute.STRING:
+		if v := attr.Value.AsString(); len(v) > limit {
+			return attr.Key.String(safeTruncate(v, limit))
+		}
+	case attribute.STRINGSLICE:
+		v := attr.Value.AsStringSlice()
+		for i := range v {
+			if len(v[i]) > limit {
+				v[i] = safeTruncate(v[i], limit)
+			}
+		}
+		return attr.Key.StringSlice(v)
+	}
+	return attr
+}
+
+// safeTruncate truncates the string and guarantees valid UTF-8 is returned.
+func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + +// End ends the span. This method does nothing if the span is already ended or +// is not being recorded. +// +// The only SpanOption currently supported is WithTimestamp which will set the +// end time for a Span's life-cycle. +// +// If this method is called while panicking an error event is added to the +// Span before ending it and the panic is continued. +func (s *recordingSpan) End(options ...trace.SpanEndOption) { + // Do not start by checking if the span is being recorded which requires + // acquiring a lock. Make a minimal check that the span is not nil. + if s == nil { + return + } + + // Store the end time as soon as possible to avoid artificially increasing + // the span's duration in case some operation below takes a while. + et := internal.MonotonicEndTime(s.startTime) + + // Do relative expensive check now that we have an end time and see if we + // need to do any more processing. + if !s.IsRecording() { + return + } + + config := trace.NewSpanEndConfig(options...) + if recovered := recover(); recovered != nil { + // Record but don't stop the panic. 
+ defer panic(recovered) + opts := []trace.EventOption{ + trace.WithAttributes( + semconv.ExceptionTypeKey.String(typeStr(recovered)), + semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)), + ), + } + + if config.StackTrace() { + opts = append(opts, trace.WithAttributes( + semconv.ExceptionStacktraceKey.String(recordStackTrace()), + )) + } + + s.addEvent(semconv.ExceptionEventName, opts...) + } + + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + + s.mu.Lock() + // Setting endTime to non-zero marks the span as ended and not recording. + if config.Timestamp().IsZero() { + s.endTime = et + } else { + s.endTime = config.Timestamp() + } + s.mu.Unlock() + + sps := s.tracer.provider.spanProcessors.Load().(spanProcessorStates) + if len(sps) == 0 { + return + } + snap := s.snapshot() + for _, sp := range sps { + sp.sp.OnEnd(snap) + } +} + +// RecordError will record err as a span event for this span. An additional call to +// SetStatus is required if the Status of the Span should be set to Error, this method +// does not change the Span status. If this span is not being recorded or err is nil +// than this method does nothing. +func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.IsRecording() { + return + } + + opts = append(opts, trace.WithAttributes( + semconv.ExceptionTypeKey.String(typeStr(err)), + semconv.ExceptionMessageKey.String(err.Error()), + )) + + c := trace.NewEventConfig(opts...) + if c.StackTrace() { + opts = append(opts, trace.WithAttributes( + semconv.ExceptionStacktraceKey.String(recordStackTrace()), + )) + } + + s.addEvent(semconv.ExceptionEventName, opts...) +} + +func typeStr(i interface{}) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func recordStackTrace() string { + stackTrace := make([]byte, 2048) + n := runtime.Stack(stackTrace, false) + + return string(stackTrace[0:n]) +} + +// AddEvent adds an event with the provided name and options. If this span is +// not being recorded than this method does nothing. +func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { + if !s.IsRecording() { + return + } + s.addEvent(name, o...) +} + +func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { + c := trace.NewEventConfig(o...) + e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} + + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit + if limit == 0 { + // Drop all attributes. + e.DroppedAttributeCount = len(e.Attributes) + e.Attributes = nil + } else if limit > 0 && len(e.Attributes) > limit { + // Drop over capacity. + e.DroppedAttributeCount = len(e.Attributes) - limit + e.Attributes = e.Attributes[:limit] + } + + s.mu.Lock() + s.events.add(e) + s.mu.Unlock() +} + +// SetName sets the name of this span. If this span is not being recorded than +// this method does nothing. +func (s *recordingSpan) SetName(name string) { + if !s.IsRecording() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + s.name = name +} + +// Name returns the name of this span. +func (s *recordingSpan) Name() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.name +} + +// Name returns the SpanContext of this span's parent span. +func (s *recordingSpan) Parent() trace.SpanContext { + s.mu.Lock() + defer s.mu.Unlock() + return s.parent +} + +// SpanKind returns the SpanKind of this span. +func (s *recordingSpan) SpanKind() trace.SpanKind { + s.mu.Lock() + defer s.mu.Unlock() + return s.spanKind +} + +// StartTime returns the time this span started. 
+func (s *recordingSpan) StartTime() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.startTime +} + +// EndTime returns the time this span ended. For spans that have not yet +// ended, the returned value will be the zero value of time.Time. +func (s *recordingSpan) EndTime() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.endTime +} + +// Attributes returns the attributes of this span. +// +// The order of the returned attributes is not guaranteed to be stable. +func (s *recordingSpan) Attributes() []attribute.KeyValue { + s.mu.Lock() + defer s.mu.Unlock() + s.dedupeAttrs() + return s.attributes +} + +// dedupeAttrs deduplicates the attributes of s to fit capacity. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) dedupeAttrs() { + // Do not set a capacity when creating this map. Benchmark testing has + // showed this to only add unused memory allocations in general use. + exists := make(map[attribute.Key]int) + s.dedupeAttrsFromRecord(&exists) +} + +// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity +// using record as the record of unique attribute keys to their index. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { + // Use the fact that slices share the same backing array. + unique := s.attributes[:0] + for _, a := range s.attributes { + if idx, ok := (*record)[a.Key]; ok { + unique[idx] = a + } else { + unique = append(unique, a) + (*record)[a.Key] = len(unique) - 1 + } + } + // s.attributes have element types of attribute.KeyValue. These types are + // not pointers and they themselves do not contain pointer fields, + // therefore the duplicate values do not need to be zeroed for them to be + // garbage collected. + s.attributes = unique +} + +// Links returns the links of this span. 
+func (s *recordingSpan) Links() []Link { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.links.queue) == 0 { + return []Link{} + } + return s.interfaceArrayToLinksArray() +} + +// Events returns the events of this span. +func (s *recordingSpan) Events() []Event { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.events.queue) == 0 { + return []Event{} + } + return s.interfaceArrayToEventArray() +} + +// Status returns the status of this span. +func (s *recordingSpan) Status() Status { + s.mu.Lock() + defer s.mu.Unlock() + return s.status +} + +// InstrumentationScope returns the instrumentation.Scope associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + +// InstrumentationLibrary returns the instrumentation.Library associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + +// Resource returns the Resource associated with the Tracer that created this +// span. +func (s *recordingSpan) Resource() *resource.Resource { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.provider.resource +} + +func (s *recordingSpan) addLink(link trace.Link) { + if !s.IsRecording() || !link.SpanContext.IsValid() { + return + } + + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} + + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit + if limit == 0 { + // Drop all attributes. 
+ l.DroppedAttributeCount = len(l.Attributes) + l.Attributes = nil + } else if limit > 0 && len(l.Attributes) > limit { + l.DroppedAttributeCount = len(l.Attributes) - limit + l.Attributes = l.Attributes[:limit] + } + + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *recordingSpan) DroppedAttributes() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.droppedAttributes +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s *recordingSpan) DroppedLinks() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.links.droppedCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s *recordingSpan) DroppedEvents() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.events.droppedCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. +func (s *recordingSpan) ChildSpanCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.childSpanCount +} + +// TracerProvider returns a trace.TracerProvider that can be used to generate +// additional Spans on the same telemetry pipeline as the current Span. +func (s *recordingSpan) TracerProvider() trace.TracerProvider { + return s.tracer.provider +} + +// snapshot creates a read-only copy of the current state of the span. 
+func (s *recordingSpan) snapshot() ReadOnlySpan { + var sd snapshot + s.mu.Lock() + defer s.mu.Unlock() + + sd.endTime = s.endTime + sd.instrumentationScope = s.tracer.instrumentationScope + sd.name = s.name + sd.parent = s.parent + sd.resource = s.tracer.provider.resource + sd.spanContext = s.spanContext + sd.spanKind = s.spanKind + sd.startTime = s.startTime + sd.status = s.status + sd.childSpanCount = s.childSpanCount + + if len(s.attributes) > 0 { + s.dedupeAttrs() + sd.attributes = s.attributes + } + sd.droppedAttributeCount = s.droppedAttributes + if len(s.events.queue) > 0 { + sd.events = s.interfaceArrayToEventArray() + sd.droppedEventCount = s.events.droppedCount + } + if len(s.links.queue) > 0 { + sd.links = s.interfaceArrayToLinksArray() + sd.droppedLinkCount = s.links.droppedCount + } + return &sd +} + +func (s *recordingSpan) interfaceArrayToLinksArray() []Link { + linkArr := make([]Link, 0) + for _, value := range s.links.queue { + linkArr = append(linkArr, value.(Link)) + } + return linkArr +} + +func (s *recordingSpan) interfaceArrayToEventArray() []Event { + eventArr := make([]Event, 0) + for _, value := range s.events.queue { + eventArr = append(eventArr, value.(Event)) + } + return eventArr +} + +func (s *recordingSpan) addChild() { + if !s.IsRecording() { + return + } + s.mu.Lock() + s.childSpanCount++ + s.mu.Unlock() +} + +func (*recordingSpan) private() {} + +// runtimeTrace starts a "runtime/trace".Task for the span and returns a +// context containing the task. +func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + if !rt.IsEnabled() { + // Avoid additional overhead if runtime/trace is not enabled. + return ctx + } + nctx, task := rt.NewTask(ctx, s.name) + + s.mu.Lock() + s.executionTracerTaskEnd = task.End + s.mu.Unlock() + + return nctx +} + +// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API +// that wraps a SpanContext. 
It performs no operations other than to return +// the wrapped SpanContext or TracerProvider that created it. +type nonRecordingSpan struct { + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +// TracerProvider returns the trace.TracerProvider that provided the Tracer +// that created this span. +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } + +func isRecording(s SamplingResult) bool { + return s.Decision == RecordOnly || s.Decision == RecordAndSample +} + +func isSampled(s SamplingResult) bool { + return s.Decision == RecordAndSample +} + +// Status is the classified state of a Span. +type Status struct { + // Code is an identifier of a Spans state classification. + Code codes.Code + // Description is a user hint about why that status was set. It is only + // applicable when Code is Error. 
+ Description string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go new file mode 100644 index 0000000000..9fb3d6eac3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "context" + +// SpanExporter handles the delivery of spans to external receivers. This is +// the final component in the trace export pipeline. +type SpanExporter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ExportSpans exports a batch of spans. + // + // This function is called synchronously, so there is no concurrency + // safety requirement. However, due to the synchronous calling pattern, + // it is critical that all timeouts and cancellations contained in the + // passed context must be honored. + // + // Any retry logic must be contained in this function. The SDK that + // calls this function will not implement any retry logic. All errors + // returned by this function are considered unrecoverable and will be + // reported to a configured error Handler. 
+ ExportSpans(ctx context.Context, spans []ReadOnlySpan) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The + // exporter is expected to preform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go new file mode 100644 index 0000000000..aa4d4221db --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -0,0 +1,125 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "go.opentelemetry.io/otel/sdk/internal/env" + +const ( + // DefaultAttributeValueLengthLimit is the default maximum allowed + // attribute value length, unlimited. + DefaultAttributeValueLengthLimit = -1 + + // DefaultAttributeCountLimit is the default maximum number of attributes + // a span can have. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum number of events a span + // can have. 
+ DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum number of links a span can + // have. + DefaultLinkCountLimit = 128 + + // DefaultAttributePerEventCountLimit is the default maximum number of + // attributes a span event can have. + DefaultAttributePerEventCountLimit = 128 + + // DefaultAttributePerLinkCountLimit is the default maximum number of + // attributes a span link can have. + DefaultAttributePerLinkCountLimit = 128 +) + +// SpanLimits represents the limits of a span. +type SpanLimits struct { + // AttributeValueLengthLimit is the maximum allowed attribute value length. + // + // This limit only applies to string and string slice attribute values. + // Any string longer than this value will be truncated to this length. + // + // Setting this to a negative value means no limit is applied. + AttributeValueLengthLimit int + + // AttributeCountLimit is the maximum allowed span attribute count. Any + // attribute added to a span once this limit is reached will be dropped. + // + // Setting this to zero means no attributes will be recorded. + // + // Setting this to a negative value means no limit is applied. + AttributeCountLimit int + + // EventCountLimit is the maximum allowed span event count. Any event + // added to a span once this limit is reached means it will be added but + // the oldest event will be dropped. + // + // Setting this to zero means no events we be recorded. + // + // Setting this to a negative value means no limit is applied. + EventCountLimit int + + // LinkCountLimit is the maximum allowed span link count. Any link added + // to a span once this limit is reached means it will be added but the + // oldest link will be dropped. + // + // Setting this to zero means no links we be recorded. + // + // Setting this to a negative value means no limit is applied. + LinkCountLimit int + + // AttributePerEventCountLimit is the maximum number of attributes allowed + // per span event. 
Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for events. + // + // Setting this to a negative value means no limit is applied. + AttributePerEventCountLimit int + + // AttributePerLinkCountLimit is the maximum number of attributes allowed + // per span link. Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for links. + // + // Setting this to a negative value means no limit is applied. + AttributePerLinkCountLimit int +} + +// NewSpanLimits returns a SpanLimits with all limits set to the value their +// corresponding environment variable holds, or the default if unset. +// +// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT +// (default: unlimited) +// +// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) +// +// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) +// +// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: +// 128) +// +// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) +// +// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) +func NewSpanLimits() SpanLimits { + return SpanLimits{ + AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), + AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), + EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), + LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), + AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), + AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go new file mode 100644 index 0000000000..e6ae193521 --- /dev/null +++ 
b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" +) + +// SpanProcessor is a processing pipeline for spans in the trace signal. +// SpanProcessors registered with a TracerProvider and are called at the start +// and end of a Span's lifecycle, and are called in the order they are +// registered. +type SpanProcessor interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // OnStart is called when a span is started. It is called synchronously + // and should not block. + OnStart(parent context.Context, s ReadWriteSpan) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // OnEnd is called when span is finished. It is called synchronously and + // hence not block. + OnEnd(s ReadOnlySpan) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown is called when the SDK shuts down. Any cleanup or release of + // resources held by the processor should be done in this call. + // + // Calls to OnStart, OnEnd, or ForceFlush after this has been called + // should be ignored. 
+ // + // All timeouts and cancellations contained in ctx must be honored, this + // should not block indefinitely. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush exports all ended spans to the configured Exporter that have not yet + // been exported. It should only be called when absolutely necessary, such as when + // using a FaaS provider that may suspend the process after an invocation, but before + // the Processor can export the completed spans. + ForceFlush(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +type spanProcessorState struct { + sp SpanProcessor + state *sync.Once +} + +func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { + return &spanProcessorState{sp: sp, state: &sync.Once{}} +} + +type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go new file mode 100644 index 0000000000..f17d924b89 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -0,0 +1,161 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/trace" +) + +type tracer struct { + provider *TracerProvider + instrumentationScope instrumentation.Scope +} + +var _ trace.Tracer = &tracer{} + +// Start starts a Span and returns it along with a context containing it. +// +// The Span is created with the provided name and as a child of any existing +// span context found in the passed context. The created Span will be +// configured appropriately by any SpanOption passed. +func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { + config := trace.NewSpanStartConfig(options...) + + if ctx == nil { + // Prevent trace.ContextWithSpan from panicking. + ctx = context.Background() + } + + // For local spans created by this SDK, track child span count. + if p := trace.SpanFromContext(ctx); p != nil { + if sdkSpan, ok := p.(*recordingSpan); ok { + sdkSpan.addChild() + } + } + + s := tr.newSpan(ctx, name, &config) + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { + sps := tr.provider.spanProcessors.Load().(spanProcessorStates) + for _, sp := range sps { + sp.sp.OnStart(ctx, rw) + } + } + if rtt, ok := s.(runtimeTracer); ok { + ctx = rtt.runtimeTrace(ctx) + } + + return trace.ContextWithSpan(ctx, s), s +} + +type runtimeTracer interface { + // runtimeTrace starts a "runtime/trace".Task for the span and + // returns a context containing the task. + runtimeTrace(ctx context.Context) context.Context +} + +// newSpan returns a new configured span. +func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { + // If told explicitly to make this a new root use a zero value SpanContext + // as a parent which contains an invalid trace ID and is not remote. 
+ var psc trace.SpanContext + if config.NewRoot() { + ctx = trace.ContextWithSpanContext(ctx, psc) + } else { + psc = trace.SpanContextFromContext(ctx) + } + + // If there is a valid parent trace ID, use it to ensure the continuity of + // the trace. Always generate a new span ID so other components can rely + // on a unique span ID, even if the Span is non-recording. + var tid trace.TraceID + var sid trace.SpanID + if !psc.TraceID().IsValid() { + tid, sid = tr.provider.idGenerator.NewIDs(ctx) + } else { + tid = psc.TraceID() + sid = tr.provider.idGenerator.NewSpanID(ctx, tid) + } + + samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{ + ParentContext: ctx, + TraceID: tid, + Name: name, + Kind: config.SpanKind(), + Attributes: config.Attributes(), + Links: config.Links(), + }) + + scc := trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + TraceState: samplingResult.Tracestate, + } + if isSampled(samplingResult) { + scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled + } else { + scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled + } + sc := trace.NewSpanContext(scc) + + if !isRecording(samplingResult) { + return tr.newNonRecordingSpan(sc) + } + return tr.newRecordingSpan(psc, sc, name, samplingResult, config) +} + +// newRecordingSpan returns a new configured recordingSpan. +func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { + startTime := config.Timestamp() + if startTime.IsZero() { + startTime = time.Now() + } + + s := &recordingSpan{ + // Do not pre-allocate the attributes slice here! Doing so will + // allocate memory that is likely never going to be used, or if used, + // will be over-sized. The default Go compiler has been tested to + // dynamically allocate needed space very well. 
Benchmarking has shown + // it to be more performant than what we can predetermine here, + // especially for the common use case of few to no added + // attributes. + + parent: psc, + spanContext: sc, + spanKind: trace.ValidateSpanKind(config.SpanKind()), + name: name, + startTime: startTime, + events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + tracer: tr, + } + + for _, l := range config.Links() { + s.addLink(l) + } + + s.SetAttributes(sr.Attributes...) + s.SetAttributes(config.Attributes()...) + + return s +} + +// newNonRecordingSpan returns a new configured nonRecordingSpan. +func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { + return nonRecordingSpan{tracer: tr, sc: sc} +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go new file mode 100644 index 0000000000..b580eedeff --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -0,0 +1,336 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package internal // import "go.opentelemetry.io/otel/semconv/internal" + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SemanticConventions are the semantic convention values defined for a +// version of the OpenTelemetry specification. +type SemanticConventions struct { + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPHostKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPServerNameKey attribute.Key + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key + NetHostIPKey attribute.Key + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerIPKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetTransportIP attribute.KeyValue + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportUnix attribute.KeyValue +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. 
+func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + switch network { + case "tcp", "tcp4", "tcp6": + attrs = append(attrs, sc.NetTransportTCP) + case "udp", "udp4", "udp6": + attrs = append(attrs, sc.NetTransportUDP) + case "ip", "ip4", "ip6": + attrs = append(attrs, sc.NetTransportIP) + case "unix", "unixgram", "unixpacket": + attrs = append(attrs, sc.NetTransportUnix) + default: + attrs = append(attrs, sc.NetTransportOther) + } + + peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) + if peerIP != "" { + attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) + } + if peerName != "" { + attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) + } + if peerPort != 0 { + attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) + } + + hostIP, hostName, hostPort := "", "", 0 + for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { + hostIP, hostName, hostPort = hostIPNamePort(someHost) + if hostIP != "" || hostName != "" || hostPort != 0 { + break + } + } + if hostIP != "" { + attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) + } + if hostName != "" { + attrs = append(attrs, sc.NetHostNameKey.String(hostName)) + } + if hostPort != 0 { + attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) + } + + return attrs +} + +// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. +// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized +// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the +// host portion will instead be returned in `name`. 
+func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { + var ( + hostPart, portPart string + parsedPort uint64 + err error + ) + if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { + hostPart, portPart = hostWithPort, "" + } + if parsedIP := net.ParseIP(hostPart); parsedIP != nil { + ip = parsedIP.String() + } else { + name = hostPart + } + if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { + port = int(parsedPort) + } + return +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. +func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{sc.EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) 
+} + +func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, sc.HTTPSchemeHTTPS) + } else { + attrs = append(attrs, sc.HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) + } else if request.URL != nil && request.URL.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) + } + + if request.Method != "" { + attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) 
+} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPTargetKey.String(request.RequestURI), + } + + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } + if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { + if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { + attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) + } + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. 
+func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPStatusCodeKey.Int(code), + } + return attrs +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return spanCode, "" +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. 
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+	spanCode, valid := validateHTTPStatusCode(code)
+	if !valid {
+		return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	category := code / 100
+	// Per the spec's "Exclude 4xx for SERVER" rule above: a 4xx response does
+	// not mark a SERVER span as an error, so its status is left Unset.
+	if spanKind == trace.SpanKindServer && category == 4 {
+		return codes.Unset, ""
+	}
+	return spanCode, ""
+}
+
+// validateHTTPStatusCode validates the HTTP status code and returns
+// corresponding span status code. If the `code` is not a valid HTTP status
+// code, returns span status Error and false.
+func validateHTTPStatusCode(code int) (codes.Code, bool) {
+	category := code / 100
+	ranges, ok := validRangesPerCategory[category]
+	if !ok {
+		return codes.Error, false
+	}
+	ok = false
+	for _, crange := range ranges {
+		ok = crange.contains(code)
+		if ok {
+			break
+		}
+	}
+	if !ok {
+		return codes.Error, false
+	}
+	// Known 1xx-3xx codes map to Unset; known 4xx/5xx codes map to Error.
+	if category > 0 && category < 4 {
+		return codes.Unset, true
+	}
+	return codes.Error, true
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
new file mode 100644
index 0000000000..181fcc9c52
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.12.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go new file mode 100644 index 0000000000..d689270943 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go new file mode 100644 index 0000000000..4b4f3cbaf0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" +) + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) + +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. 
+func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + return sc.NetAttributesFromHTTPRequest(network, request) +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. +func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.EndUserAttributesFromHTTPRequest(request) +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.HTTPClientAttributesFromHTTPRequest(request) +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. +func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + return sc.HTTPAttributesFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. 
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go new file mode 100644 index 0000000000..b2155676f4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go @@ -0,0 +1,1042 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). 
+const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (navigator.userAgentData.brands). + BrowserBrandsKey = attribute.Key("browser.brands") + // The platform on which the browser is running + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (navigator.userAgentData.platform). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in the + // [os.type and os.name attributes](./os.md). However, for consistency, the values + // in the `browser.platform` attribute should capture the exact value that the + // user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + // Full user-agent string provided by the browser + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` + // API can be used. 
+ BrowserUserAgentKey = attribute.Key("browser.user_agent") +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- + // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- + // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- + // us/global-infrastructure/geographies/), [Google Cloud + // regions](https://cloud.google.com/about/locations), or [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = 
CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. 
GDPR and data + // protection laws may apply, ensure you do your own due diligence. + DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + // The name of the device manufacturer + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- + // general.md#source-code-attributes) + // span attributes). + + // For some cloud providers, the above definition is ambiguous. 
The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the single function that this runtime instance executes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so consider setting `faas.id` as a span attribute instead. + + // The exact value to use for `faas.id` depends on the cloud provider: + + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // aliases.html) + // with the resolved function version, as the same runtime instance may be + // invokable with + // multiple different aliases. + // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- + // resource-names) + // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- + // us/rest/api/resources/resources/get-by-id) of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.We + // b/sites//functions/`. 
+ // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + // The immutable version of the function being executed. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env- + // var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. 
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name + // (`container.name`). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. 
+ // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. 
+ // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. 
+ // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// Resource describing the packaged software running the 
application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go new file mode 100644 index 0000000000..2f2a019e43 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go new file mode 100644 index 0000000000..047d8e95cc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go @@ -0,0 +1,1704 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import "go.opentelemetry.io/otel/attribute" + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` + // applicable). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. 
These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec + // .md#id) uniquely identifies the event. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m + // d#source-1) identifies the context in which an event happened. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + // The [version of the CloudEvents specification](https://github.com/cloudevents/s + // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp + // ec.md#type) contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. + // md#subject) of the event in the context of the event producer (identified by + // source). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Required: Required, if applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". In case there are multiple layers that could be considered for database + // name (e.g. Oracle instance name and schema name), the database name to be used + // is the more specific layer (e.g. Oracle schema name). + DBNameKey = attribute.Key("db.name") + // The database statement being executed. + // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. 
If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + 
DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). 
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // Required: No + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). 
+ // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in flight". + // This may be actually "in flight" in some languages (e.g. if the exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most languages. + + // It is usually not possible to determine at the point where an exception is + // thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the [example above](#recording-an-exception). + + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. 
+ // + // Type: Enum + // Required: No + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Required: No + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. 
+ // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + // Remote address of the peer (dotted decimal for IPv4 or + // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + NetPeerIPKey = attribute.Key("net.peer.ip") + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + NetHostIPKey = attribute.Key("net.host.ip") + // Like `net.peer.port` but for the host port. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + NetHostPortKey = attribute.Key("net.host.port") + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + // The internet connection type currently being used by the host. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Another IP-based protocol + NetTransportIP = NetTransportKey.String("ip") + // Unix Domain socket. 
See below + NetTransportUnix = NetTransportKey.String("unix") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + HTTPTargetKey = attribute.Key("http.target") + // The value of the [HTTP host + // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header + // should also be reported, see note. 
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'www.example.org'
+ // Note: When the header is present but empty the attribute SHOULD be set to the
+ // empty string. Note that this is a valid situation that is expected in certain
+ // cases, according to the aforementioned [section of RFC
+ // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not
+ // set the attribute MUST NOT be set.
+ HTTPHostKey = attribute.Key("http.host")
+ // The URI scheme identifying the used protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+ // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // Required: If and only if one was received/sent.
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+ // Kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP`
+ // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+ // Value of the [HTTP User-
+ // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the
+ // client.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+ // The size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For
+ // requests using transport encoding, this should be the compressed size.
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") + // The ordinal number of request re-sending attempt. + // + // Type: int + // Required: If and only if a request was retried. + // Stability: stable + // Examples: 3 + HTTPRetryCountKey = attribute.Key("http.retry_count") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. 
This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( `net.host.name` should be used instead). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `http.url` is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus + // preferred to supply the raw data that is available. + HTTPServerNameKey = attribute.Key("http.server_name") + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.peer.ip`, which would + // identify the network-level peer, which may be a proxy. + + // This attribute should be set when a source of information different + // from the one used for `net.peer.ip`, is available even if that other + // source just confirms the same value as `net.peer.ip`. + // Rationale: For `net.peer.ip`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.peer.ip` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. 
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// DynamoDB.CreateTable
+const (
+ // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request
+ // field
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+ // number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+ // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+ // The value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+ // The number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+ // The value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the `Segment` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+ // The value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+ // The value of the `Count` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+ // The value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+ // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+ // request field.
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + MessagingDestinationKey = attribute.Key("messaging.destination") + // The kind of message destination + // + // Type: Enum + // Required: Required only if the message destination is either a `queue` or + // `topic`. + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") + // The name of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + MessagingProtocolKey = attribute.Key("messaging.protocol") + // The version of the transport protocol. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + MessagingURLKey = attribute.Key("messaging.url") + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message_id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + MessagingConversationIDKey = attribute.Key("messaging.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") + // The compressed size of the message payload in bytes. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // [Operation names](#operation-names) section above. If the operation is "send", + // this attribute MUST NOT be set, since the operation can be inferred from the + // span kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingOperationKey = attribute.Key("messaging.operation") + // The identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are + // present, or only `messaging.kafka.consumer_group`. For brokers, such as + // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the + // message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") +) + +var ( + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. 
+ // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message_id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + // The unique identifier for each client. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + // Type of message. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") + // The secondary classifier of message besides topic. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") + // Model of message consumption. This only applies to consumer spans. 
+ // + // Type: Enum + // Required: No + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. 
+ // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // 
OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPC received/sent message. +const ( + // Whether this is a received or sent message. 
+ // + // Type: Enum + // Required: No + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + // MUST be calculated as two different counters starting from `1` one for sent + // messages and one for received message. + // + // Type: int + // Required: No + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + // Compressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + // Uncompressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 0000000000..71a1f7748d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. 
+// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go new file mode 100644 index 0000000000..679c40c4de --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. 
It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the sHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. 
It represents the sHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the mUST be calculated as two + // different counters starting from `1` one for sent messages and one for + // received message. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. 
It represents the mUST be calculated as two different +// counters starting from `1` one for sent messages and one for received +// message. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. 
+ // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 0000000000..9b8c559de4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. 
+ ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 0000000000..d5c4b5c136 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 0000000000..39a2eab3a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,2010 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
+ // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. 
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // 
Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. 
+func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. 
It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
+func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. 
+func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. 
+func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. 
It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. 
It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). 
+ // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead. + // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. 
It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. 
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID. For Cloud, this + // value is from the provider. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized Linux +// systems, the `machine-id` located in `/etc/machine-id` or +// `/var/lib/dbus/machine-id` may be used. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. 
For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. 
+func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. 
It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. 
It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. 
It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. 
For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. 
+func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). 
Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. 
+func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OtelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + + // OtelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OtelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) +} + +// OtelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It represents the + // deprecated, use the `otel.scope.version` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) +} + +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. 
+func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 0000000000..42fc525d16 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 0000000000..8c4a7299d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3375 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. 
It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. 
CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. 
It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. 
It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. 
+const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. 
For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. 
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + 
DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. 
It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. 
This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. 
It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. 
+func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. 
+func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? 
*' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. 
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. 
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. 
+ // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. 
It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. +func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. 
It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. 
+func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. 
The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. 
+const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. 
Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) 
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. 
This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. 
This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). 
+ // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. 
It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by + // the HTTP server framework as the route attribute should have + // low-cardinality and the URI path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. 
+func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. 
It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. 
It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. 
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". 
+func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. 
It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. 
It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. 
+func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. 
When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. 
If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. 
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. 
It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. 
It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. 
The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. 
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. 
+ // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. 
+func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 0000000000..caf7249de8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 0000000000..cb3efbb9ad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,333 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. 
+func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. 
+type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. +type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) 
+ return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). 
+func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. 
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 0000000000..76f9a083c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. 
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 0000000000..ab0346f966 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 0000000000..88fcb81611 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 0000000000..73950f2077 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. 
The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{} + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that preforms no operations. +type noopTracer struct{} + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that preforms no operations. +type noopSpan struct{} + +var _ Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. 
+func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 0000000000..4aa94f79f4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,551 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. 
+type TraceID [16]byte + +var nilTraceID TraceID +var _ json.Marshaler = nilTraceID + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var nilSpanID SpanID +var _ json.Marshaler = nilSpanID + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. +func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. 
+// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. 
+func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. 
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} + +// Span is the individual component of a trace. 
It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: methods may be added to this interface in minor releases. +type Span interface { + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. 
+ SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. 
+const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. 
+func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} + +// Tracer is the creator of Spans. +// +// Warning: methods may be added to this interface in minor releases. +type Tracer interface { + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. 
+// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: methods may be added to this interface in minor releases. +type TracerProvider interface { + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. 
+ Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 0000000000..ca68a82e5f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,212 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiter = "," + + // based on the W3C Trace Context specification, see + // https://www.w3.org/TR/trace-context-1/#tracestate-header + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + 
withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + +type member struct { + Key string + Value string +} + +func newMember(key, value string) (member, error) { + if !keyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + matches := memberRe.FindStringSubmatch(m) + if len(matches) != 5 { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + + return member{ + Key: matches[1], + Value: matches[4], + }, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return fmt.Sprintf("%s=%s", m.Key, m.Value) +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. 
+func ParseTraceState(tracestate string) (TraceState, error) { + if tracestate == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for _, memberStr := range strings.Split(tracestate, listDelimiter) { + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. +func (ts TraceState) String() string { + members := make([]string, len(ts.list)) + for i, m := range ts.list { + members[i] = m.String() + } + return strings.Join(members, listDelimiter) +} + +// Get returns the value paired with key from the corresponding TraceState +// list-member if it exists, otherwise an empty string is returned. +func (ts TraceState) Get(key string) string { + for _, member := range ts.list { + if member.Key == key { + return member.Value + } + } + + return "" +} + +// Insert adds a new list-member defined by the key/value pair to the +// TraceState. If a list-member already exists for the given key, that +// list-member's value is updated. The new or updated list-member is always +// moved to the beginning of the TraceState as specified by the W3C Trace +// Context specification. 
+// +// If key or value are invalid according to the W3C Trace Context +// specification an error is returned with the original TraceState. +// +// If adding a new list-member means the TraceState would have more members +// then is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. +func (ts TraceState) Insert(key, value string) (TraceState, error) { + m, err := newMember(key, value) + if err != nil { + return ts, err + } + + cTS := ts.Delete(key) + if cTS.Len()+1 <= maxListMembers { + cTS.list = append(cTS.list, member{}) + } + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], cTS.list) + cTS.list[0] = m + + return cTS, nil +} + +// Delete returns a copy of the TraceState with the list-member identified by +// key removed. +func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState. +func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 0000000000..dbb61a4227 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. 
These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go new file mode 100644 index 0000000000..0e8e5e0232 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. +func Version() string { + return "1.14.0" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 0000000000..40df1fae41 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,57 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +module-sets: + stable-v1: + version: v1.14.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/fib + - go.opentelemetry.io/otel/example/jaeger + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/jaeger + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/otlp/internal/retry + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/trace + - go.opentelemetry.io/otel/sdk + experimental-metrics: + version: v0.37.0 + modules: + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - 
go.opentelemetry.io/otel/example/view + experimental-schema: + version: v0.0.4 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 0000000000..6c8d97b6a5 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. 
+ // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. 
+func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. 
+func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 0000000000..ee74927d3c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build generate +// +build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 0000000000..4173351230 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = 
advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 0000000000..2789f6f18d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,387 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. 
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. 
+// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. 
+func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. 
+func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. 
+// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000000..fc1835d8a2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. 
(perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, 
uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e9..a6b5081888 100644 --- 
a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go deleted file mode 100644 index d10ad66533..0000000000 --- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package field_mask aliases all exported identifiers in -// package "google.golang.org/protobuf/types/known/fieldmaskpb". -package field_mask - -import "google.golang.org/protobuf/types/known/fieldmaskpb" - -type FieldMask = fieldmaskpb.FieldMask - -var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac14..02f5dc5318 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 542594f5cc..29475e31c9 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f4f9408f38..09d61dd1b5 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -110,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -274,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metatada metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -371,3 +384,21 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. 
The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index e8dfc828aa..3929c26d31 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. 
func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go index a87b6809af..c334135810 100644 --- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct { // RecordTransition records state change happening in subConn and based on that // it evaluates what aggregated state should be. // -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. // // Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { @@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numIdle += updateVal } } + return cse.CurrentState() +} +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { // Evaluate. 
if cse.numReady > 0 { return connectivity.Ready diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index b1c23eaae0..0359956d36 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,17 +19,20 @@ package grpc import ( + "context" "fmt" "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() @@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. 
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. 
+ unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf..66d141fce7 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -261,6 +256,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. 
To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go index a220c47c59..32b7fa5794 100644 --- a/vendor/google.golang.org/grpc/channelz/channelz.go +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -23,7 +23,7 @@ // https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by // the `internal/channelz` package. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. 
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 779b03bca1..d607d4e9e2 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) if err != nil { return nil, err } @@ -503,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -831,9 +837,9 @@ func equalAddresses(a, b []resolver.Address) bool { // // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -928,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, @@ -998,7 +1004,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -1228,111 +1234,79 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. - prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - defer hcancel() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ac.state == connectivity.Shutdown { + // Already shut down. tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. return } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. 
ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - onClose() + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. 
- newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. - go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error. 
- select { - case <-prefaceReceived.Done(): - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1402,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1583,7 +1557,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget return rb, nil @@ -1604,39 +1578,26 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and endpoint. Query +// resolver.Target struct containing scheme, authority and url. 
Query // params are stripped from the endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field instead of the `Endpoint` field. - endpoint := u.Path - if endpoint == "" { - endpoint = u.Opaque - } - endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ Scheme: u.Scheme, Authority: u.Host, - Endpoint: endpoint, URL: *u, }, nil } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 96ff1877e7..5feac3aa0e 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. 
Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 784822d056..877b7cd21a 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. 
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9372dc322e..4866da101c 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -44,6 +44,7 @@ func init() { extraDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -111,13 +112,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. 
Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +143,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 18e530fc90..07a5861352 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. 
func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f664090..5de66e40d3 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" "strconv" @@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -242,7 +241,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index a66024d23e..8e29a62f16 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index c5579e6506..f9e80e27ab 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. 
+// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26d1..d71e441778 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -26,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic // in TruncatingMethodLogger as possible. 
-func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } return m @@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated return truncated } -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated // LogEntryConfig represents the configuration for binary log entry. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -159,10 +159,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -195,19 +195,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -223,7 +223,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() 
*pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -263,7 +263,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -300,15 +300,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: 
binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -324,7 +324,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -367,15 +367,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { 
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. 
break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index c2fdd58b31..264de387c2 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. 
// @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index ad0ce4dabf..7b2f350e2e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. 
delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6f02725431..5ba9d94d49 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,15 +21,42 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" -) - var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". 
+ return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index af09711a3e..04136882c7 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -20,7 +20,6 @@ package envconfig import ( "os" - "strings" ) const ( @@ -36,16 +35,6 @@ const ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" - federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" - - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -64,38 +53,40 @@ var ( // XDSRingHash indicates whether ring hash support is enabled, which can be // disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) // XDSClientSideSecurity is used to control processing of security // configuration on the client-side. 
// // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) // XDSAggregateAndDNS indicates whether processing of aggregated cluster // and DNS cluster is enabled, which can be enabled by setting the // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". - XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be disabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") - // XDSFederation indicates whether federation support is enabled. - XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". 
+ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) // XDSRLS indicates whether processing of Cluster Specifier plugins and // support for the RLS CLuster Specifier is enabled, which can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to // "true". - XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258f..b68e26a364 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 0000000000..6635f7bca9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 0000000000..9f40909679 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. 
+func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index e9c4af6483..ec62b4775e 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index fd0ee3dcaf..0a76d9de6e 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -77,6 +77,9 @@ var ( // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. 
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c5149..09a667f33c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -116,7 +116,7 @@ type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1..afac56572a 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702cac..1609116877 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf597..51e733e495 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc 
*BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513edd..b0ead4f54f 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
+func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f48f..9097385e1a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,9 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. 
When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil - } - }() + // Always flush the writer before exiting in case there are pending frames + // to be sent. + defer l.framer.writer.Flush() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +573,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -655,19 +653,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. 
+ hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -763,8 +762,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +798,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +816,13 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. 
+ return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +851,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2e0..bc8ee07474 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 090120925b..e6626bf96e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. 
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary 
metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -442,10 +457,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5c2f35b24e..79ee8aea0a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,8 +38,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -57,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. 
+ address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -99,16 +105,13 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables @@ -137,8 +140,7 @@ type http2Client struct { channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool @@ -194,7 +196,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -214,14 +216,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. 
+ if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -253,15 +281,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -299,6 +319,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -315,16 +337,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts kp: kp, statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } // Add peer information to the http2client context. 
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -363,21 +383,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -397,14 +428,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. 
if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -417,10 +446,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -507,9 +534,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. 
@@ -589,7 +629,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -618,7 +662,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -634,13 +685,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. It's possible this could succeed on another transport, even if -// it's unlikely, but do not transparently retry. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. 
If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error @@ -655,6 +706,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -679,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. 
- err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) @@ -705,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -720,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() @@ -799,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, sh.HandleRPC(s.ctx, outHeader) } } + if transportDrainRequired { + if logger.V(logLevel) { + logger.Infof("transport: t.nextID > MaxStreamID. Draining") + } + t.GracefulClose() + } return s, nil } @@ -880,20 +952,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. 
if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -943,11 +1016,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -1105,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) } statusCode = codes.Unknown } @@ -1223,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. 
@@ -1482,33 +1561,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. 
+func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { @@ -1711,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647bc..bc3da70672 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. 
Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. 
- isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(map[string][]string) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -398,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) @@ -413,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } // "Transports must consider requests containing the Connection header // as malformed." 
- A41 @@ -421,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if logger.V(logLevel) { logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") } - headerError = true + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } @@ -447,23 +448,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( logger.Errorf("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 400, + httpStatus: http.StatusBadRequest, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } - if !isGRPC || headerError { + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil } // "If :authority is missing, Host must be renamed to :authority." 
- A41 @@ -503,7 +524,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +571,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +618,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +651,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +861,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. 
@@ -886,10 +904,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1168,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1184,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1211,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. 
-func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1310,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1334,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. 
t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1369,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 6c3ba85159..0ac77ea4f8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -573,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. 
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -691,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 98d62e0675..fb4a88f59b 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,10 +41,11 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. 
// // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Key must be lower-case. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 843633c910..c525dc070f 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -57,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. 
+func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { acw.mu.Lock() ac := acw.ac acw.mu.Unlock() ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -81,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error @@ -89,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -110,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -124,14 +131,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, dropError{error: err} + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -139,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -149,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. 
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0a2..fc91b4d266 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -51,7 +51,7 @@ type pickfirstBalancer struct { func (b *pickfirstBalancer) ResolverError(err error) { if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad9..cd45547854 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 1f859f7648..ee4b04caf0 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. 
+// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,19 +11,20 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +38,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The message sent by the client when calling ServerReflectionInfo method. type ServerReflectionRequest struct { state protoimpl.MessageState @@ -53,6 +50,7 @@ type ServerReflectionRequest struct { // defined field and then handles them using corresponding methods. 
// // Types that are assignable to MessageRequest: + // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol // *ServerReflectionRequest_FileContainingExtension @@ -64,7 +62,7 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -77,7 +75,7 @@ func (x *ServerReflectionRequest) String() string { func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -90,7 +88,7 @@ func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } func (x *ServerReflectionRequest) GetHost() string { @@ -208,7 +206,7 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -221,7 +219,7 @@ func (x *ExtensionRequest) String() string { func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -234,7 +232,7 @@ func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } func (x *ExtensionRequest) GetContainingType() string { @@ -259,10 +257,11 @@ type ServerReflectionResponse struct { ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server sets one of the following fields according to the - // message_request in the request. 
+ // The server set one of the following fields according to the message_request + // in the request. // // Types that are assignable to MessageResponse: + // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse @@ -273,7 +272,7 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -286,7 +285,7 @@ func (x *ServerReflectionResponse) String() string { func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -299,7 +298,7 @@ func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } func (x *ServerReflectionResponse) GetValidHost() string { @@ -357,8 +356,8 @@ type isServerReflectionResponse_MessageResponse interface { type ServerReflectionResponse_FileDescriptorResponse struct { // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. 
- // As the repeated label is not allowed in oneof fields, we use a + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. @@ -366,12 +365,12 @@ type ServerReflectionResponse_FileDescriptorResponse struct { } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requests. + // This message is used to answer all_extension_numbers_of_type requst. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services requests. + // This message is used to answer list_services request. 
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } @@ -407,7 +406,7 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -420,7 +419,7 @@ func (x *FileDescriptorResponse) String() string { func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -433,7 +432,7 @@ func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. 
func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { @@ -459,7 +458,7 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -472,7 +471,7 @@ func (x *ExtensionNumberResponse) String() string { func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -485,7 +484,7 @@ func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. 
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } func (x *ExtensionNumberResponse) GetBaseTypeName() string { @@ -516,7 +515,7 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -529,7 +528,7 @@ func (x *ListServiceResponse) String() string { func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -542,7 +541,7 @@ func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. 
func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } func (x *ListServiceResponse) GetService() []*ServiceResponse { @@ -567,7 +566,7 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -580,7 +579,7 @@ func (x *ServiceResponse) String() string { func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -593,7 +592,7 @@ func (x *ServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. 
func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } func (x *ServiceResponse) GetName() string { @@ -617,7 +616,7 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -630,7 +629,7 @@ func (x *ErrorResponse) String() string { func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -643,7 +642,7 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } func (x *ErrorResponse) GetErrorCode() int32 { @@ -660,136 +659,139 @@ func (x *ErrorResponse) GetErrorMessage() string { return "" } -var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, - 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, - 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, - 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, - 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, - 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, - 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 
- 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, - 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 
0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, - 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 
0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, - 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 
0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, - 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 
0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x42, 0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, - 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 
0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, - 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, - 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 
0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 
0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc ) -func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) }) - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest 
(*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -799,7 +801,7 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interfa (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse @@ -816,13 +818,13 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 0, // [0:7] is the sub-list for field type_name } -func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } -func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { - if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -834,7 +836,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - 
file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -846,7 +848,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -858,7 +860,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -870,7 +872,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -882,7 +884,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListServiceResponse); i { case 0: return &v.state @@ -894,7 +896,7 @@ func 
file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -906,7 +908,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -919,14 +921,14 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), @@ -936,18 +938,18 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc, + 
RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, NumEnums: 0, NumMessages: 8, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() - File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto deleted file mode 100644 index ee2b82c0a5..0000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection - -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; - -package grpc.reflection.v1alpha; - -service ServerReflection { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - rpc ServerReflectionInfo(stream ServerReflectionRequest) - returns (stream ServerReflectionResponse); -} - -// The message sent by the client when calling ServerReflectionInfo method. -message ServerReflectionRequest { - string host = 1; - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. - oneof message_request { - // Find a proto file by the file name. - string file_by_filename = 3; - - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. .[.] or .). - string file_containing_symbol = 4; - - // Find the proto file which defines an extension extending the given - // message type with the given field number. - ExtensionRequest file_containing_extension = 5; - - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // . - string all_extension_numbers_of_type = 6; - - // List the full names of registered services. The content will not be - // checked. 
- string list_services = 7; - } -} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -message ExtensionRequest { - // Fully-qualified type name. The format should be . - string containing_type = 1; - int32 extension_number = 2; -} - -// The message sent by the server to answer ServerReflectionInfo method. -message ServerReflectionResponse { - string valid_host = 1; - ServerReflectionRequest original_request = 2; - // The server sets one of the following fields according to the - // message_request in the request. - oneof message_response { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - FileDescriptorResponse file_descriptor_response = 4; - - // This message is used to answer all_extension_numbers_of_type requests. - ExtensionNumberResponse all_extension_numbers_response = 5; - - // This message is used to answer list_services requests. - ListServiceResponse list_services_response = 6; - - // This message is used when an error occurs. - ErrorResponse error_response = 7; - } -} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -message FileDescriptorResponse { - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - repeated bytes file_descriptor_proto = 1; -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. 
-message ExtensionNumberResponse { - // Full name of the base type, including the package name. The format - // is . - string base_type_name = 1; - repeated int32 extension_number = 2; -} - -// A list of ServiceResponse sent by the server answering list_services request. -message ListServiceResponse { - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - repeated ServiceResponse service = 1; -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -message ServiceResponse { - // Full name of a registered service, including its package name. The format - // is . - string name = 1; -} - -// The error code and error message sent by the server when an error occurs. -message ErrorResponse { - // This field uses the error codes defined in grpc::StatusCode. - int32 error_code = 1; - string error_message = 2; -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index b8e76a87dc..ed54ab1378 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. 
Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -151,5 +153,5 @@ var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", + Metadata: "grpc/reflection/v1alpha/reflection.proto", } diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 81344abd77..e2f9ebfbbc 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -23,6 +23,7 @@ The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" s := grpc.NewServer() @@ -32,7 +33,6 @@ To register server reflection on a gRPC server: reflection.Register(s) s.Serve(lis) - */ package reflection // import "google.golang.org/grpc/reflection" @@ -42,12 +42,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" + + v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. 
It is implemented by @@ -63,7 +65,7 @@ var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. func Register(s GRPCServer) { svr := NewServer(ServerOptions{Services: s}) - rpb.RegisterServerReflectionServer(s, svr) + v1alphagrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -74,7 +76,7 @@ func Register(s GRPCServer) { // for a custom implementation to return zero values for the // grpc.ServiceInfo values in the map. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -85,7 +87,7 @@ type ServiceInfoProvider interface { // ExtensionResolver is the interface used to query details about extensions. // This interface is satisfied by protoregistry.GlobalTypes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -96,7 +98,7 @@ type ExtensionResolver interface { // ServerOptions represents the options used to construct a reflection server. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -120,11 +122,11 @@ type ServerOptions struct { // This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // -// Experimental +// # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
-func NewServer(opts ServerOptions) rpb.ServerReflectionServer { +func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -139,7 +141,7 @@ func NewServer(opts ServerOptions) rpb.ServerReflectionServer { } type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer + v1alphagrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -213,11 +215,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &rpb.ServiceResponse{Name: svc}) + resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -226,7 +228,7 @@ func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. 
-func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -237,79 +239,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio return err } - out := &rpb.ServerReflectionResponse{ + out := &v1alphapb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *rpb.ServerReflectionRequest_FileByFilename: + case *v1alphapb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingSymbol: + case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: 
int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingExtension: + case *v1alphapb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphapb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - 
AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *rpb.ServerReflectionRequest_ListServices: - out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &rpb.ListServiceResponse{ + case *v1alphapb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphapb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 99db79fafc..a6f26c8ab0 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . 
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index ca2e35a359..654e9ce69f 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -24,6 +24,7 @@ import ( "context" "net" "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -96,7 +97,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -236,20 +237,17 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. Scheme string // Deprecated: use URL.Host instead. Authority string - // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when - // the former is empty. - Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -257,6 +255,24 @@ type Target struct { URL url.URL } +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. 
The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004b..cb7020ebec 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -198,7 +197,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. 
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +219,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +241,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +281,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error { func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -328,7 +329,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +352,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +370,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +380,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -416,7 +417,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. 
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +445,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +456,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +481,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +498,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -508,7 +509,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +549,11 @@ type parser struct { // format. The caller owns the returned msg memory. 
// // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -710,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java @@ -745,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. 
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } @@ -758,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return err } if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f4dde72b41..d5a6e78be4 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. 
-// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. 
return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1303,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/vendor/google.golang.org/grpc/service_config.go 
b/vendor/google.golang.org/grpc/service_config.go index 01bbb2025a..f22acace42 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } d, err := parseDuration(m.Timeout) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", 
js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f92661..35e7a20a04 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 6d163b6e38..623be39f26 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. 
func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 0c16cfb2ea..93231af2ac 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -39,6 +39,7 @@ import ( imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -408,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -430,7 +438,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -447,6 +455,25 @@ func (a *csAttempt) getTransport() error { func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metatada != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metatada) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) @@ -521,12 +548,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. 
type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -744,17 +771,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -1087,12 +1122,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1448,6 +1483,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. 
It returns io.EOF when the client has performed a CloseSend. On diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb5..bfa5dfa40e 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index d472ca6430..fe552c315b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.50.1" +const Version = "1.53.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index c3fc8253b1..3728aed04f 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -66,8 +66,21 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. 
not grep 'func Test[^(]' *_test.go @@ -81,7 +94,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" @@ -91,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - # - gofmt, goimports, golint (with exceptions for generated code), go vet, # go mod tidy. # Perform these checks on each module inside gRPC. @@ -109,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -119,8 +125,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... 
> "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go deleted file mode 100644 index 1b2085d469..0000000000 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ /dev/null @@ -1,591 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/field_mask.proto - -// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto. -// -// The FieldMask message represents a set of symbolic field paths. -// The paths are specific to some target message type, -// which is not stored within the FieldMask message itself. -// -// -// Constructing a FieldMask -// -// The New function is used construct a FieldMask: -// -// var messageType *descriptorpb.DescriptorProto -// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") -// if err != nil { -// ... // handle error -// } -// ... // make use of fm -// -// The "field.name" and "field.number" paths are valid paths according to the -// google.protobuf.DescriptorProto message. Use of a path that does not correlate -// to valid fields reachable from DescriptorProto would result in an error. -// -// Once a FieldMask message has been constructed, -// the Append method can be used to insert additional paths to the path set: -// -// var messageType *descriptorpb.DescriptorProto -// if err := fm.Append(messageType, "options"); err != nil { -// ... 
// handle error -// } -// -// -// Type checking a FieldMask -// -// In order to verify that a FieldMask represents a set of fields that are -// reachable from some target message type, use the IsValid method: -// -// var messageType *descriptorpb.DescriptorProto -// if fm.IsValid(messageType) { -// ... // make use of fm -// } -// -// IsValid needs to be passed the target message type as an input since the -// FieldMask message itself does not store the message type that the set of paths -// are for. -package fieldmaskpb - -import ( - proto "google.golang.org/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sort "sort" - strings "strings" - sync "sync" -) - -// `FieldMask` represents a set of symbolic field paths, for example: -// -// paths: "f.a" -// paths: "f.b.d" -// -// Here `f` represents a field in some root message, `a` and `b` -// fields in the message found in `f`, and `d` a field found in the -// message in `f.b`. -// -// Field masks are used to specify a subset of fields that should be -// returned by a get operation or modified by an update operation. -// Field masks also have a custom JSON encoding (see below). -// -// # Field Masks in Projections -// -// When used in the context of a projection, a response message or -// sub-message is filtered by the API to only contain those fields as -// specified in the mask. For example, if the mask in the previous -// example is applied to a response message as follows: -// -// f { -// a : 22 -// b { -// d : 1 -// x : 2 -// } -// y : 13 -// } -// z: 8 -// -// The result will not contain specific values for fields x,y and z -// (their value will be set to the default, and omitted in proto text -// output): -// -// -// f { -// a : 22 -// b { -// d : 1 -// } -// } -// -// A repeated field is not allowed except at the last position of a -// paths string. 
-// -// If a FieldMask object is not present in a get operation, the -// operation applies to all fields (as if a FieldMask of all fields -// had been specified). -// -// Note that a field mask does not necessarily apply to the -// top-level response message. In case of a REST get operation, the -// field mask applies directly to the response, but in case of a REST -// list operation, the mask instead applies to each individual message -// in the returned resource list. In case of a REST custom method, -// other definitions may be used. Where the mask applies will be -// clearly documented together with its declaration in the API. In -// any case, the effect on the returned resource/resources is required -// behavior for APIs. -// -// # Field Masks in Update Operations -// -// A field mask in update operations specifies which fields of the -// targeted resource are going to be updated. The API is required -// to only change the values of the fields as specified in the mask -// and leave the others untouched. If a resource is passed in to -// describe the updated values, the API ignores the values of all -// fields not covered by the mask. -// -// If a repeated field is specified for an update operation, new values will -// be appended to the existing repeated field in the target resource. Note that -// a repeated field is only allowed in the last position of a `paths` string. -// -// If a sub-message is specified in the last position of the field mask for an -// update operation, then new value will be merged into the existing sub-message -// in the target resource. 
-// -// For example, given the target message: -// -// f { -// b { -// d: 1 -// x: 2 -// } -// c: [1] -// } -// -// And an update message: -// -// f { -// b { -// d: 10 -// } -// c: [2] -// } -// -// then if the field mask is: -// -// paths: ["f.b", "f.c"] -// -// then the result will be: -// -// f { -// b { -// d: 10 -// x: 2 -// } -// c: [1, 2] -// } -// -// An implementation may provide options to override this default behavior for -// repeated and message fields. -// -// In order to reset a field's value to the default, the field must -// be in the mask and set to the default value in the provided resource. -// Hence, in order to reset all fields of a resource, provide a default -// instance of the resource and set all fields in the mask, or do -// not provide a mask as described below. -// -// If a field mask is not present on update, the operation applies to -// all fields (as if a field mask of all fields has been specified). -// Note that in the presence of schema evolution, this may mean that -// fields the client does not know and has therefore not filled into -// the request will be reset to their default. If this is unwanted -// behavior, a specific service may require a client to always specify -// a field mask, producing an error if not. -// -// As with get operations, the location of the resource which -// describes the updated values in the request message depends on the -// operation kind. In any case, the effect of the field mask is -// required to be honored by the API. -// -// ## Considerations for HTTP REST -// -// The HTTP kind of an update operation which uses a field mask must -// be set to PATCH instead of PUT in order to satisfy HTTP semantics -// (PUT must only be used for full updates). -// -// # JSON Encoding of Field Masks -// -// In JSON, a field mask is encoded as a single string where paths are -// separated by a comma. Fields name in each path are converted -// to/from lower-camel naming conventions. 
-// -// As an example, consider the following message declarations: -// -// message Profile { -// User user = 1; -// Photo photo = 2; -// } -// message User { -// string display_name = 1; -// string address = 2; -// } -// -// In proto a field mask for `Profile` may look as such: -// -// mask { -// paths: "user.display_name" -// paths: "photo" -// } -// -// In JSON, the same mask is represented as below: -// -// { -// mask: "user.displayName,photo" -// } -// -// # Field Masks and Oneof Fields -// -// Field masks treat fields in oneofs just as regular fields. Consider the -// following message: -// -// message SampleMessage { -// oneof test_oneof { -// string name = 4; -// SubMessage sub_message = 9; -// } -// } -// -// The field mask can be: -// -// mask { -// paths: "name" -// } -// -// Or: -// -// mask { -// paths: "sub_message" -// } -// -// Note that oneof type names ("test_oneof" in this case) cannot be used in -// paths. -// -// ## Field Mask Verification -// -// The implementation of any API method which has a FieldMask type field in the -// request should verify the included field paths, and return an -// `INVALID_ARGUMENT` error if any path is unmappable. -type FieldMask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The set of field mask paths. - Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` -} - -// New constructs a field mask from a list of paths and verifies that -// each one is valid according to the specified message type. -func New(m proto.Message, paths ...string) (*FieldMask, error) { - x := new(FieldMask) - return x, x.Append(m, paths...) -} - -// Union returns the union of all the paths in the input field masks. -func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { - var out []string - out = append(out, mx.GetPaths()...) - out = append(out, my.GetPaths()...) - for _, m := range ms { - out = append(out, m.GetPaths()...) 
- } - return &FieldMask{Paths: normalizePaths(out)} -} - -// Intersect returns the intersection of all the paths in the input field masks. -func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { - var ss1, ss2 []string // reused buffers for performance - intersect := func(out, in []string) []string { - ss1 = normalizePaths(append(ss1[:0], in...)) - ss2 = normalizePaths(append(ss2[:0], out...)) - out = out[:0] - for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { - switch s1, s2 := ss1[i1], ss2[i2]; { - case hasPathPrefix(s1, s2): - out = append(out, s1) - i1++ - case hasPathPrefix(s2, s1): - out = append(out, s2) - i2++ - case lessPath(s1, s2): - i1++ - case lessPath(s2, s1): - i2++ - } - } - return out - } - - out := Union(mx, my, ms...).GetPaths() - out = intersect(out, mx.GetPaths()) - out = intersect(out, my.GetPaths()) - for _, m := range ms { - out = intersect(out, m.GetPaths()) - } - return &FieldMask{Paths: normalizePaths(out)} -} - -// IsValid reports whether all the paths are syntactically valid and -// refer to known fields in the specified message type. -// It reports false for a nil FieldMask. -func (x *FieldMask) IsValid(m proto.Message) bool { - paths := x.GetPaths() - return x != nil && numValidPaths(m, paths) == len(paths) -} - -// Append appends a list of paths to the mask and verifies that each one -// is valid according to the specified message type. -// An invalid path is not appended and breaks insertion of subsequent paths. -func (x *FieldMask) Append(m proto.Message, paths ...string) error { - numValid := numValidPaths(m, paths) - x.Paths = append(x.Paths, paths[:numValid]...) 
- paths = paths[numValid:] - if len(paths) > 0 { - name := m.ProtoReflect().Descriptor().FullName() - return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) - } - return nil -} - -func numValidPaths(m proto.Message, paths []string) int { - md0 := m.ProtoReflect().Descriptor() - for i, path := range paths { - md := md0 - if !rangeFields(path, func(field string) bool { - // Search the field within the message. - if md == nil { - return false // not within a message - } - fd := md.Fields().ByName(protoreflect.Name(field)) - // The real field name of a group is the message name. - if fd == nil { - gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field { - fd = gd - } - } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field { - fd = nil - } - if fd == nil { - return false // message has does not have this field - } - - // Identify the next message to search within. - md = fd.Message() // may be nil - - // Repeated fields are only allowed at the last position. - if fd.IsList() || fd.IsMap() { - md = nil - } - - return true - }) { - return i - } - } - return len(paths) -} - -// Normalize converts the mask to its canonical form where all paths are sorted -// and redundant paths are removed. -func (x *FieldMask) Normalize() { - x.Paths = normalizePaths(x.Paths) -} - -func normalizePaths(paths []string) []string { - sort.Slice(paths, func(i, j int) bool { - return lessPath(paths[i], paths[j]) - }) - - // Elide any path that is a prefix match on the previous. - out := paths[:0] - for _, path := range paths { - if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) { - continue - } - out = append(out, path) - } - return out -} - -// hasPathPrefix is like strings.HasPrefix, but further checks for either -// an exact matche or that the prefix is delimited by a dot. 
-func hasPathPrefix(path, prefix string) bool { - return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.') -} - -// lessPath is a lexicographical comparison where dot is specially treated -// as the smallest symbol. -func lessPath(x, y string) bool { - for i := 0; i < len(x) && i < len(y); i++ { - if x[i] != y[i] { - return (x[i] - '.') < (y[i] - '.') - } - } - return len(x) < len(y) -} - -// rangeFields is like strings.Split(path, "."), but avoids allocations by -// iterating over each field in place and calling a iterator function. -func rangeFields(path string, f func(field string) bool) bool { - for { - var field string - if i := strings.IndexByte(path, '.'); i >= 0 { - field, path = path[:i], path[i:] - } else { - field, path = path, "" - } - - if !f(field) { - return false - } - - if len(path) == 0 { - return true - } - path = strings.TrimPrefix(path, ".") - } -} - -func (x *FieldMask) Reset() { - *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldMask) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldMask) ProtoMessage() {} - -func (x *FieldMask) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead. 
-func (*FieldMask) Descriptor() ([]byte, []int) { - return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} -} - -func (x *FieldMask) GetPaths() []string { - if x != nil { - return x.Paths - } - return nil -} - -var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor - -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, - 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc -) - -func 
file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { - file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) - }) - return file_google_protobuf_field_mask_proto_rawDescData -} - -var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ - (*FieldMask)(nil), // 0: google.protobuf.FieldMask -} -var file_google_protobuf_field_mask_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_field_mask_proto_init() } -func file_google_protobuf_field_mask_proto_init() { - if File_google_protobuf_field_mask_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_field_mask_proto_goTypes, - DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, - MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, - }.Build() - File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil - file_google_protobuf_field_mask_proto_goTypes = nil - 
file_google_protobuf_field_mask_proto_depIdxs = nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7cd4cf2c9a..f82338f6f3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -28,7 +28,7 @@ github.com/cenkalti/backoff/v3 # github.com/cenkalti/backoff/v4 v4.1.3 ## explicit; go 1.13 github.com/cenkalti/backoff/v4 -# github.com/cespare/xxhash/v2 v2.1.2 +# github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/containerd/continuity v0.3.0 @@ -37,6 +37,9 @@ github.com/containerd/continuity/pathdriver # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 +## explicit; go 1.17 +github.com/decred/dcrd/dcrec/secp256k1/v4 # github.com/deepmap/oapi-codegen v1.8.2 ## explicit; go 1.14 github.com/deepmap/oapi-codegen/pkg/runtime @@ -107,7 +110,6 @@ github.com/go-kit/kit/metrics/internal/lv github.com/go-kit/kit/metrics/prometheus github.com/go-kit/kit/sd github.com/go-kit/kit/sd/lb -github.com/go-kit/kit/tracing/opentracing github.com/go-kit/kit/transport github.com/go-kit/kit/transport/grpc github.com/go-kit/kit/transport/http @@ -117,6 +119,13 @@ github.com/go-kit/log # github.com/go-logfmt/logfmt v0.6.0 ## explicit; go 1.17 github.com/go-logfmt/logfmt +# github.com/go-logr/logr v1.2.3 +## explicit; go 1.16 +github.com/go-logr/logr +github.com/go-logr/logr/funcr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/go-redis/redis/v8 v8.11.5 ## explicit; go 1.17 github.com/go-redis/redis/v8 @@ -130,6 +139,17 @@ github.com/go-redis/redis/v8/internal/util # github.com/go-zoo/bone v1.3.0 ## explicit; go 1.9 github.com/go-zoo/bone +# github.com/goccy/go-json v0.9.11 +## explicit; go 1.12 +github.com/goccy/go-json +github.com/goccy/go-json/internal/decoder +github.com/goccy/go-json/internal/encoder +github.com/goccy/go-json/internal/encoder/vm +github.com/goccy/go-json/internal/encoder/vm_color 
+github.com/goccy/go-json/internal/encoder/vm_color_indent +github.com/goccy/go-json/internal/encoder/vm_indent +github.com/goccy/go-json/internal/errors +github.com/goccy/go-json/internal/runtime # github.com/gocql/gocql v1.2.1 ## explicit; go 1.13 github.com/gocql/gocql @@ -142,9 +162,6 @@ github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/proto -# github.com/golang-jwt/jwt/v4 v4.5.0 -## explicit; go 1.16 -github.com/golang-jwt/jwt/v4 # github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 github.com/golang/protobuf/jsonpb @@ -160,6 +177,12 @@ github.com/golang/snappy # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex +# github.com/gookit/color v1.5.3 +## explicit; go 1.18 +github.com/gookit/color +# github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e +## explicit +github.com/goombaio/namegenerator # github.com/gopcua/opcua v0.1.6 ## explicit; go 1.12 github.com/gopcua/opcua @@ -286,6 +309,9 @@ github.com/influxdata/influxdb-client-go/v2/log # github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 ## explicit; go 1.13 github.com/influxdata/line-protocol +# github.com/ivanpirog/coloredcobra v1.0.1 +## explicit; go 1.15 +github.com/ivanpirog/coloredcobra # github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa ## explicit; go 1.12 github.com/jackc/pgerrcode @@ -328,6 +354,45 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash +# github.com/lestrrat-go/blackmagic v1.0.1 +## explicit; go 1.16 +github.com/lestrrat-go/blackmagic +# github.com/lestrrat-go/httpcc v1.0.1 +## explicit; go 1.16 +github.com/lestrrat-go/httpcc +# github.com/lestrrat-go/httprc v1.0.4 +## explicit; go 1.17 +github.com/lestrrat-go/httprc +# github.com/lestrrat-go/iter v1.0.2 +## explicit; go 1.13 
+github.com/lestrrat-go/iter/arrayiter +github.com/lestrrat-go/iter/mapiter +# github.com/lestrrat-go/jwx/v2 v2.0.8 +## explicit; go 1.16 +github.com/lestrrat-go/jwx/v2 +github.com/lestrrat-go/jwx/v2/cert +github.com/lestrrat-go/jwx/v2/internal/base64 +github.com/lestrrat-go/jwx/v2/internal/ecutil +github.com/lestrrat-go/jwx/v2/internal/iter +github.com/lestrrat-go/jwx/v2/internal/json +github.com/lestrrat-go/jwx/v2/internal/keyconv +github.com/lestrrat-go/jwx/v2/internal/pool +github.com/lestrrat-go/jwx/v2/jwa +github.com/lestrrat-go/jwx/v2/jwe +github.com/lestrrat-go/jwx/v2/jwe/internal/aescbc +github.com/lestrrat-go/jwx/v2/jwe/internal/cipher +github.com/lestrrat-go/jwx/v2/jwe/internal/concatkdf +github.com/lestrrat-go/jwx/v2/jwe/internal/content_crypt +github.com/lestrrat-go/jwx/v2/jwe/internal/keyenc +github.com/lestrrat-go/jwx/v2/jwe/internal/keygen +github.com/lestrrat-go/jwx/v2/jwk +github.com/lestrrat-go/jwx/v2/jws +github.com/lestrrat-go/jwx/v2/jwt +github.com/lestrrat-go/jwx/v2/jwt/internal/types +github.com/lestrrat-go/jwx/v2/x25519 +# github.com/lestrrat-go/option v1.0.0 +## explicit; go 1.16 +github.com/lestrrat-go/option # github.com/magiconair/properties v1.8.6 ## explicit; go 1.13 github.com/magiconair/properties @@ -402,9 +467,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opentracing/opentracing-go v1.2.0 ## explicit; go 1.14 github.com/opentracing/opentracing-go -github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log -github.com/opentracing/opentracing-go/mocktracer # github.com/ory/dockertest/v3 v3.9.1 ## explicit; go 1.17 github.com/ory/dockertest/v3 @@ -430,9 +493,6 @@ github.com/ory/dockertest/v3/docker/types/network github.com/ory/dockertest/v3/docker/types/registry github.com/ory/dockertest/v3/docker/types/strslice github.com/ory/dockertest/v3/docker/types/versions -# github.com/ory/keto/proto/ory/keto/acl/v1alpha1 v0.0.0-20210616104402-80e043246cf9 -## explicit; go 1.16 
-github.com/ory/keto/proto/ory/keto/acl/v1alpha1 # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml @@ -570,36 +630,17 @@ github.com/spf13/viper/internal/encoding/javaproperties github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml -# github.com/stretchr/testify v1.8.1 +# github.com/stretchr/objx v0.5.0 +## explicit; go 1.12 +github.com/stretchr/objx +# github.com/stretchr/testify v1.8.2 ## explicit; go 1.13 github.com/stretchr/testify/assert +github.com/stretchr/testify/mock github.com/stretchr/testify/require # github.com/subosito/gotenv v1.4.1 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/uber/jaeger-client-go v2.30.0+incompatible -## explicit -github.com/uber/jaeger-client-go -github.com/uber/jaeger-client-go/config -github.com/uber/jaeger-client-go/internal/baggage -github.com/uber/jaeger-client-go/internal/baggage/remote -github.com/uber/jaeger-client-go/internal/reporterstats -github.com/uber/jaeger-client-go/internal/spanlog -github.com/uber/jaeger-client-go/internal/throttler -github.com/uber/jaeger-client-go/internal/throttler/remote -github.com/uber/jaeger-client-go/log -github.com/uber/jaeger-client-go/rpcmetrics -github.com/uber/jaeger-client-go/thrift -github.com/uber/jaeger-client-go/thrift-gen/agent -github.com/uber/jaeger-client-go/thrift-gen/baggage -github.com/uber/jaeger-client-go/thrift-gen/jaeger -github.com/uber/jaeger-client-go/thrift-gen/sampling -github.com/uber/jaeger-client-go/thrift-gen/zipkincore -github.com/uber/jaeger-client-go/transport -github.com/uber/jaeger-client-go/utils -# github.com/uber/jaeger-lib v2.4.1+incompatible -## explicit -github.com/uber/jaeger-lib/metrics # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 @@ -621,6 +662,9 @@ github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.2.0 ## explicit github.com/xeipuuv/gojsonschema +# 
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 +## explicit; go 1.15 +github.com/xo/terminfo # github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a ## explicit; go 1.12 github.com/youmark/pkcs8 @@ -661,6 +705,53 @@ go.mongodb.org/mongo-driver/x/mongo/driver/operation go.mongodb.org/mongo-driver/x/mongo/driver/session go.mongodb.org/mongo-driver/x/mongo/driver/topology go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage +# go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit v0.38.0 +## explicit; go 1.18 +go.opentelemetry.io/contrib/instrumentation/github.com/go-kit/kit/otelkit +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 +## explicit; go 1.18 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +# go.opentelemetry.io/contrib/propagators/jaeger v1.15.0 +## explicit; go 1.18 +go.opentelemetry.io/contrib/propagators/jaeger +# go.opentelemetry.io/otel v1.14.0 +## explicit; go 1.18 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/internal +go.opentelemetry.io/otel/semconv/v1.12.0 +go.opentelemetry.io/otel/semconv/v1.17.0 +# go.opentelemetry.io/otel/exporters/jaeger v1.12.0 +## explicit; go 1.18 +go.opentelemetry.io/otel/exporters/jaeger +go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent +go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger +go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore +go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift +# go.opentelemetry.io/otel/metric v0.37.0 +## 
explicit; go 1.18 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/global +go.opentelemetry.io/otel/metric/instrument +go.opentelemetry.io/otel/metric/internal/global +# go.opentelemetry.io/otel/sdk v1.12.0 +## explicit; go 1.18 +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal +go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/resource +go.opentelemetry.io/otel/sdk/trace +# go.opentelemetry.io/otel/trace v1.14.0 +## explicit; go 1.18 +go.opentelemetry.io/otel/trace # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 go.uber.org/atomic @@ -708,6 +799,7 @@ golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry # golang.org/x/text v0.9.0 ## explicit; go 1.17 golang.org/x/text/cases @@ -768,11 +860,10 @@ gonum.org/v1/gonum/lapack/gonum gonum.org/v1/gonum/lapack/lapack64 gonum.org/v1/gonum/mat gonum.org/v1/gonum/stat -# google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71 +# google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.50.1 +# google.golang.org/grpc v1.53.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes @@ -860,7 +951,6 @@ google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb -google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb # gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc ## explicit diff --git a/ws/README.md b/ws/README.md index 44df865d0d..81bdd61fe8 100644 --- a/ws/README.md +++ b/ws/README.md @@ -53,4 +53,4 @@ $GOBIN/mainflux-ws ## Usage For more information about service capabilities and its 
usage, please check out -the [WebSocket paragraph](https://mainflux.readthedocs.io/en/latest/messaging/#websocket) in the Getting Started guide. \ No newline at end of file +the [WebSocket paragraph](https://mainflux.readthedocs.io/en/latest/messaging/#websocket) in the Getting Started guide. diff --git a/ws/adapter.go b/ws/adapter.go index 23a70a4474..3a4ef2506e 100644 --- a/ws/adapter.go +++ b/ws/adapter.go @@ -9,9 +9,9 @@ import ( "context" "fmt" - "github.com/mainflux/mainflux" "github.com/mainflux/mainflux/pkg/errors" "github.com/mainflux/mainflux/pkg/messaging" + "github.com/mainflux/mainflux/things/policies" ) const ( @@ -59,12 +59,12 @@ type Service interface { var _ Service = (*adapterService)(nil) type adapterService struct { - auth mainflux.ThingsServiceClient + auth policies.ThingsServiceClient pubsub messaging.PubSub } // New instantiates the WS adapter implementation -func New(auth mainflux.ThingsServiceClient, pubsub messaging.PubSub) Service { +func New(auth policies.ThingsServiceClient, pubsub messaging.PubSub) Service { return &adapterService{ auth: auth, pubsub: pubsub, @@ -82,7 +82,7 @@ func (svc *adapterService) Publish(ctx context.Context, thingKey string, msg *me return ErrFailedMessagePublish } - msg.Publisher = thid.GetValue() + msg.Publisher = thid if err := svc.pubsub.Publish(ctx, msg.GetChannel(), msg); err != nil { return ErrFailedMessagePublish @@ -102,14 +102,14 @@ func (svc *adapterService) Subscribe(ctx context.Context, thingKey, chanID, subt return ErrUnauthorizedAccess } - c.id = thid.GetValue() + c.id = thid subject := fmt.Sprintf("%s.%s", chansPrefix, chanID) if subtopic != "" { subject = fmt.Sprintf("%s.%s", subject, subtopic) } - if err := svc.pubsub.Subscribe(ctx, thid.GetValue(), subject, c); err != nil { + if err := svc.pubsub.Subscribe(ctx, thid, subject, c); err != nil { return ErrFailedSubscription } @@ -132,18 +132,23 @@ func (svc *adapterService) Unsubscribe(ctx context.Context, thingKey, chanID, su subject = 
fmt.Sprintf("%s.%s", subject, subtopic) } - return svc.pubsub.Unsubscribe(ctx, thid.GetValue(), subject) + return svc.pubsub.Unsubscribe(ctx, thid, subject) } -func (svc *adapterService) authorize(ctx context.Context, thingKey, chanID string) (*mainflux.ThingID, error) { - ar := &mainflux.AccessByKeyReq{ - Token: thingKey, - ChanID: chanID, +func (svc *adapterService) authorize(ctx context.Context, thingKey, chanID string) (string, error) { + ar := &policies.AuthorizeReq{ + Sub: thingKey, + Obj: chanID, + Act: policies.ReadAction, + EntityType: policies.GroupEntityType, } - thid, err := svc.auth.CanAccessByKey(ctx, ar) + res, err := svc.auth.Authorize(ctx, ar) if err != nil { - return nil, errors.Wrap(errors.ErrAuthorization, err) + return "", errors.Wrap(errors.ErrAuthorization, err) + } + if !res.GetAuthorized() { + return "", errors.Wrap(errors.ErrAuthorization, err) } - return thid, nil + return res.GetThingID(), nil } diff --git a/ws/adapter_test.go b/ws/adapter_test.go index 4a76799fa9..0b4ac779e6 100644 --- a/ws/adapter_test.go +++ b/ws/adapter_test.go @@ -8,9 +8,9 @@ import ( "fmt" "testing" - "github.com/mainflux/mainflux" httpmock "github.com/mainflux/mainflux/http/mocks" "github.com/mainflux/mainflux/pkg/messaging" + "github.com/mainflux/mainflux/things/policies" "github.com/mainflux/mainflux/ws" "github.com/mainflux/mainflux/ws/mocks" "github.com/stretchr/testify/assert" @@ -32,7 +32,7 @@ var msg = messaging.Message{ Payload: []byte(`[{"n":"current","t":-5,"v":1.2}]`), } -func newService(cc mainflux.ThingsServiceClient) (ws.Service, mocks.MockPubSub) { +func newService(cc policies.ThingsServiceClient) (ws.Service, mocks.MockPubSub) { pubsub := mocks.NewPubSub() return ws.New(cc, pubsub), pubsub } diff --git a/ws/api/endpoint_test.go b/ws/api/endpoint_test.go index da31188803..112331f02d 100644 --- a/ws/api/endpoint_test.go +++ b/ws/api/endpoint_test.go @@ -11,9 +11,9 @@ import ( "testing" "github.com/gorilla/websocket" - "github.com/mainflux/mainflux" - 
log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" + "github.com/mainflux/mainflux/things/policies" "github.com/mainflux/mainflux/ws" httpmock "github.com/mainflux/mainflux/http/mocks" @@ -31,13 +31,13 @@ const ( var msg = []byte(`[{"n":"current","t":-1,"v":1.6}]`) -func newService(cc mainflux.ThingsServiceClient) (ws.Service, mocks.MockPubSub) { +func newService(cc policies.ThingsServiceClient) (ws.Service, mocks.MockPubSub) { pubsub := mocks.NewPubSub() return ws.New(cc, pubsub), pubsub } func newHTTPServer(svc ws.Service) *httptest.Server { - logger := log.NewMock() + logger := mflog.NewMock() mux := api.MakeHandler(svc, logger) return httptest.NewServer(mux) } diff --git a/ws/api/logging.go b/ws/api/logging.go index 178dd4a0a9..42be2c7c5d 100644 --- a/ws/api/logging.go +++ b/ws/api/logging.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/ws" ) @@ -18,12 +18,12 @@ import ( var _ ws.Service = (*loggingMiddleware)(nil) type loggingMiddleware struct { - logger log.Logger + logger mflog.Logger svc ws.Service } // LoggingMiddleware adds logging facilities to the adapter -func LoggingMiddleware(svc ws.Service, logger log.Logger) ws.Service { +func LoggingMiddleware(svc ws.Service, logger mflog.Logger) ws.Service { return &loggingMiddleware{logger, svc} } diff --git a/ws/api/transport.go b/ws/api/transport.go index 3af1dc33cd..7c7c88839f 100644 --- a/ws/api/transport.go +++ b/ws/api/transport.go @@ -10,7 +10,7 @@ import ( "github.com/go-zoo/bone" "github.com/gorilla/websocket" "github.com/mainflux/mainflux" - log "github.com/mainflux/mainflux/logger" + mflog "github.com/mainflux/mainflux/logger" "github.com/mainflux/mainflux/ws" "github.com/prometheus/client_golang/prometheus/promhttp" ) @@ -31,11 +31,11 @@ var ( WriteBufferSize: readwriteBufferSize, CheckOrigin: func(r 
*http.Request) bool { return true }, } - logger log.Logger + logger mflog.Logger ) // MakeHandler returns http handler with handshake endpoint. -func MakeHandler(svc ws.Service, l log.Logger) http.Handler { +func MakeHandler(svc ws.Service, l mflog.Logger) http.Handler { logger = l mux := bone.New() diff --git a/ws/tracing/tracing.go b/ws/tracing/tracing.go index 929f5bf497..23a97451ce 100644 --- a/ws/tracing/tracing.go +++ b/ws/tracing/tracing.go @@ -5,7 +5,7 @@ import ( "github.com/mainflux/mainflux/pkg/messaging" "github.com/mainflux/mainflux/ws" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) var _ ws.Service = (*tracingMiddleware)(nil) @@ -17,12 +17,12 @@ const ( ) type tracingMiddleware struct { - tracer opentracing.Tracer + tracer trace.Tracer svc ws.Service } // New returns a new ws.Service that traces incoming requests using the given tracer. -func New(tracer opentracing.Tracer, svc ws.Service) ws.Service { +func New(tracer trace.Tracer, svc ws.Service) ws.Service { return &tracingMiddleware{ tracer: tracer, svc: svc, @@ -31,35 +31,24 @@ func New(tracer opentracing.Tracer, svc ws.Service) ws.Service { // Publish traces the "Publish" operation of the wrapped ws.Service. func (tm *tracingMiddleware) Publish(ctx context.Context, thingKey string, msg *messaging.Message) error { - span := tm.createSpan(ctx, publishOP) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := tm.tracer.Start(ctx, publishOP) + defer span.End() + return tm.svc.Publish(ctx, thingKey, msg) } // Subscribe traces the "Subscribe" operation of the wrapped ws.Service. 
func (tm *tracingMiddleware) Subscribe(ctx context.Context, thingKey string, chanID string, subtopic string, client *ws.Client) error { - span := tm.createSpan(ctx, subscribeOP) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) + ctx, span := tm.tracer.Start(ctx, subscribeOP) + defer span.End() + return tm.svc.Subscribe(ctx, thingKey, chanID, subtopic, client) } // Unsubscribe traces the "Unsubscribe" operation of the wrapped ws.Service. func (tm *tracingMiddleware) Unsubscribe(ctx context.Context, thingKey string, chanID string, subtopic string) error { - span := tm.createSpan(ctx, unsubscribeOP) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - return tm.svc.Unsubscribe(ctx, thingKey, chanID, subtopic) -} + ctx, span := tm.tracer.Start(ctx, unsubscribeOP) + defer span.End() -// createSpan creates a new tracing span using the given context and operation name. -func (tm *tracingMiddleware) createSpan(ctx context.Context, opName string) opentracing.Span { - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - return tm.tracer.StartSpan( - opName, - opentracing.ChildOf(parentSpan.Context()), - ) - } - return tm.tracer.StartSpan(opName) + return tm.svc.Unsubscribe(ctx, thingKey, chanID, subtopic) }